Diffstat (limited to 'drivers/net')
-rw-r--r--drivers/net/Kconfig1
-rw-r--r--drivers/net/Space.c29
-rw-r--r--drivers/net/arcnet/com20020_cs.c1
-rw-r--r--drivers/net/bonding/Makefile2
-rw-r--r--drivers/net/bonding/bond_3ad.c900
-rw-r--r--drivers/net/bonding/bond_3ad.h2
-rw-r--r--drivers/net/bonding/bond_alb.c49
-rw-r--r--drivers/net/bonding/bond_alb.h3
-rw-r--r--drivers/net/bonding/bond_main.c569
-rw-r--r--drivers/net/bonding/bond_netlink.c464
-rw-r--r--drivers/net/bonding/bond_options.c1191
-rw-r--r--drivers/net/bonding/bond_options.h170
-rw-r--r--drivers/net/bonding/bond_procfs.c37
-rw-r--r--drivers/net/bonding/bond_sysfs.c896
-rw-r--r--drivers/net/bonding/bond_sysfs_slave.c144
-rw-r--r--drivers/net/bonding/bonding.h38
-rw-r--r--drivers/net/caif/caif_spi_slave.c1
-rw-r--r--drivers/net/can/Kconfig4
-rw-r--r--drivers/net/can/at91_can.c1
-rw-r--r--drivers/net/can/bfin_can.c1
-rw-r--r--drivers/net/can/c_can/c_can.c22
-rw-r--r--drivers/net/can/dev.c3
-rw-r--r--drivers/net/can/janz-ican3.c1
-rw-r--r--drivers/net/can/mcp251x.c121
-rw-r--r--drivers/net/can/mscan/mpc5xxx_can.c3
-rw-r--r--drivers/net/can/mscan/mscan.c3
-rw-r--r--drivers/net/can/mscan/mscan.h3
-rw-r--r--drivers/net/can/pch_can.c4
-rw-r--r--drivers/net/can/sja1000/ems_pci.c3
-rw-r--r--drivers/net/can/sja1000/kvaser_pci.c3
-rw-r--r--drivers/net/can/sja1000/plx_pci.c26
-rw-r--r--drivers/net/can/sja1000/sja1000_isa.c3
-rw-r--r--drivers/net/can/sja1000/sja1000_of_platform.c3
-rw-r--r--drivers/net/can/sja1000/sja1000_platform.c3
-rw-r--r--drivers/net/can/slcan.c4
-rw-r--r--drivers/net/can/softing/softing_cs.c3
-rw-r--r--drivers/net/can/softing/softing_fw.c3
-rw-r--r--drivers/net/can/softing/softing_main.c4
-rw-r--r--drivers/net/can/ti_hecc.c11
-rw-r--r--drivers/net/can/usb/ems_usb.c1
-rw-r--r--drivers/net/can/usb/esd_usb2.c1
-rw-r--r--drivers/net/can/usb/kvaser_usb.c1
-rw-r--r--drivers/net/can/usb/usb_8dev.c1
-rw-r--r--drivers/net/eql.c95
-rw-r--r--drivers/net/ethernet/3com/3c509.c3
-rw-r--r--drivers/net/ethernet/3com/3c574_cs.c1
-rw-r--r--drivers/net/ethernet/3com/3c589_cs.c1
-rw-r--r--drivers/net/ethernet/3com/3c59x.c6
-rw-r--r--drivers/net/ethernet/8390/8390.h7
-rw-r--r--drivers/net/ethernet/8390/apne.c62
-rw-r--r--drivers/net/ethernet/8390/ax88796.c23
-rw-r--r--drivers/net/ethernet/8390/axnet_cs.c120
-rw-r--r--drivers/net/ethernet/8390/etherh.c53
-rw-r--r--drivers/net/ethernet/8390/hydra.c11
-rw-r--r--drivers/net/ethernet/8390/lib8390.c77
-rw-r--r--drivers/net/ethernet/8390/mac8390.c19
-rw-r--r--drivers/net/ethernet/8390/mcf8390.c9
-rw-r--r--drivers/net/ethernet/8390/ne.c96
-rw-r--r--drivers/net/ethernet/8390/ne2k-pci.c54
-rw-r--r--drivers/net/ethernet/8390/pcnet_cs.c63
-rw-r--r--drivers/net/ethernet/8390/smc-ultra.c48
-rw-r--r--drivers/net/ethernet/8390/stnic.c28
-rw-r--r--drivers/net/ethernet/8390/wd.c42
-rw-r--r--drivers/net/ethernet/8390/zorro8390.c22
-rw-r--r--drivers/net/ethernet/adi/bfin_mac.c22
-rw-r--r--drivers/net/ethernet/aeroflex/greth.c3
-rw-r--r--drivers/net/ethernet/allwinner/sun4i-emac.c1
-rw-r--r--drivers/net/ethernet/alteon/acenic.c1
-rw-r--r--drivers/net/ethernet/amd/7990.c837
-rw-r--r--drivers/net/ethernet/amd/7990.h268
-rw-r--r--drivers/net/ethernet/amd/amd8111e.c5
-rw-r--r--drivers/net/ethernet/amd/amd8111e.h6
-rw-r--r--drivers/net/ethernet/amd/au1000_eth.c4
-rw-r--r--drivers/net/ethernet/amd/au1000_eth.h3
-rw-r--r--drivers/net/ethernet/amd/hplance.c96
-rw-r--r--drivers/net/ethernet/amd/mvme147.c36
-rw-r--r--drivers/net/ethernet/amd/nmclan_cs.c1
-rw-r--r--drivers/net/ethernet/amd/pcnet32.c2
-rw-r--r--drivers/net/ethernet/amd/sunlance.c1
-rw-r--r--drivers/net/ethernet/arc/emac.h2
-rw-r--r--drivers/net/ethernet/arc/emac_main.c20
-rw-r--r--drivers/net/ethernet/atheros/alx/alx.h3
-rw-r--r--drivers/net/ethernet/atheros/alx/ethtool.c101
-rw-r--r--drivers/net/ethernet/atheros/alx/hw.c58
-rw-r--r--drivers/net/ethernet/atheros/alx/hw.h71
-rw-r--r--drivers/net/ethernet/atheros/alx/main.c50
-rw-r--r--drivers/net/ethernet/atheros/alx/reg.h52
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c.h1
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_main.c31
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e.h1
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_main.c30
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl1.c46
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl1.h1
-rw-r--r--drivers/net/ethernet/broadcom/Kconfig1
-rw-r--r--drivers/net/ethernet/broadcom/b44.c250
-rw-r--r--drivers/net/ethernet/broadcom/b44.h15
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.c364
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.h94
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c60
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.h10
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h8
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c101
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h124
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c6
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c66
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h10
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c209
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h1
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c277
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h17
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c181
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h15
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c39
-rw-r--r--drivers/net/ethernet/broadcom/cnic.c1
-rw-r--r--drivers/net/ethernet/broadcom/cnic.h2
-rw-r--r--drivers/net/ethernet/broadcom/cnic_if.h4
-rw-r--r--drivers/net/ethernet/broadcom/sb1250-mac.c4
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c230
-rw-r--r--drivers/net/ethernet/broadcom/tg3.h11
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_ioc.c625
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_ioc.h8
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c40
-rw-r--r--drivers/net/ethernet/brocade/bna/bfi.h33
-rw-r--r--drivers/net/ethernet/brocade/bna/bfi_enet.h3
-rw-r--r--drivers/net/ethernet/brocade/bna/bna.h24
-rw-r--r--drivers/net/ethernet/brocade/bna/bna_enet.c58
-rw-r--r--drivers/net/ethernet/brocade/bna/bna_hw_defs.h4
-rw-r--r--drivers/net/ethernet/brocade/bna/bna_tx_rx.c251
-rw-r--r--drivers/net/ethernet/brocade/bna/bna_types.h57
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.c559
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.h26
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad_ethtool.c1
-rw-r--r--drivers/net/ethernet/brocade/bna/cna.h4
-rw-r--r--drivers/net/ethernet/cadence/macb.c126
-rw-r--r--drivers/net/ethernet/cadence/macb.h1
-rw-r--r--drivers/net/ethernet/calxeda/xgmac.c1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/common.h4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/cphy.h3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/cpl5_cmd.h3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/cxgb2.c4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/elmer0.h3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/espi.c3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/espi.h3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/gmac.h3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/mv88x201x.c3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/pm3393.c3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/regs.h3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/sge.c4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/sge.h3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/subr.c3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/suni1x10gexp_regs.h3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/common.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/l2t.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h21
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c35
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c6
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c139
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c4
-rw-r--r--drivers/net/ethernet/cirrus/ep93xx_eth.c1
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c11
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_pp.c2
-rw-r--r--drivers/net/ethernet/davicom/dm9000.c24
-rw-r--r--drivers/net/ethernet/dec/tulip/eeprom.c1
-rw-r--r--drivers/net/ethernet/dec/tulip/media.c3
-rw-r--r--drivers/net/ethernet/dec/tulip/uli526x.c6
-rw-r--r--drivers/net/ethernet/dec/tulip/xircom_cb.c1
-rw-r--r--drivers/net/ethernet/dlink/dl2k.h1
-rw-r--r--drivers/net/ethernet/dnet.c1
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h7
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c178
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.h33
-rw-r--r--drivers/net/ethernet/emulex/benet/be_ethtool.c73
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c164
-rw-r--r--drivers/net/ethernet/faraday/ftgmac100.c7
-rw-r--r--drivers/net/ethernet/freescale/fec.h3
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c9
-rw-r--r--drivers/net/ethernet/freescale/fec_ptp.c17
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c1
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mac-fcc.c1
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mac-fec.c1
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mac-scc.c1
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c1
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mii-fec.c1
-rw-r--r--drivers/net/ethernet/freescale/fsl_pq_mdio.c1
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c23
-rw-r--r--drivers/net/ethernet/freescale/gianfar.h1
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ethtool.c99
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ptp.c3
-rw-r--r--drivers/net/ethernet/freescale/gianfar_sysfs.c1
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.c5
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth_ethtool.c1
-rw-r--r--drivers/net/ethernet/fujitsu/fmvj18x_cs.c1
-rw-r--r--drivers/net/ethernet/i825xx/lasi_82596.c1
-rw-r--r--drivers/net/ethernet/i825xx/lib82596.c1
-rw-r--r--drivers/net/ethernet/i825xx/sni_82596.c1
-rw-r--r--drivers/net/ethernet/ibm/emac/core.h1
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.c3
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.h3
-rw-r--r--drivers/net/ethernet/icplus/ipg.h1
-rw-r--r--drivers/net/ethernet/intel/Kconfig49
-rw-r--r--drivers/net/ethernet/intel/Makefile1
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000.h1
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c14
-rw-r--r--drivers/net/ethernet/intel/i40e/Makefile10
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h127
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq.c237
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq.h21
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h136
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_alloc.h7
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_common.c666
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_dcb.c469
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_dcb.h107
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c316
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_debugfs.c400
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_diag.c23
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_diag.h15
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c432
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_hmc.c17
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_hmc.h10
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c10
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h11
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c1753
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_nvm.c77
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_osdep.h7
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_prototype.h53
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ptp.c662
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_register.h170
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_status.h7
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c195
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.h64
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_type.h152
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl.h11
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c876
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h11
-rw-r--r--drivers/net/ethernet/intel/i40evf/Makefile33
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_adminq.c927
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_adminq.h106
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h2153
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_alloc.h55
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_common.c254
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_hmc.h238
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h165
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_osdep.h72
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_prototype.h84
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_register.h4667
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_status.h97
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_txrx.c1575
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_txrx.h296
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_type.h1152
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h364
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf.h321
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c390
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_main.c2353
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c772
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.c89
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_defines.h16
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_hw.h3
-rw-r--r--drivers/net/ethernet/intel/igb/igb.h28
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c10
-rw-r--r--drivers/net/ethernet/intel/igb/igb_hwmon.c108
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c303
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c2
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb.h1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h21
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.h65
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c120
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c82
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c3
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c30
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h5
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c84
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type.h15
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/defines.h18
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ethtool.c76
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf.h96
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c1276
-rw-r--r--drivers/net/ethernet/korina.c1
-rw-r--r--drivers/net/ethernet/lantiq_etop.c3
-rw-r--r--drivers/net/ethernet/marvell/Kconfig1
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c28
-rw-r--r--drivers/net/ethernet/marvell/mvmdio.c1
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c387
-rw-r--r--drivers/net/ethernet/marvell/pxa168_eth.c24
-rw-r--r--drivers/net/ethernet/marvell/sky2.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/Kconfig1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/alloc.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cq.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_clock.c198
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_cq.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_main.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c142
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_resources.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c30
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c62
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/eq.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c93
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c109
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mcg.c45
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4.h10
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h19
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mr.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/pd.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/port.c41
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/qp.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c100
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/srq.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c1
-rw-r--r--drivers/net/ethernet/micrel/ks8695net.c1
-rw-r--r--drivers/net/ethernet/micrel/ksz884x.c12
-rw-r--r--drivers/net/ethernet/moxa/moxart_ether.c1
-rw-r--r--drivers/net/ethernet/natsemi/jazzsonic.c1
-rw-r--r--drivers/net/ethernet/natsemi/macsonic.c1
-rw-r--r--drivers/net/ethernet/natsemi/ns83820.c5
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-config.c2
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-main.c35
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-main.h1
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-traffic.c37
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-traffic.h8
-rw-r--r--drivers/net/ethernet/netx-eth.c3
-rw-r--r--drivers/net/ethernet/nvidia/forcedeth.c6
-rw-r--r--drivers/net/ethernet/nxp/lpc_eth.c1
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h3
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c3
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.h3
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c3
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c3
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c3
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c3
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h3
-rw-r--r--drivers/net/ethernet/packetengines/yellowfin.c17
-rw-r--r--drivers/net/ethernet/pasemi/pasemi_mac.c4
-rw-r--r--drivers/net/ethernet/pasemi/pasemi_mac.h3
-rw-r--r--drivers/net/ethernet/pasemi/pasemi_mac_ethtool.c3
-rw-r--r--drivers/net/ethernet/qlogic/netxen/Makefile4
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic.h4
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c4
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c4
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h4
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c6
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.h4
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c4
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c4
-rw-r--r--drivers/net/ethernet/qlogic/qla3xxx.c1
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic.h197
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c425
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h45
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c28
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c14
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c49
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c31
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h14
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c144
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h1
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c127
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h9
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c167
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c435
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h15
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c257
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c236
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c133
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge.h1
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_dbg.c4
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c1
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_main.c1
-rw-r--r--drivers/net/ethernet/rdc/r6040.c7
-rw-r--r--drivers/net/ethernet/realtek/r8169.c1
-rw-r--r--drivers/net/ethernet/renesas/Kconfig2
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c263
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.h56
-rw-r--r--drivers/net/ethernet/s6gmac.c1
-rw-r--r--drivers/net/ethernet/seeq/sgiseeq.c3
-rw-r--r--drivers/net/ethernet/sfc/ef10.c549
-rw-r--r--drivers/net/ethernet/sfc/efx.c201
-rw-r--r--drivers/net/ethernet/sfc/efx.h16
-rw-r--r--drivers/net/ethernet/sfc/enum.h1
-rw-r--r--drivers/net/ethernet/sfc/ethtool.c16
-rw-r--r--drivers/net/ethernet/sfc/falcon.c38
-rw-r--r--drivers/net/ethernet/sfc/farch.c48
-rw-r--r--drivers/net/ethernet/sfc/filter.h17
-rw-r--r--drivers/net/ethernet/sfc/mcdi.c444
-rw-r--r--drivers/net/ethernet/sfc/mcdi.h21
-rw-r--r--drivers/net/ethernet/sfc/mcdi_mon.c76
-rw-r--r--drivers/net/ethernet/sfc/mcdi_pcol.h733
-rw-r--r--drivers/net/ethernet/sfc/mcdi_port.c89
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h75
-rw-r--r--drivers/net/ethernet/sfc/nic.c12
-rw-r--r--drivers/net/ethernet/sfc/nic.h34
-rw-r--r--drivers/net/ethernet/sfc/ptp.c854
-rw-r--r--drivers/net/ethernet/sfc/rx.c24
-rw-r--r--drivers/net/ethernet/sfc/selftest.c2
-rw-r--r--drivers/net/ethernet/sfc/selftest.h1
-rw-r--r--drivers/net/ethernet/sfc/siena.c119
-rw-r--r--drivers/net/ethernet/sgi/ioc3-eth.c1
-rw-r--r--drivers/net/ethernet/sgi/meth.c1
-rw-r--r--drivers/net/ethernet/sis/sis900.c2
-rw-r--r--drivers/net/ethernet/smsc/smc911x.c4
-rw-r--r--drivers/net/ethernet/smsc/smc911x.h3
-rw-r--r--drivers/net/ethernet/smsc/smc91c92_cs.c1
-rw-r--r--drivers/net/ethernet/smsc/smc91x.c6
-rw-r--r--drivers/net/ethernet/smsc/smc91x.h3
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c3
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.h3
-rw-r--r--drivers/net/ethernet/smsc/smsc9420.c5
-rw-r--r--drivers/net/ethernet/smsc/smsc9420.h3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Kconfig12
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Makefile1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/chain_mode.c3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c140
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000.h7
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c7
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/ring_mode.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h11
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c471
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c10
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c135
-rw-r--r--drivers/net/ethernet/sun/cassini.c4
-rw-r--r--drivers/net/ethernet/sun/cassini.h4
-rw-r--r--drivers/net/ethernet/sun/niu.c10
-rw-r--r--drivers/net/ethernet/sun/sunbmac.c1
-rw-r--r--drivers/net/ethernet/sun/sungem.c1
-rw-r--r--drivers/net/ethernet/sun/sunvnet.c2
-rw-r--r--drivers/net/ethernet/tehuti/tehuti.c2
-rw-r--r--drivers/net/ethernet/ti/cpmac.c1
-rw-r--r--drivers/net/ethernet/ti/cpsw.c137
-rw-r--r--drivers/net/ethernet/ti/cpsw_ale.c18
-rw-r--r--drivers/net/ethernet/ti/cpsw_ale.h2
-rw-r--r--drivers/net/ethernet/ti/davinci_cpdma.c7
-rw-r--r--drivers/net/ethernet/ti/davinci_mdio.c2
-rw-r--r--drivers/net/ethernet/tile/Kconfig12
-rw-r--r--drivers/net/ethernet/tile/tilegx.c38
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_net.c18
-rw-r--r--drivers/net/ethernet/toshiba/tc35815.c16
-rw-r--r--drivers/net/ethernet/tundra/tsi108_eth.c1
-rw-r--r--drivers/net/ethernet/tundra/tsi108_eth.h4
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c1
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c1
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_emaclite.c1
-rw-r--r--drivers/net/ethernet/xircom/xirc2ps_cs.c3
-rw-r--r--drivers/net/ethernet/xscale/ixp4xx_eth.c36
-rw-r--r--drivers/net/fddi/defxx.c21
-rw-r--r--drivers/net/fddi/skfp/fplustm.c27
-rw-r--r--drivers/net/fddi/skfp/h/supern_2.h96
-rw-r--r--drivers/net/fddi/skfp/h/targetos.h1
-rw-r--r--drivers/net/fddi/skfp/skfddi.c1
-rw-r--r--drivers/net/fddi/skfp/smt.c2
-rw-r--r--drivers/net/fddi/skfp/srf.c24
-rw-r--r--drivers/net/hamradio/6pack.c3
-rw-r--r--drivers/net/hamradio/bpqether.c2
-rw-r--r--drivers/net/hamradio/mkiss.c3
-rw-r--r--drivers/net/hippi/rrunner.c6
-rw-r--r--drivers/net/hyperv/hyperv_net.h3
-rw-r--r--drivers/net/hyperv/netvsc.c3
-rw-r--r--drivers/net/hyperv/netvsc_drv.c3
-rw-r--r--drivers/net/hyperv/rndis_filter.c3
-rw-r--r--drivers/net/ieee802154/at86rf230.c2
-rw-r--r--drivers/net/ieee802154/mrf24j40.c1
-rw-r--r--drivers/net/irda/Kconfig4
-rw-r--r--drivers/net/irda/au1k_ir.c4
-rw-r--r--drivers/net/irda/esi-sir.c4
-rw-r--r--drivers/net/irda/irda-usb.c1
-rw-r--r--drivers/net/irda/kingsun-sir.c1
-rw-r--r--drivers/net/irda/ks959-sir.c1
-rw-r--r--drivers/net/irda/ksdazzle-sir.c1
-rw-r--r--drivers/net/irda/litelink-sir.c4
-rw-r--r--drivers/net/irda/ma600-sir.c4
-rw-r--r--drivers/net/irda/mcs7780.c1
-rw-r--r--drivers/net/irda/old_belkin-sir.c4
-rw-r--r--drivers/net/irda/sh_irda.c2
-rw-r--r--drivers/net/irda/sh_sir.c2
-rw-r--r--drivers/net/irda/sir_dongle.c1
-rw-r--r--drivers/net/irda/smsc-ircc2.c4
-rw-r--r--drivers/net/irda/smsc-ircc2.h4
-rw-r--r--drivers/net/irda/stir4200.c1
-rw-r--r--drivers/net/irda/via-ircc.c5
-rw-r--r--drivers/net/irda/via-ircc.h3
-rw-r--r--drivers/net/irda/vlsi_ir.c7
-rw-r--r--drivers/net/irda/vlsi_ir.h4
-rw-r--r--drivers/net/loopback.c1
-rw-r--r--drivers/net/macvlan.c38
-rw-r--r--drivers/net/macvtap.c77
-rw-r--r--drivers/net/mdio.c28
-rw-r--r--drivers/net/phy/cicada.c4
-rw-r--r--drivers/net/phy/davicom.c2
-rw-r--r--drivers/net/phy/dp83640.c4
-rw-r--r--drivers/net/phy/icplus.c2
-rw-r--r--drivers/net/phy/lxt.c4
-rw-r--r--drivers/net/phy/marvell.c22
-rw-r--r--drivers/net/phy/mdio-gpio.c1
-rw-r--r--drivers/net/phy/mdio-moxart.c1
-rw-r--r--drivers/net/phy/mdio-mux-gpio.c1
-rw-r--r--drivers/net/phy/mdio-mux-mmioreg.c1
-rw-r--r--drivers/net/phy/mdio-octeon.c1
-rw-r--r--drivers/net/phy/mdio-sun4i.c1
-rw-r--r--drivers/net/phy/mdio_bus.c35
-rw-r--r--drivers/net/phy/micrel.c4
-rw-r--r--drivers/net/phy/phy.c429
-rw-r--r--drivers/net/phy/phy_device.c419
-rw-r--r--drivers/net/phy/spi_ks8995.c7
-rw-r--r--drivers/net/plip/plip.c4
-rw-r--r--drivers/net/ppp/ppp_mppe.c3
-rw-r--r--drivers/net/ppp/pppoe.c4
-rw-r--r--drivers/net/team/team.c6
-rw-r--r--drivers/net/team/team_mode_random.c8
-rw-r--r--drivers/net/tun.c68
-rw-r--r--drivers/net/usb/Kconfig5
-rw-r--r--drivers/net/usb/asix.h4
-rw-r--r--drivers/net/usb/asix_common.c3
-rw-r--r--drivers/net/usb/asix_devices.c3
-rw-r--r--drivers/net/usb/ax88172a.c3
-rw-r--r--drivers/net/usb/ax88179_178a.c3
-rw-r--r--drivers/net/usb/catc.c4
-rw-r--r--drivers/net/usb/cdc_eem.c4
-rw-r--r--drivers/net/usb/cdc_ether.c5
-rw-r--r--drivers/net/usb/cdc_ncm.c1
-rw-r--r--drivers/net/usb/cdc_subset.c4
-rw-r--r--drivers/net/usb/cx82310_eth.c4
-rw-r--r--drivers/net/usb/dm9601.c1
-rw-r--r--drivers/net/usb/gl620a.c4
-rw-r--r--drivers/net/usb/int51x1.c3
-rw-r--r--drivers/net/usb/ipheth.c1
-rw-r--r--drivers/net/usb/kalmia.c1
-rw-r--r--drivers/net/usb/kaweth.c4
-rw-r--r--drivers/net/usb/lg-vl600.c3
-rw-r--r--drivers/net/usb/mcs7830.c4
-rw-r--r--drivers/net/usb/net1080.c4
-rw-r--r--drivers/net/usb/plusb.c4
-rw-r--r--drivers/net/usb/r8152.c916
-rw-r--r--drivers/net/usb/r815x.c8
-rw-r--r--drivers/net/usb/rndis_host.c4
-rw-r--r--drivers/net/usb/rtl8150.c1
-rw-r--r--drivers/net/usb/sierra_net.c3
-rw-r--r--drivers/net/usb/smsc75xx.c4
-rw-r--r--drivers/net/usb/smsc75xx.h3
-rw-r--r--drivers/net/usb/smsc95xx.c4
-rw-r--r--drivers/net/usb/smsc95xx.h3
-rw-r--r--drivers/net/usb/sr9700.c1
-rw-r--r--drivers/net/usb/usbnet.c3
-rw-r--r--drivers/net/usb/zaurus.c4
-rw-r--r--drivers/net/virtio_net.c254
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c5
-rw-r--r--drivers/net/vmxnet3/vmxnet3_int.h1
-rw-r--r--drivers/net/vxlan.c308
-rw-r--r--drivers/net/wan/dscc4.c2
-rw-r--r--drivers/net/wan/hd64570.c1
-rw-r--r--drivers/net/wan/hd64570.h4
-rw-r--r--drivers/net/wan/hd64572.c1
-rw-r--r--drivers/net/wan/hd64572.h2
-rw-r--r--drivers/net/wan/lmc/lmc_main.c3
-rw-r--r--drivers/net/wan/pc300too.c1
-rw-r--r--drivers/net/wan/pci200syn.c1
-rw-r--r--drivers/net/wan/sbni.c1
-rw-r--r--drivers/net/wan/wanxl.c1
-rw-r--r--drivers/net/wireless/adm8211.c4
-rw-r--r--drivers/net/wireless/airo_cs.c1
-rw-r--r--drivers/net/wireless/at76c50x-usb.c3
-rw-r--r--drivers/net/wireless/ath/ar5523/ar5523.c1
-rw-r--r--drivers/net/wireless/ath/ath.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/Kconfig7
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.c53
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c43
-rw-r--r--drivers/net/wireless/ath/ath10k/core.h34
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.c160
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.h6
-rw-r--r--drivers/net/wireless/ath/ath10k/htc.c31
-rw-r--r--drivers/net/wireless/ath/ath10k/htt.c4
-rw-r--r--drivers/net/wireless/ath/ath10k/htt.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_rx.c40
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_tx.c11
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c677
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.c791
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.h14
-rw-r--r--drivers/net/wireless/ath/ath10k/trace.h21
-rw-r--r--drivers/net/wireless/ath/ath10k/txrx.c8
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c408
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.h157
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c34
-rw-r--r--drivers/net/wireless/ath/ath5k/dma.c11
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.c11
-rw-r--r--drivers/net/wireless/ath/ath6kl/wmi.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/Kconfig18
-rw-r--r--drivers/net/wireless/ath/ath9k/Makefile14
-rw-r--r--drivers/net/wireless/ath/ath9k/antenna.c8
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_hw.c23
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_mac.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_phy.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h222
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_buffalo_initvals.h126
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_calib.c385
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.c65
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.h14
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_hw.c297
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mac.c18
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.c106
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.h24
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_wow.c422
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h128
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9330_1p2_initvals.h401
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9340_initvals.h392
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h575
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9462_2p1_initvals.h1559
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9485_initvals.h117
-rw-r--r--drivers/net/wireless/ath/ath9k/ar953x_initvals.h718
-rw-r--r--drivers/net/wireless/ath/ath9k/ar955x_1p0_initvals.h540
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h85
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9565_1p1_initvals.h64
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h572
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h434
-rw-r--r--drivers/net/wireless/ath/ath9k/beacon.c147
-rw-r--r--drivers/net/wireless/ath/ath9k/btcoex.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/common.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c632
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.h44
-rw-r--r--drivers/net/wireless/ath/ath9k/debug_sta.c269
-rw-r--r--drivers/net/wireless/ath/ath9k/dfs.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_4k.c26
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_9287.c26
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_def.c26
-rw-r--r--drivers/net/wireless/ath/ath9k/gpio.c89
-rw-r--r--drivers/net/wireless/ath/ath9k/htc.h5
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_beacon.c39
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_gpio.c17
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_init.c3
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_txrx.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/hw-ops.h47
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c608
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h75
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c72
-rw-r--r--drivers/net/wireless/ath/ath9k/link.c67
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.c51
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.h8
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c671
-rw-r--r--drivers/net/wireless/ath/ath9k/mci.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c134
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c247
-rw-r--r--drivers/net/wireless/ath/ath9k/reg.h29
-rw-r--r--drivers/net/wireless/ath/ath9k/spectral.c543
-rw-r--r--drivers/net/wireless/ath/ath9k/spectral.h212
-rw-r--r--drivers/net/wireless/ath/ath9k/tx99.c272
-rw-r--r--drivers/net/wireless/ath/ath9k/wow.c588
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c27
-rw-r--r--drivers/net/wireless/ath/carl9170/debug.c1
-rw-r--r--drivers/net/wireless/ath/carl9170/main.c13
-rw-r--r--drivers/net/wireless/ath/carl9170/rx.c14
-rw-r--r--drivers/net/wireless/ath/carl9170/tx.c1
-rw-r--r--drivers/net/wireless/ath/main.c8
-rw-r--r--drivers/net/wireless/ath/regd.c379
-rw-r--r--drivers/net/wireless/ath/wcn36xx/hal.h2
-rw-r--r--drivers/net/wireless/ath/wcn36xx/main.c3
-rw-r--r--drivers/net/wireless/ath/wcn36xx/smd.c65
-rw-r--r--drivers/net/wireless/ath/wcn36xx/wcn36xx.h2
-rw-r--r--drivers/net/wireless/ath/wil6210/interrupt.c13
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.c8
-rw-r--r--drivers/net/wireless/ath/wil6210/wil6210.h1
-rw-r--r--drivers/net/wireless/atmel.c8
-rw-r--r--drivers/net/wireless/atmel.h4
-rw-r--r--drivers/net/wireless/atmel_cs.c5
-rw-r--r--drivers/net/wireless/atmel_pci.c5
-rw-r--r--drivers/net/wireless/b43/b43.h4
-rw-r--r--drivers/net/wireless/b43/main.c27
-rw-r--r--drivers/net/wireless/b43/xmit.c4
-rw-r--r--drivers/net/wireless/b43legacy/main.c1
-rw-r--r--drivers/net/wireless/brcm80211/Kconfig5
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/Makefile5
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/bcdc.c375
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/bcdc.h24
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c737
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c539
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd.h487
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h44
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c392
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c31
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c36
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h2
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c207
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_proto.h42
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c1596
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/fweh.h54
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/fwil.c19
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/fwil.h61
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h304
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c216
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/nvram.c94
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/nvram.h24
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/p2p.c52
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/proto.c62
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/proto.h57
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c827
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h39
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h83
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/tracepoint.h33
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/usb.c9
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c194
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h5
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/channel.c38
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c67
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/main.c2
-rw-r--r--drivers/net/wireless/brcm80211/include/brcm_hw_ids.h1
-rw-r--r--drivers/net/wireless/brcm80211/include/brcmu_wifi.h14
-rw-r--r--drivers/net/wireless/cw1200/cw1200_sdio.c4
-rw-r--r--drivers/net/wireless/cw1200/fwio.c1
-rw-r--r--drivers/net/wireless/cw1200/main.c2
-rw-r--r--drivers/net/wireless/cw1200/pm.c11
-rw-r--r--drivers/net/wireless/cw1200/scan.c15
-rw-r--r--drivers/net/wireless/cw1200/sta.c5
-rw-r--r--drivers/net/wireless/cw1200/txrx.c3
-rw-r--r--drivers/net/wireless/hostap/hostap_80211_rx.c8
-rw-r--r--drivers/net/wireless/hostap/hostap_80211_tx.c4
-rw-r--r--drivers/net/wireless/hostap/hostap_ap.c28
-rw-r--r--drivers/net/wireless/hostap/hostap_cs.c1
-rw-r--r--drivers/net/wireless/hostap/hostap_hw.c2
-rw-r--r--drivers/net/wireless/hostap/hostap_ioctl.c6
-rw-r--r--drivers/net/wireless/hostap/hostap_main.c8
-rw-r--r--drivers/net/wireless/hostap/hostap_pci.c1
-rw-r--r--drivers/net/wireless/hostap/hostap_plx.c1
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2100.c5
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2200.c42
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2200.h1
-rw-r--r--drivers/net/wireless/ipw2x00/libipw_rx.c12
-rw-r--r--drivers/net/wireless/iwlegacy/3945-debug.c6
-rw-r--r--drivers/net/wireless/iwlegacy/3945-mac.c11
-rw-r--r--drivers/net/wireless/iwlegacy/3945-rs.c1
-rw-r--r--drivers/net/wireless/iwlegacy/3945.c5
-rw-r--r--drivers/net/wireless/iwlegacy/4965-debug.c6
-rw-r--r--drivers/net/wireless/iwlegacy/4965-mac.c8
-rw-r--r--drivers/net/wireless/iwlegacy/4965-rs.c1
-rw-r--r--drivers/net/wireless/iwlegacy/4965.c1
-rw-r--r--drivers/net/wireless/iwlegacy/common.c13
-rw-r--r--drivers/net/wireless/iwlegacy/debug.c10
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/agn.h4
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/calib.c4
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/calib.h4
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/commands.h4
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/debugfs.c10
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/dev.h2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/devices.c2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/led.c3
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/led.h2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/lib.c3
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/mac80211.c18
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/main.c6
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/power.c3
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/power.h2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/rs.c3
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/rs.h9
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/rx.c9
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/rxon.c2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/scan.c4
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/sta.c2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/tt.c3
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/tt.h2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/tx.c4
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/ucode.c10
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-1000.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-2000.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-6000.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-7000.c6
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-hw.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-config.h12
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-csr.h7
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debug.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-devtrace.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-devtrace.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-drv.c43
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-drv.h6
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c15
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom-read.c4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom-read.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fh.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fw-file.h5
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fw.h50
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-io.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-io.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-modparams.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-notif-wait.c4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-notif-wait.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-nvm-parse.c17
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-nvm-parse.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-op-mode.h8
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-phy-db.c4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-phy-db.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-prph.h11
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans.h87
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/Makefile4
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/binding.c20
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/bt-coex.c21
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/constants.h4
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/d3.c57
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c546
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/debugfs.c689
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/debugfs.h101
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-bt-coex.h5
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h4
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h4
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-power.h20
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h31
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h9
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h17
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h5
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api.h69
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw.c32
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/led.c6
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c211
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mac80211.c342
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mvm.h63
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/nvm.c51
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/ops.c28
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c4
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/power.c400
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/power_legacy.c4
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/quota.c7
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/rs.c2192
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/rs.h154
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/rx.c10
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/scan.c72
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/sf.c291
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/sta.c50
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/sta.h10
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/testmode.h4
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/time-event.c8
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/time-event.h4
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/tt.c8
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/tx.c27
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/utils.c21
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/drv.c9
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/internal.h65
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/rx.c437
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/trans.c183
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/tx.c172
-rw-r--r--drivers/net/wireless/libertas/README5
-rw-r--r--drivers/net/wireless/libertas/cfg.c7
-rw-r--r--drivers/net/wireless/libertas/if_sdio.c6
-rw-r--r--drivers/net/wireless/libertas/if_spi.c1
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c1213
-rw-r--r--drivers/net/wireless/mac80211_hwsim.h18
-rw-r--r--drivers/net/wireless/mwifiex/11n.c2
-rw-r--r--drivers/net/wireless/mwifiex/11n_aggr.c6
-rw-r--r--drivers/net/wireless/mwifiex/Kconfig4
-rw-r--r--drivers/net/wireless/mwifiex/cfg80211.c70
-rw-r--r--drivers/net/wireless/mwifiex/cmdevt.c4
-rw-r--r--drivers/net/wireless/mwifiex/decl.h1
-rw-r--r--drivers/net/wireless/mwifiex/fw.h41
-rw-r--r--drivers/net/wireless/mwifiex/init.c3
-rw-r--r--drivers/net/wireless/mwifiex/main.c11
-rw-r--r--drivers/net/wireless/mwifiex/main.h7
-rw-r--r--drivers/net/wireless/mwifiex/scan.c8
-rw-r--r--drivers/net/wireless/mwifiex/sta_cmd.c80
-rw-r--r--drivers/net/wireless/mwifiex/sta_cmdresp.c38
-rw-r--r--drivers/net/wireless/mwifiex/sta_ioctl.c20
-rw-r--r--drivers/net/wireless/mwifiex/sta_rx.c46
-rw-r--r--drivers/net/wireless/mwifiex/sta_tx.c1
-rw-r--r--drivers/net/wireless/mwifiex/txrx.c15
-rw-r--r--drivers/net/wireless/mwifiex/uap_txrx.c39
-rw-r--r--drivers/net/wireless/mwifiex/usb.c58
-rw-r--r--drivers/net/wireless/mwifiex/usb.h12
-rw-r--r--drivers/net/wireless/mwifiex/util.c5
-rw-r--r--drivers/net/wireless/mwl8k.c5
-rw-r--r--drivers/net/wireless/orinoco/hermes.c1
-rw-r--r--drivers/net/wireless/orinoco/orinoco_cs.c1
-rw-r--r--drivers/net/wireless/orinoco/orinoco_usb.c1
-rw-r--r--drivers/net/wireless/orinoco/spectrum_cs.c1
-rw-r--r--drivers/net/wireless/p54/eeprom.c1
-rw-r--r--drivers/net/wireless/p54/fwio.c1
-rw-r--r--drivers/net/wireless/p54/led.c1
-rw-r--r--drivers/net/wireless/p54/main.c2
-rw-r--r--drivers/net/wireless/p54/net2280.h3
-rw-r--r--drivers/net/wireless/p54/p54pci.c1
-rw-r--r--drivers/net/wireless/p54/p54usb.c1
-rw-r--r--drivers/net/wireless/p54/txrx.c5
-rw-r--r--drivers/net/wireless/prism54/isl_38xx.c3
-rw-r--r--drivers/net/wireless/prism54/isl_38xx.h3
-rw-r--r--drivers/net/wireless/prism54/isl_ioctl.c6
-rw-r--r--drivers/net/wireless/prism54/isl_ioctl.h3
-rw-r--r--drivers/net/wireless/prism54/isl_oid.h3
-rw-r--r--drivers/net/wireless/prism54/islpci_dev.c4
-rw-r--r--drivers/net/wireless/prism54/islpci_dev.h3
-rw-r--r--drivers/net/wireless/prism54/islpci_eth.c3
-rw-r--r--drivers/net/wireless/prism54/islpci_eth.h3
-rw-r--r--drivers/net/wireless/prism54/islpci_hotplug.c5
-rw-r--r--drivers/net/wireless/prism54/islpci_mgt.c3
-rw-r--r--drivers/net/wireless/prism54/islpci_mgt.h3
-rw-r--r--drivers/net/wireless/prism54/oid_mgt.c3
-rw-r--r--drivers/net/wireless/prism54/oid_mgt.h3
-rw-r--r--drivers/net/wireless/prism54/prismcompat.h3
-rw-r--r--drivers/net/wireless/ray_cs.c3
-rw-r--r--drivers/net/wireless/rndis_wlan.c4
-rw-r--r--drivers/net/wireless/rt2x00/rt2400pci.c5
-rw-r--r--drivers/net/wireless/rt2x00/rt2400pci.h4
-rw-r--r--drivers/net/wireless/rt2x00/rt2500pci.c5
-rw-r--r--drivers/net/wireless/rt2x00/rt2500pci.h4
-rw-r--r--drivers/net/wireless/rt2x00/rt2500usb.c5
-rw-r--r--drivers/net/wireless/rt2x00/rt2500usb.h4
-rw-r--r--drivers/net/wireless/rt2x00/rt2800.h4
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.c41
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.h4
-rw-r--r--drivers/net/wireless/rt2x00/rt2800mmio.c4
-rw-r--r--drivers/net/wireless/rt2x00/rt2800mmio.h4
-rw-r--r--drivers/net/wireless/rt2x00/rt2800pci.c4
-rw-r--r--drivers/net/wireless/rt2x00/rt2800pci.h4
-rw-r--r--drivers/net/wireless/rt2x00/rt2800soc.c4
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.c6
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.h4
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00.h4
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00config.c4
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00crypto.c4
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00debug.c4
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00debug.h4
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00dev.c8
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00dump.h4
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00firmware.c4
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00leds.c4
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00leds.h4
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00lib.h4
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00link.c4
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00mac.c4
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00mmio.c4
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00mmio.h4
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00pci.c7
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00pci.h4
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.c4
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.h4
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00reg.h4
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00soc.c4
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00soc.h4
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00usb.c4
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00usb.h4
-rw-r--r--drivers/net/wireless/rt2x00/rt61pci.c5
-rw-r--r--drivers/net/wireless/rt2x00/rt61pci.h4
-rw-r--r--drivers/net/wireless/rt2x00/rt73usb.c5
-rw-r--r--drivers/net/wireless/rt2x00/rt73usb.h4
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/dev.c1
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/grf5101.c1
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/max2820.c1
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/rtl8225.c1
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/sa2400.c1
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187/dev.c3
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187/rtl8225.c1
-rw-r--r--drivers/net/wireless/rtlwifi/base.c8
-rw-r--r--drivers/net/wireless/rtlwifi/cam.c4
-rw-r--r--drivers/net/wireless/rtlwifi/core.c11
-rw-r--r--drivers/net/wireless/rtlwifi/pci.c2
-rw-r--r--drivers/net/wireless/rtlwifi/ps.c4
-rw-r--r--drivers/net/wireless/rtlwifi/regd.c61
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8188ee/dm.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c327
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/dm_common.h14
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c39
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/dm.c9
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/dm.h3
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/hw.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/phy.c6
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/rf.c29
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/sw.c17
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/table.c40
-rw-r--r--drivers/net/wireless/rtlwifi/stats.c14
-rw-r--r--drivers/net/wireless/rtlwifi/usb.c8
-rw-r--r--drivers/net/wireless/rtlwifi/wifi.h33
-rw-r--r--drivers/net/wireless/ti/wl1251/acx.c258
-rw-r--r--drivers/net/wireless/ti/wl1251/acx.h26
-rw-r--r--drivers/net/wireless/ti/wl1251/boot.c3
-rw-r--r--drivers/net/wireless/ti/wl1251/cmd.c58
-rw-r--r--drivers/net/wireless/ti/wl1251/cmd.h8
-rw-r--r--drivers/net/wireless/ti/wl1251/event.c46
-rw-r--r--drivers/net/wireless/ti/wl1251/event.h7
-rw-r--r--drivers/net/wireless/ti/wl1251/init.c13
-rw-r--r--drivers/net/wireless/ti/wl1251/main.c153
-rw-r--r--drivers/net/wireless/ti/wl1251/rx.c2
-rw-r--r--drivers/net/wireless/ti/wl1251/tx.c35
-rw-r--r--drivers/net/wireless/ti/wl1251/wl1251.h6
-rw-r--r--drivers/net/wireless/ti/wl12xx/scan.c2
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.c2
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c14
-rw-r--r--drivers/net/wireless/ti/wlcore/scan.c12
-rw-r--r--drivers/net/wireless/wl3501_cs.c5
-rw-r--r--drivers/net/wireless/zd1211rw/zd_chip.c3
-rw-r--r--drivers/net/wireless/zd1211rw/zd_chip.h3
-rw-r--r--drivers/net/wireless/zd1211rw/zd_def.h3
-rw-r--r--drivers/net/wireless/zd1211rw/zd_mac.c8
-rw-r--r--drivers/net/wireless/zd1211rw/zd_mac.h3
-rw-r--r--drivers/net/wireless/zd1211rw/zd_rf.c3
-rw-r--r--drivers/net/wireless/zd1211rw/zd_rf.h3
-rw-r--r--drivers/net/wireless/zd1211rw/zd_rf_al2230.c3
-rw-r--r--drivers/net/wireless/zd1211rw/zd_rf_al7230b.c3
-rw-r--r--drivers/net/wireless/zd1211rw/zd_rf_rf2959.c3
-rw-r--r--drivers/net/wireless/zd1211rw/zd_rf_uw2453.c3
-rw-r--r--drivers/net/wireless/zd1211rw/zd_usb.c3
-rw-r--r--drivers/net/wireless/zd1211rw/zd_usb.h3
-rw-r--r--drivers/net/xen-netback/common.h28
-rw-r--r--drivers/net/xen-netback/interface.c47
-rw-r--r--drivers/net/xen-netback/netback.c506
-rw-r--r--drivers/net/xen-netback/xenbus.c3
-rw-r--r--drivers/net/xen-netfront.c96
1005 files changed, 56381 insertions, 26692 deletions
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index b45b240889f5..f342278539d5 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -236,6 +236,7 @@ config VETH
 config VIRTIO_NET
 	tristate "Virtio network driver"
 	depends on VIRTIO
+	select AVERAGE
 	---help---
 	  This is the virtual network driver for virtio.  It can be used with
 	  lguest or QEMU based VMMs (like KVM or Xen).  Say Y or M.
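
The new "select AVERAGE" line pulls in lib/average.c, the kernel's exponentially weighted moving average (EWMA) helper; presumably virtio_net uses it to track typical packet sizes when sizing its mergeable receive buffers. A minimal sketch of that EWMA API as it stood in this kernel generation (the surrounding function and variable names are illustrative, not taken from the patch):

#include <linux/average.h>

/* Illustrative sketch: track an average packet length with the struct ewma
 * helpers that CONFIG_AVERAGE provides (lib/average.c).  The precision
 * factor and the decay weight must both be powers of two.
 */
static struct ewma pkt_len_avg;

static void pkt_len_avg_setup(void)
{
	ewma_init(&pkt_len_avg, 1, 64);		/* factor 1, weight 64 */
}

static void pkt_len_avg_sample(unsigned long len)
{
	ewma_add(&pkt_len_avg, len);		/* fold in one new sample */
}

static unsigned long pkt_len_avg_estimate(void)
{
	return ewma_read(&pkt_len_avg);		/* current weighted average */
}
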
diff --git a/drivers/net/Space.c b/drivers/net/Space.c
index a7271e093845..67977f15af25 100644
--- a/drivers/net/Space.c
+++ b/drivers/net/Space.c
@@ -32,39 +32,12 @@
 #include <linux/errno.h>
 #include <linux/init.h>
 #include <linux/netlink.h>
+#include <net/Space.h>
 
 /* A unified ethernet device probe.  This is the easiest way to have every
    ethernet adaptor have the name "eth[0123...]".
    */
 
-extern struct net_device *hp100_probe(int unit);
-extern struct net_device *ultra_probe(int unit);
-extern struct net_device *wd_probe(int unit);
-extern struct net_device *ne_probe(int unit);
-extern struct net_device *fmv18x_probe(int unit);
-extern struct net_device *i82596_probe(int unit);
-extern struct net_device *ni65_probe(int unit);
-extern struct net_device *sonic_probe(int unit);
-extern struct net_device *smc_init(int unit);
-extern struct net_device *atarilance_probe(int unit);
-extern struct net_device *sun3lance_probe(int unit);
-extern struct net_device *sun3_82586_probe(int unit);
-extern struct net_device *apne_probe(int unit);
-extern struct net_device *cs89x0_probe(int unit);
-extern struct net_device *mvme147lance_probe(int unit);
-extern struct net_device *tc515_probe(int unit);
-extern struct net_device *lance_probe(int unit);
-extern struct net_device *mac8390_probe(int unit);
-extern struct net_device *mac89x0_probe(int unit);
-extern struct net_device *cops_probe(int unit);
-extern struct net_device *ltpc_probe(void);
-
-/* Fibre Channel adapters */
-extern int iph5526_probe(struct net_device *dev);
-
-/* SBNI adapters */
-extern int sbni_probe(int unit);
-
 struct devprobe2 {
 	struct net_device *(*probe)(int unit);
 	int status;	/* non-zero if autoprobe has failed */
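
For context on how struct devprobe2 is consumed: the status field lets the autoprobe loop in this file remember a probe routine that has already failed and skip it for later units. A sketch modelled on the probe_list2() helper further down in Space.c (an approximation only, not part of this patch; IS_ERR()/PTR_ERR() come from <linux/err.h>):

static int __init probe_list2(int unit, struct devprobe2 *p, int autoprobe)
{
	struct net_device *dev;

	for (; p->probe; p++) {
		if (autoprobe && p->status)
			continue;		  /* this probe already failed once */
		dev = p->probe(unit);
		if (!IS_ERR(dev))
			return 0;		  /* found and registered a device */
		if (autoprobe)
			p->status = PTR_ERR(dev); /* remember the failure */
	}
	return -ENODEV;
}
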
diff --git a/drivers/net/arcnet/com20020_cs.c b/drivers/net/arcnet/com20020_cs.c
index 74dc1875f9cd..326a612a2730 100644
--- a/drivers/net/arcnet/com20020_cs.c
+++ b/drivers/net/arcnet/com20020_cs.c
@@ -32,7 +32,6 @@
  * **********************
  */
 #include <linux/kernel.h>
-#include <linux/init.h>
 #include <linux/ptrace.h>
 #include <linux/slab.h>
 #include <linux/string.h>
diff --git a/drivers/net/bonding/Makefile b/drivers/net/bonding/Makefile
index 5a5d720da929..6f4e80853ed4 100644
--- a/drivers/net/bonding/Makefile
+++ b/drivers/net/bonding/Makefile
@@ -4,7 +4,7 @@
 
 obj-$(CONFIG_BONDING) += bonding.o
 
-bonding-objs := bond_main.o bond_3ad.o bond_alb.o bond_sysfs.o bond_debugfs.o bond_netlink.o bond_options.o
+bonding-objs := bond_main.o bond_3ad.o bond_alb.o bond_sysfs.o bond_sysfs_slave.o bond_debugfs.o bond_netlink.o bond_options.o
 
 proc-$(CONFIG_PROC_FS) += bond_procfs.o
 bonding-objs += $(proc-y)
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 4ced59436558..cce1f1bf90b4 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -34,14 +34,14 @@
 #include "bonding.h"
 #include "bond_3ad.h"
 
-// General definitions
+/* General definitions */
 #define AD_SHORT_TIMEOUT           1
 #define AD_LONG_TIMEOUT            0
 #define AD_STANDBY                 0x2
 #define AD_MAX_TX_IN_SECOND        3
 #define AD_COLLECTOR_MAX_DELAY     0
 
-// Timer definitions(43.4.4 in the 802.3ad standard)
+/* Timer definitions (43.4.4 in the 802.3ad standard) */
 #define AD_FAST_PERIODIC_TIME      1
 #define AD_SLOW_PERIODIC_TIME      30
 #define AD_SHORT_TIMEOUT_TIME      (3*AD_FAST_PERIODIC_TIME)
@@ -49,7 +49,7 @@
 #define AD_CHURN_DETECTION_TIME    60
 #define AD_AGGREGATE_WAIT_TIME     2
 
-// Port state definitions(43.4.2.2 in the 802.3ad standard)
+/* Port state definitions (43.4.2.2 in the 802.3ad standard) */
 #define AD_STATE_LACP_ACTIVITY   0x1
 #define AD_STATE_LACP_TIMEOUT    0x2
 #define AD_STATE_AGGREGATION     0x4
@@ -59,7 +59,9 @@
 #define AD_STATE_DEFAULTED       0x40
 #define AD_STATE_EXPIRED         0x80
 
-// Port Variables definitions used by the State Machines(43.4.7 in the 802.3ad standard)
+/* Port Variables definitions used by the State Machines (43.4.7 in the
+ * 802.3ad standard)
+ */
 #define AD_PORT_BEGIN           0x1
 #define AD_PORT_LACP_ENABLED    0x2
 #define AD_PORT_ACTOR_CHURN     0x4
@@ -71,27 +73,27 @@
 #define AD_PORT_SELECTED        0x100
 #define AD_PORT_MOVED           0x200
 
-// Port Key definitions
-// key is determined according to the link speed, duplex and
-// user key(which is yet not supported)
-//              ------------------------------------------------------------
-// Port key :   | User key                       |      Speed       |Duplex|
-//              ------------------------------------------------------------
-//              16                               6               1 0
+/* Port Key definitions
+ * key is determined according to the link speed, duplex and
+ * user key (which is yet not supported)
+ * --------------------------------------------------------------
+ * Port key :	| User key	| Speed		| Duplex	|
+ * --------------------------------------------------------------
+ * 16		  6		  1		  0
+ */
 #define  AD_DUPLEX_KEY_BITS    0x1
 #define  AD_SPEED_KEY_BITS     0x3E
 #define  AD_USER_KEY_BITS      0xFFC0
 
-//dalloun
 #define     AD_LINK_SPEED_BITMASK_1MBPS       0x1
 #define     AD_LINK_SPEED_BITMASK_10MBPS      0x2
 #define     AD_LINK_SPEED_BITMASK_100MBPS     0x4
 #define     AD_LINK_SPEED_BITMASK_1000MBPS    0x8
 #define     AD_LINK_SPEED_BITMASK_10000MBPS   0x10
-//endalloun
 
-// compare MAC addresses
-#define MAC_ADDRESS_COMPARE(A, B) memcmp(A, B, ETH_ALEN)
+/* compare MAC addresses */
+#define MAC_ADDRESS_EQUAL(A, B)	\
+	ether_addr_equal_64bits((const u8 *)A, (const u8 *)B)
 
 static struct mac_addr null_mac_addr = { { 0, 0, 0, 0, 0, 0 } };
 static u16 ad_ticks_per_sec;
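
The MAC_ADDRESS_EQUAL() macro introduced above replaces the memcmp()-based MAC_ADDRESS_COMPARE() (where zero meant "match"), so the sense of every comparison site later in this patch is inverted. A minimal user-space sketch of that semantic flip, with plain memcmp() standing in for ether_addr_equal_64bits() and a stub struct mac_addr used purely for illustration:

	#include <stdio.h>
	#include <string.h>

	struct mac_addr { unsigned char addr[6]; };

	/* old style: zero means "equal", so callers negated the result */
	#define MAC_ADDRESS_COMPARE(A, B)	memcmp(A, B, 6)
	/* new style: a direct boolean "are they equal?" test */
	#define MAC_ADDRESS_EQUAL(A, B)		(memcmp(A, B, 6) == 0)

	int main(void)
	{
		struct mac_addr a = { { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 } };
		struct mac_addr b = { { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 } };

		if (!MAC_ADDRESS_COMPARE(&a, &b))	/* old call-site idiom */
			printf("old test says: equal\n");
		if (MAC_ADDRESS_EQUAL(&a, &b))		/* new call-site idiom */
			printf("new test says: equal\n");
		return 0;
	}

ether_addr_equal_64bits() itself can compare with a single wide load per address, at the cost of requiring at least two valid bytes of memory after each 6-byte address, which the bonding/LACP structures are assumed to provide.
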
@@ -99,7 +101,7 @@ static const int ad_delta_in_ticks = (AD_TIMER_INTERVAL * HZ) / 1000;
 
 static const u8 lacpdu_mcast_addr[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
 
-// ================= main 802.3ad protocol functions ==================
+/* ================= main 802.3ad protocol functions ================== */
 static int ad_lacpdu_send(struct port *port);
 static int ad_marker_send(struct port *port, struct bond_marker *marker);
 static void ad_mux_machine(struct port *port);
@@ -113,13 +115,13 @@ static void ad_initialize_agg(struct aggregator *aggregator);
 static void ad_initialize_port(struct port *port, int lacp_fast);
 static void ad_enable_collecting_distributing(struct port *port);
 static void ad_disable_collecting_distributing(struct port *port);
-static void ad_marker_info_received(struct bond_marker *marker_info, struct port *port);
-static void ad_marker_response_received(struct bond_marker *marker, struct port *port);
+static void ad_marker_info_received(struct bond_marker *marker_info,
+				    struct port *port);
+static void ad_marker_response_received(struct bond_marker *marker,
+					struct port *port);
 
 
-/////////////////////////////////////////////////////////////////////////////////
-// ================= api to bonding and kernel code ==================
-/////////////////////////////////////////////////////////////////////////////////
+/* ================= api to bonding and kernel code ================== */
 
 /**
  * __get_bond_by_port - get the port's bonding struct
@@ -141,25 +143,32 @@ static inline struct bonding *__get_bond_by_port(struct port *port)
  *
  * Return the aggregator of the first slave in @bond, or %NULL if it can't be
  * found.
+ * The caller must hold RCU or RTNL lock.
  */
 static inline struct aggregator *__get_first_agg(struct port *port)
 {
 	struct bonding *bond = __get_bond_by_port(port);
 	struct slave *first_slave;
+	struct aggregator *agg;
 
-	// If there's no bond for this port, or bond has no slaves
+	/* If there's no bond for this port, or bond has no slaves */
 	if (bond == NULL)
 		return NULL;
-	first_slave = bond_first_slave(bond);
 
-	return first_slave ? &(SLAVE_AD_INFO(first_slave).aggregator) : NULL;
+	rcu_read_lock();
+	first_slave = bond_first_slave_rcu(bond);
+	agg = first_slave ? &(SLAVE_AD_INFO(first_slave).aggregator) : NULL;
+	rcu_read_unlock();
+
+	return agg;
 }
 
-/*
- * __agg_has_partner
+/**
+ * __agg_has_partner - see if we have a partner
+ * @agg: the aggregator we're looking at
  *
  * Return nonzero if aggregator has a partner (denoted by a non-zero ether
- * address for the partner).  Return 0 if not.
+ * address for the partner). Return 0 if not.
  */
 static inline int __agg_has_partner(struct aggregator *agg)
 {
@@ -169,7 +178,6 @@ static inline int __agg_has_partner(struct aggregator *agg)
 /**
  * __disable_port - disable the port's slave
  * @port: the port we're looking at
- *
  */
 static inline void __disable_port(struct port *port)
 {
@@ -179,7 +187,6 @@ static inline void __disable_port(struct port *port)
 /**
  * __enable_port - enable the port's slave, if it's up
  * @port: the port we're looking at
- *
  */
 static inline void __enable_port(struct port *port)
 {
@@ -192,7 +199,6 @@ static inline void __enable_port(struct port *port)
 /**
  * __port_is_enabled - check if the port's slave is in active state
  * @port: the port we're looking at
- *
  */
 static inline int __port_is_enabled(struct port *port)
 {
@@ -218,7 +224,6 @@ static inline u32 __get_agg_selection_mode(struct port *port)
 /**
  * __check_agg_selection_timer - check if the selection timer has expired
  * @port: the port we're looking at
- *
  */
 static inline int __check_agg_selection_timer(struct port *port)
 {
@@ -233,7 +238,6 @@ static inline int __check_agg_selection_timer(struct port *port)
 /**
  * __get_state_machine_lock - lock the port's state machines
  * @port: the port we're looking at
- *
  */
 static inline void __get_state_machine_lock(struct port *port)
 {
@@ -243,7 +247,6 @@ static inline void __get_state_machine_lock(struct port *port)
 /**
  * __release_state_machine_lock - unlock the port's state machines
  * @port: the port we're looking at
- *
  */
 static inline void __release_state_machine_lock(struct port *port)
 {
@@ -266,10 +269,11 @@ static u16 __get_link_speed(struct port *port)
 	struct slave *slave = port->slave;
 	u16 speed;
 
-	/* this if covers only a special case: when the configuration starts with
-	 * link down, it sets the speed to 0.
-	 * This is done in spite of the fact that the e100 driver reports 0 to be
-	 * compatible with MVT in the future.*/
+	/* this if covers only a special case: when the configuration starts
+	 * with link down, it sets the speed to 0.
+	 * This is done in spite of the fact that the e100 driver reports 0
+	 * to be compatible with MVT in the future.
+	 */
 	if (slave->link != BOND_LINK_UP)
 		speed = 0;
 	else {
@@ -291,7 +295,8 @@ static u16 __get_link_speed(struct port *port)
 			break;
 
 		default:
-			speed = 0; // unknown speed value from ethtool. shouldn't happen
+			/* unknown speed value from ethtool. shouldn't happen */
+			speed = 0;
 			break;
 		}
 	}
@@ -315,8 +320,9 @@ static u8 __get_duplex(struct port *port)
 
 	u8 retval;
 
-	//  handling a special case: when the configuration starts with
-	// link down, it sets the duplex to 0.
+	/* handling a special case: when the configuration starts with
+	 * link down, it sets the duplex to 0.
+	 */
 	if (slave->link != BOND_LINK_UP)
 		retval = 0x0;
 	else {
@@ -340,15 +346,14 @@ static u8 __get_duplex(struct port *port)
 /**
  * __initialize_port_locks - initialize a port's STATE machine spinlock
  * @port: the slave of the port we're looking at
- *
  */
 static inline void __initialize_port_locks(struct slave *slave)
 {
-	// make sure it isn't called twice
+	/* make sure it isn't called twice */
 	spin_lock_init(&(SLAVE_AD_INFO(slave).state_machine_lock));
 }
 
-//conversions
+/* Conversions */
 
 /**
  * __ad_timer_to_ticks - convert a given timer type to AD module ticks
@@ -357,39 +362,38 @@ static inline void __initialize_port_locks(struct slave *slave)
  *
  * If @timer_type is %current_while_timer, @par indicates long/short timer.
  * If @timer_type is %periodic_timer, @par is one of %FAST_PERIODIC_TIME,
- *						    %SLOW_PERIODIC_TIME.
+ *						     %SLOW_PERIODIC_TIME.
  */
 static u16 __ad_timer_to_ticks(u16 timer_type, u16 par)
 {
 	u16 retval = 0; /* to silence the compiler */
 
 	switch (timer_type) {
-	case AD_CURRENT_WHILE_TIMER:   // for rx machine usage
+	case AD_CURRENT_WHILE_TIMER:	/* for rx machine usage */
 		if (par)
-			retval = (AD_SHORT_TIMEOUT_TIME*ad_ticks_per_sec); // short timeout
+			retval = (AD_SHORT_TIMEOUT_TIME*ad_ticks_per_sec);
 		else
-			retval = (AD_LONG_TIMEOUT_TIME*ad_ticks_per_sec); // long timeout
+			retval = (AD_LONG_TIMEOUT_TIME*ad_ticks_per_sec);
 		break;
-	case AD_ACTOR_CHURN_TIMER:	    // for local churn machine
+	case AD_ACTOR_CHURN_TIMER:	/* for local churn machine */
 		retval = (AD_CHURN_DETECTION_TIME*ad_ticks_per_sec);
 		break;
-	case AD_PERIODIC_TIMER:	    // for periodic machine
-		retval = (par*ad_ticks_per_sec); // long timeout
+	case AD_PERIODIC_TIMER:		/* for periodic machine */
+		retval = (par*ad_ticks_per_sec); /* long timeout */
 		break;
-	case AD_PARTNER_CHURN_TIMER:   // for remote churn machine
+	case AD_PARTNER_CHURN_TIMER:	/* for remote churn machine */
 		retval = (AD_CHURN_DETECTION_TIME*ad_ticks_per_sec);
 		break;
-	case AD_WAIT_WHILE_TIMER:	    // for selection machine
+	case AD_WAIT_WHILE_TIMER:	/* for selection machine */
 		retval = (AD_AGGREGATE_WAIT_TIME*ad_ticks_per_sec);
 		break;
 	}
+
 	return retval;
 }
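
For a sense of scale: ad_ticks_per_sec is simply the tick_resolution passed to bond_3ad_initialize() later in this file, and the state machines are driven roughly every 100 ms, so it is typically 10. A stand-alone sketch of the conversion using only the constants visible in the hunks above (ad_ticks_per_sec = 10 is an assumption made for the example):

	#include <stdio.h>

	#define AD_FAST_PERIODIC_TIME	1	/* seconds */
	#define AD_SLOW_PERIODIC_TIME	30	/* seconds */
	#define AD_SHORT_TIMEOUT_TIME	(3 * AD_FAST_PERIODIC_TIME)
	#define AD_CHURN_DETECTION_TIME	60	/* seconds */
	#define AD_AGGREGATE_WAIT_TIME	2	/* seconds */

	int main(void)
	{
		unsigned int ad_ticks_per_sec = 10;	/* one tick per ~100 ms */

		printf("short timeout  : %u ticks\n",
		       AD_SHORT_TIMEOUT_TIME * ad_ticks_per_sec);	/* 30  */
		printf("churn detection: %u ticks\n",
		       AD_CHURN_DETECTION_TIME * ad_ticks_per_sec);	/* 600 */
		printf("wait while     : %u ticks\n",
		       AD_AGGREGATE_WAIT_TIME * ad_ticks_per_sec);	/* 20  */
		return 0;
	}
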
 
 
-/////////////////////////////////////////////////////////////////////////////////
-// ================= ad_rx_machine helper functions ==================
-/////////////////////////////////////////////////////////////////////////////////
+/* ================= ad_rx_machine helper functions ================== */
 
 /**
  * __choose_matched - update a port's matched variable from a received lacpdu
@@ -416,17 +420,18 @@ static u16 __ad_timer_to_ticks(u16 timer_type, u16 par)
  */
 static void __choose_matched(struct lacpdu *lacpdu, struct port *port)
 {
-	// check if all parameters are alike
+	/* check if all parameters are alike
+	 * or this is an individual link (aggregation == FALSE);
+	 * if so, update the state machine Matched variable.
+	 */
 	if (((ntohs(lacpdu->partner_port) == port->actor_port_number) &&
 	     (ntohs(lacpdu->partner_port_priority) == port->actor_port_priority) &&
-	     !MAC_ADDRESS_COMPARE(&(lacpdu->partner_system), &(port->actor_system)) &&
+	     MAC_ADDRESS_EQUAL(&(lacpdu->partner_system), &(port->actor_system)) &&
 	     (ntohs(lacpdu->partner_system_priority) == port->actor_system_priority) &&
 	     (ntohs(lacpdu->partner_key) == port->actor_oper_port_key) &&
 	     ((lacpdu->partner_state & AD_STATE_AGGREGATION) == (port->actor_oper_port_state & AD_STATE_AGGREGATION))) ||
-	    // or this is individual link(aggregation == FALSE)
 	    ((lacpdu->actor_state & AD_STATE_AGGREGATION) == 0)
 		) {
-		// update the state machine Matched variable
 		port->sm_vars |= AD_PORT_MATCHED;
 	} else {
 		port->sm_vars &= ~AD_PORT_MATCHED;
@@ -448,7 +453,9 @@ static void __record_pdu(struct lacpdu *lacpdu, struct port *port)
 		struct port_params *partner = &port->partner_oper;
 
 		__choose_matched(lacpdu, port);
-		// record the new parameter values for the partner operational
+		/* record the new parameter values for the partner
+		 * operational
+		 */
 		partner->port_number = ntohs(lacpdu->actor_port);
 		partner->port_priority = ntohs(lacpdu->actor_port_priority);
 		partner->system = lacpdu->actor_system;
@@ -456,10 +463,12 @@ static void __record_pdu(struct lacpdu *lacpdu, struct port *port)
 		partner->key = ntohs(lacpdu->actor_key);
 		partner->port_state = lacpdu->actor_state;
 
-		// set actor_oper_port_state.defaulted to FALSE
+		/* set actor_oper_port_state.defaulted to FALSE */
 		port->actor_oper_port_state &= ~AD_STATE_DEFAULTED;
 
-		// set the partner sync. to on if the partner is sync. and the port is matched
+		/* set the partner sync. to on if the partner is sync,
+		 * and the port is matched
+		 */
 		if ((port->sm_vars & AD_PORT_MATCHED)
 		    && (lacpdu->actor_state & AD_STATE_SYNCHRONIZATION))
 			partner->port_state |= AD_STATE_SYNCHRONIZATION;
@@ -479,11 +488,11 @@ static void __record_pdu(struct lacpdu *lacpdu, struct port *port)
 static void __record_default(struct port *port)
 {
 	if (port) {
-		// record the partner admin parameters
+		/* record the partner admin parameters */
 		memcpy(&port->partner_oper, &port->partner_admin,
 		       sizeof(struct port_params));
 
-		// set actor_oper_port_state.defaulted to true
+		/* set actor_oper_port_state.defaulted to true */
 		port->actor_oper_port_state |= AD_STATE_DEFAULTED;
 	}
 }
@@ -506,14 +515,15 @@ static void __update_selected(struct lacpdu *lacpdu, struct port *port)
 	if (lacpdu && port) {
 		const struct port_params *partner = &port->partner_oper;
 
-		// check if any parameter is different
+		/* check if any parameter is different and, if so,
+		 * update the state machine Selected variable.
+		 */
 		if (ntohs(lacpdu->actor_port) != partner->port_number ||
 		    ntohs(lacpdu->actor_port_priority) != partner->port_priority ||
-		    MAC_ADDRESS_COMPARE(&lacpdu->actor_system, &partner->system) ||
+		    !MAC_ADDRESS_EQUAL(&lacpdu->actor_system, &partner->system) ||
 		    ntohs(lacpdu->actor_system_priority) != partner->system_priority ||
 		    ntohs(lacpdu->actor_key) != partner->key ||
 		    (lacpdu->actor_state & AD_STATE_AGGREGATION) != (partner->port_state & AD_STATE_AGGREGATION)) {
-			// update the state machine Selected variable
 			port->sm_vars &= ~AD_PORT_SELECTED;
 		}
 	}
@@ -537,15 +547,16 @@ static void __update_default_selected(struct port *port)
 		const struct port_params *admin = &port->partner_admin;
 		const struct port_params *oper = &port->partner_oper;
 
-		// check if any parameter is different
+		/* check if any parameter is different and, if so,
+		 * update the state machine Selected variable.
+		 */
 		if (admin->port_number != oper->port_number ||
 		    admin->port_priority != oper->port_priority ||
-		    MAC_ADDRESS_COMPARE(&admin->system, &oper->system) ||
+		    !MAC_ADDRESS_EQUAL(&admin->system, &oper->system) ||
 		    admin->system_priority != oper->system_priority ||
 		    admin->key != oper->key ||
 		    (admin->port_state & AD_STATE_AGGREGATION)
 			!= (oper->port_state & AD_STATE_AGGREGATION)) {
-			// update the state machine Selected variable
 			port->sm_vars &= ~AD_PORT_SELECTED;
 		}
 	}
@@ -565,12 +576,14 @@ static void __update_default_selected(struct port *port)
  */
 static void __update_ntt(struct lacpdu *lacpdu, struct port *port)
 {
-	// validate lacpdu and port
+	/* validate lacpdu and port */
 	if (lacpdu && port) {
-		// check if any parameter is different
+		/* check if any parameter is different and, if so,
+		 * update port->ntt.
+		 */
 		if ((ntohs(lacpdu->partner_port) != port->actor_port_number) ||
 		    (ntohs(lacpdu->partner_port_priority) != port->actor_port_priority) ||
-		    MAC_ADDRESS_COMPARE(&(lacpdu->partner_system), &(port->actor_system)) ||
+		    !MAC_ADDRESS_EQUAL(&(lacpdu->partner_system), &(port->actor_system)) ||
 		    (ntohs(lacpdu->partner_system_priority) != port->actor_system_priority) ||
 		    (ntohs(lacpdu->partner_key) != port->actor_oper_port_key) ||
 		    ((lacpdu->partner_state & AD_STATE_LACP_ACTIVITY) != (port->actor_oper_port_state & AD_STATE_LACP_ACTIVITY)) ||
@@ -578,43 +591,12 @@ static void __update_ntt(struct lacpdu *lacpdu, struct port *port)
 		    ((lacpdu->partner_state & AD_STATE_SYNCHRONIZATION) != (port->actor_oper_port_state & AD_STATE_SYNCHRONIZATION)) ||
 		    ((lacpdu->partner_state & AD_STATE_AGGREGATION) != (port->actor_oper_port_state & AD_STATE_AGGREGATION))
 		   ) {
-
 			port->ntt = true;
 		}
 	}
 }
 
 /**
- * __attach_bond_to_agg
- * @port: the port we're looking at
- *
- * Handle the attaching of the port's control parser/multiplexer and the
- * aggregator. This function does nothing since the parser/multiplexer of the
- * receive and the parser/multiplexer of the aggregator are already combined.
- */
-static void __attach_bond_to_agg(struct port *port)
-{
-	port = NULL; /* just to satisfy the compiler */
-	// This function does nothing since the parser/multiplexer of the receive
-	// and the parser/multiplexer of the aggregator are already combined
-}
-
-/**
- * __detach_bond_from_agg
- * @port: the port we're looking at
- *
- * Handle the detaching of the port's control parser/multiplexer from the
- * aggregator. This function does nothing since the parser/multiplexer of the
- * receive and the parser/multiplexer of the aggregator are already combined.
- */
-static void __detach_bond_from_agg(struct port *port)
-{
-	port = NULL; /* just to satisfy the compiler */
-	// This function does nothing since the parser/multiplexer of the receive
-	// and the parser/multiplexer of the aggregator are already combined
-}
-
-/**
  * __agg_ports_are_ready - check if all ports in an aggregator are ready
  * @aggregator: the aggregator we're looking at
  *
@@ -625,7 +607,9 @@ static int __agg_ports_are_ready(struct aggregator *aggregator)
 	int retval = 1;
 
 	if (aggregator) {
-		// scan all ports in this aggregator to verfy if they are all ready
+		/* scan all ports in this aggregator to verify if they are
+		 * all ready.
+		 */
 		for (port = aggregator->lag_ports;
 		     port;
 		     port = port->next_port_in_aggregator) {
@@ -685,7 +669,7 @@ static u32 __get_agg_bandwidth(struct aggregator *aggregator)
 			bandwidth = aggregator->num_of_ports * 10000;
 			break;
 		default:
-			bandwidth = 0; /*to silence the compiler ....*/
+			bandwidth = 0; /* to silence the compiler */
 		}
 	}
 	return bandwidth;
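
The case shown above (num_of_ports * 10000) follows the pattern used for every speed in __get_agg_bandwidth(): nominal aggregate bandwidth is the per-port speed in Mbps times the number of ports. A compact user-space sketch of that mapping, built from the AD_LINK_SPEED_BITMASK_* values defined near the top of this file (the full switch body lies outside the quoted context, so this is an illustration rather than a copy):

	#include <stdio.h>

	#define AD_LINK_SPEED_BITMASK_1MBPS	0x1
	#define AD_LINK_SPEED_BITMASK_10MBPS	0x2
	#define AD_LINK_SPEED_BITMASK_100MBPS	0x4
	#define AD_LINK_SPEED_BITMASK_1000MBPS	0x8
	#define AD_LINK_SPEED_BITMASK_10000MBPS	0x10

	/* nominal aggregate bandwidth in Mbps for n ports of the given speed */
	static unsigned int agg_bandwidth(unsigned int num_of_ports,
					  unsigned int speed_bitmask)
	{
		unsigned int per_port;

		switch (speed_bitmask) {
		case AD_LINK_SPEED_BITMASK_1MBPS:	per_port = 1;	  break;
		case AD_LINK_SPEED_BITMASK_10MBPS:	per_port = 10;	  break;
		case AD_LINK_SPEED_BITMASK_100MBPS:	per_port = 100;	  break;
		case AD_LINK_SPEED_BITMASK_1000MBPS:	per_port = 1000;  break;
		case AD_LINK_SPEED_BITMASK_10000MBPS:	per_port = 10000; break;
		default:				per_port = 0;	  break;
		}
		return num_of_ports * per_port;
	}

	int main(void)
	{
		/* e.g. two 10G ports in one LAG */
		printf("%u Mbps\n",
		       agg_bandwidth(2, AD_LINK_SPEED_BITMASK_10000MBPS));
		return 0;
	}
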
@@ -695,6 +679,7 @@ static u32 __get_agg_bandwidth(struct aggregator *aggregator)
  * __get_active_agg - get the current active aggregator
  * @aggregator: the aggregator we're looking at
  *
+ * Caller must hold RCU lock.
  */
 static struct aggregator *__get_active_agg(struct aggregator *aggregator)
 {
@@ -702,7 +687,7 @@ static struct aggregator *__get_active_agg(struct aggregator *aggregator)
 	struct list_head *iter;
 	struct slave *slave;
 
-	bond_for_each_slave(bond, slave, iter)
+	bond_for_each_slave_rcu(bond, slave, iter)
 		if (SLAVE_AD_INFO(slave).aggregator.is_active)
 			return &(SLAVE_AD_INFO(slave).aggregator);
 
@@ -712,15 +697,14 @@ static struct aggregator *__get_active_agg(struct aggregator *aggregator)
 /**
  * __update_lacpdu_from_port - update a port's lacpdu fields
  * @port: the port we're looking at
- *
  */
 static inline void __update_lacpdu_from_port(struct port *port)
 {
 	struct lacpdu *lacpdu = &port->lacpdu;
 	const struct port_params *partner = &port->partner_oper;
 
-	/* update current actual Actor parameters */
-	/* lacpdu->subtype                   initialized
+	/* update current actual Actor parameters
+	 * lacpdu->subtype                   initialized
 	 * lacpdu->version_number            initialized
 	 * lacpdu->tlv_type_actor_info       initialized
 	 * lacpdu->actor_information_length  initialized
@@ -756,9 +740,7 @@ static inline void __update_lacpdu_from_port(struct port *port)
 	 */
 }
 
-//////////////////////////////////////////////////////////////////////////////////////
-// ================= main 802.3ad protocol code ======================================
-//////////////////////////////////////////////////////////////////////////////////////
+/* ================= main 802.3ad protocol code ========================= */
 
 /**
  * ad_lacpdu_send - send out a lacpdu packet on a given port
@@ -788,11 +770,12 @@ static int ad_lacpdu_send(struct port *port)
 
 	memcpy(lacpdu_header->hdr.h_dest, lacpdu_mcast_addr, ETH_ALEN);
 	/* Note: source address is set to be the member's PERMANENT address,
-	   because we use it to identify loopback lacpdus in receive. */
+	 * because we use it to identify loopback lacpdus in receive.
+	 */
 	memcpy(lacpdu_header->hdr.h_source, slave->perm_hwaddr, ETH_ALEN);
 	lacpdu_header->hdr.h_proto = PKT_TYPE_LACPDU;
 
-	lacpdu_header->lacpdu = port->lacpdu; // struct copy
+	lacpdu_header->lacpdu = port->lacpdu;
 
 	dev_queue_xmit(skb);
 
@@ -829,11 +812,12 @@ static int ad_marker_send(struct port *port, struct bond_marker *marker)
 
 	memcpy(marker_header->hdr.h_dest, lacpdu_mcast_addr, ETH_ALEN);
 	/* Note: source address is set to be the member's PERMANENT address,
-	   because we use it to identify loopback MARKERs in receive. */
+	 * because we use it to identify loopback MARKERs in receive.
+	 */
 	memcpy(marker_header->hdr.h_source, slave->perm_hwaddr, ETH_ALEN);
 	marker_header->hdr.h_proto = PKT_TYPE_LACPDU;
 
-	marker_header->marker = *marker; // struct copy
+	marker_header->marker = *marker;
 
 	dev_queue_xmit(skb);
 
@@ -843,72 +827,90 @@ static int ad_marker_send(struct port *port, struct bond_marker *marker)
 /**
  * ad_mux_machine - handle a port's mux state machine
  * @port: the port we're looking at
- *
  */
 static void ad_mux_machine(struct port *port)
 {
 	mux_states_t last_state;
 
-	// keep current State Machine state to compare later if it was changed
+	/* keep current State Machine state to compare later if it was
+	 * changed
+	 */
 	last_state = port->sm_mux_state;
 
 	if (port->sm_vars & AD_PORT_BEGIN) {
-		port->sm_mux_state = AD_MUX_DETACHED;		 // next state
+		port->sm_mux_state = AD_MUX_DETACHED;
 	} else {
 		switch (port->sm_mux_state) {
 		case AD_MUX_DETACHED:
 			if ((port->sm_vars & AD_PORT_SELECTED)
 			    || (port->sm_vars & AD_PORT_STANDBY))
 				/* if SELECTED or STANDBY */
-				port->sm_mux_state = AD_MUX_WAITING; // next state
+				port->sm_mux_state = AD_MUX_WAITING;
 			break;
 		case AD_MUX_WAITING:
-			// if SELECTED == FALSE return to DETACH state
-			if (!(port->sm_vars & AD_PORT_SELECTED)) { // if UNSELECTED
+			/* if SELECTED == FALSE return to DETACH state */
+			if (!(port->sm_vars & AD_PORT_SELECTED)) {
 				port->sm_vars &= ~AD_PORT_READY_N;
-				// in order to withhold the Selection Logic to check all ports READY_N value
-				// every callback cycle to update ready variable, we check READY_N and update READY here
+				/* to spare the Selection Logic from checking
+				 * every port's READY_N value on each callback
+				 * cycle just to update the ready variable, we
+				 * check READY_N and update READY here
+				 */
 				__set_agg_ports_ready(port->aggregator, __agg_ports_are_ready(port->aggregator));
-				port->sm_mux_state = AD_MUX_DETACHED;	 // next state
+				port->sm_mux_state = AD_MUX_DETACHED;
 				break;
 			}
 
-			// check if the wait_while_timer expired
+			/* check if the wait_while_timer expired */
 			if (port->sm_mux_timer_counter
 			    && !(--port->sm_mux_timer_counter))
 				port->sm_vars |= AD_PORT_READY_N;
 
-			// in order to withhold the selection logic to check all ports READY_N value
-			// every callback cycle to update ready variable, we check READY_N and update READY here
+			/* to spare the selection logic from checking every
+			 * port's READY_N value on each callback cycle just to
+			 * update the ready variable, we check READY_N and
+			 * update READY here
+			 */
 			__set_agg_ports_ready(port->aggregator, __agg_ports_are_ready(port->aggregator));
 
-			// if the wait_while_timer expired, and the port is in READY state, move to ATTACHED state
+			/* if the wait_while_timer expired, and the port is
+			 * in READY state, move to ATTACHED state
+			 */
 			if ((port->sm_vars & AD_PORT_READY)
 			    && !port->sm_mux_timer_counter)
-				port->sm_mux_state = AD_MUX_ATTACHED;	 // next state
+				port->sm_mux_state = AD_MUX_ATTACHED;
 			break;
 		case AD_MUX_ATTACHED:
-			// check also if agg_select_timer expired(so the edable port will take place only after this timer)
-			if ((port->sm_vars & AD_PORT_SELECTED) && (port->partner_oper.port_state & AD_STATE_SYNCHRONIZATION) && !__check_agg_selection_timer(port)) {
-				port->sm_mux_state = AD_MUX_COLLECTING_DISTRIBUTING;// next state
-			} else if (!(port->sm_vars & AD_PORT_SELECTED) || (port->sm_vars & AD_PORT_STANDBY)) {	  // if UNSELECTED or STANDBY
+			/* check also if agg_select_timer expired (so the
+			 * enabling of the port will take place only after this timer)
+			 */
+			if ((port->sm_vars & AD_PORT_SELECTED) &&
+			    (port->partner_oper.port_state & AD_STATE_SYNCHRONIZATION) &&
+			    !__check_agg_selection_timer(port)) {
+				port->sm_mux_state = AD_MUX_COLLECTING_DISTRIBUTING;
+			} else if (!(port->sm_vars & AD_PORT_SELECTED) ||
+				   (port->sm_vars & AD_PORT_STANDBY)) {
+				/* if UNSELECTED or STANDBY */
 				port->sm_vars &= ~AD_PORT_READY_N;
-				// in order to withhold the selection logic to check all ports READY_N value
-				// every callback cycle to update ready variable, we check READY_N and update READY here
+				/* to spare the selection logic from checking
+				 * every port's READY_N value on each callback
+				 * cycle just to update the ready variable, we
+				 * check READY_N and update READY here
+				 */
 				__set_agg_ports_ready(port->aggregator, __agg_ports_are_ready(port->aggregator));
-				port->sm_mux_state = AD_MUX_DETACHED;// next state
+				port->sm_mux_state = AD_MUX_DETACHED;
 			}
 			break;
 		case AD_MUX_COLLECTING_DISTRIBUTING:
-			if (!(port->sm_vars & AD_PORT_SELECTED) || (port->sm_vars & AD_PORT_STANDBY) ||
-			    !(port->partner_oper.port_state & AD_STATE_SYNCHRONIZATION)
-			   ) {
-				port->sm_mux_state = AD_MUX_ATTACHED;// next state
-
+			if (!(port->sm_vars & AD_PORT_SELECTED) ||
+			    (port->sm_vars & AD_PORT_STANDBY) ||
+			    !(port->partner_oper.port_state & AD_STATE_SYNCHRONIZATION)) {
+				port->sm_mux_state = AD_MUX_ATTACHED;
 			} else {
-				// if port state hasn't changed make
-				// sure that a collecting distributing
-				// port in an active aggregator is enabled
+				/* if port state hasn't changed make
+				 * sure that a collecting distributing
+				 * port in an active aggregator is enabled
+				 */
 				if (port->aggregator &&
 				    port->aggregator->is_active &&
 				    !__port_is_enabled(port)) {
@@ -917,19 +919,18 @@ static void ad_mux_machine(struct port *port)
 				}
 			}
 			break;
-		default:    //to silence the compiler
+		default:
 			break;
 		}
 	}
 
-	// check if the state machine was changed
+	/* check if the state machine was changed */
 	if (port->sm_mux_state != last_state) {
 		pr_debug("Mux Machine: Port=%d, Last State=%d, Curr State=%d\n",
 			 port->actor_port_number, last_state,
 			 port->sm_mux_state);
 		switch (port->sm_mux_state) {
 		case AD_MUX_DETACHED:
-			__detach_bond_from_agg(port);
 			port->actor_oper_port_state &= ~AD_STATE_SYNCHRONIZATION;
 			ad_disable_collecting_distributing(port);
 			port->actor_oper_port_state &= ~AD_STATE_COLLECTING;
@@ -940,7 +941,6 @@ static void ad_mux_machine(struct port *port)
 			port->sm_mux_timer_counter = __ad_timer_to_ticks(AD_WAIT_WHILE_TIMER, 0);
 			break;
 		case AD_MUX_ATTACHED:
-			__attach_bond_to_agg(port);
 			port->actor_oper_port_state |= AD_STATE_SYNCHRONIZATION;
 			port->actor_oper_port_state &= ~AD_STATE_COLLECTING;
 			port->actor_oper_port_state &= ~AD_STATE_DISTRIBUTING;
@@ -953,7 +953,7 @@ static void ad_mux_machine(struct port *port)
 			ad_enable_collecting_distributing(port);
 			port->ntt = true;
 			break;
-		default:    //to silence the compiler
+		default:
 			break;
 		}
 	}
@@ -972,59 +972,63 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
 {
 	rx_states_t last_state;
 
-	// keep current State Machine state to compare later if it was changed
+	/* keep current State Machine state to compare later if it was
+	 * changed
+	 */
 	last_state = port->sm_rx_state;
 
-	// check if state machine should change state
-	// first, check if port was reinitialized
+	/* check if state machine should change state */
+
+	/* first, check if port was reinitialized */
 	if (port->sm_vars & AD_PORT_BEGIN)
-		/* next state */
 		port->sm_rx_state = AD_RX_INITIALIZE;
-	// check if port is not enabled
+	/* check if port is not enabled */
 	else if (!(port->sm_vars & AD_PORT_BEGIN)
 		 && !port->is_enabled && !(port->sm_vars & AD_PORT_MOVED))
-		/* next state */
 		port->sm_rx_state = AD_RX_PORT_DISABLED;
-	// check if new lacpdu arrived
-	else if (lacpdu && ((port->sm_rx_state == AD_RX_EXPIRED) || (port->sm_rx_state == AD_RX_DEFAULTED) || (port->sm_rx_state == AD_RX_CURRENT))) {
-		port->sm_rx_timer_counter = 0; // zero timer
+	/* check if new lacpdu arrived */
+	else if (lacpdu && ((port->sm_rx_state == AD_RX_EXPIRED) ||
+		 (port->sm_rx_state == AD_RX_DEFAULTED) ||
+		 (port->sm_rx_state == AD_RX_CURRENT))) {
+		port->sm_rx_timer_counter = 0;
 		port->sm_rx_state = AD_RX_CURRENT;
 	} else {
-		// if timer is on, and if it is expired
-		if (port->sm_rx_timer_counter && !(--port->sm_rx_timer_counter)) {
+		/* if timer is on, and if it is expired */
+		if (port->sm_rx_timer_counter &&
+		    !(--port->sm_rx_timer_counter)) {
 			switch (port->sm_rx_state) {
 			case AD_RX_EXPIRED:
-				port->sm_rx_state = AD_RX_DEFAULTED;		// next state
+				port->sm_rx_state = AD_RX_DEFAULTED;
 				break;
 			case AD_RX_CURRENT:
-				port->sm_rx_state = AD_RX_EXPIRED;	    // next state
+				port->sm_rx_state = AD_RX_EXPIRED;
 				break;
-			default:    //to silence the compiler
+			default:
 				break;
 			}
 		} else {
-			// if no lacpdu arrived and no timer is on
+			/* if no lacpdu arrived and no timer is on */
 			switch (port->sm_rx_state) {
 			case AD_RX_PORT_DISABLED:
 				if (port->sm_vars & AD_PORT_MOVED)
-					port->sm_rx_state = AD_RX_INITIALIZE;	    // next state
+					port->sm_rx_state = AD_RX_INITIALIZE;
 				else if (port->is_enabled
 					 && (port->sm_vars
 					     & AD_PORT_LACP_ENABLED))
-					port->sm_rx_state = AD_RX_EXPIRED;	// next state
+					port->sm_rx_state = AD_RX_EXPIRED;
 				else if (port->is_enabled
 					 && ((port->sm_vars
 					      & AD_PORT_LACP_ENABLED) == 0))
-					port->sm_rx_state = AD_RX_LACP_DISABLED;    // next state
+					port->sm_rx_state = AD_RX_LACP_DISABLED;
 				break;
-			default:    //to silence the compiler
+			default:
 				break;
 
 			}
 		}
 	}
 
-	// check if the State machine was changed or new lacpdu arrived
+	/* check if the State machine was changed or new lacpdu arrived */
 	if ((port->sm_rx_state != last_state) || (lacpdu)) {
 		pr_debug("Rx Machine: Port=%d, Last State=%d, Curr State=%d\n",
 			 port->actor_port_number, last_state,
@@ -1039,10 +1043,9 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
 			__record_default(port);
 			port->actor_oper_port_state &= ~AD_STATE_EXPIRED;
 			port->sm_vars &= ~AD_PORT_MOVED;
-			port->sm_rx_state = AD_RX_PORT_DISABLED;	// next state
-
-			/*- Fall Through -*/
+			port->sm_rx_state = AD_RX_PORT_DISABLED;
 
+			/* Fall Through */
 		case AD_RX_PORT_DISABLED:
 			port->sm_vars &= ~AD_PORT_MATCHED;
 			break;
@@ -1054,13 +1057,15 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
 			port->actor_oper_port_state &= ~AD_STATE_EXPIRED;
 			break;
 		case AD_RX_EXPIRED:
-			//Reset of the Synchronization flag. (Standard 43.4.12)
-			//This reset cause to disable this port in the COLLECTING_DISTRIBUTING state of the
-			//mux machine in case of EXPIRED even if LINK_DOWN didn't arrive for the port.
+			/* Reset of the Synchronization flag (Standard 43.4.12)
+			 * This reset causes the port to be disabled in the
+			 * COLLECTING_DISTRIBUTING state of the mux machine in
+			 * case of EXPIRED, even if LINK_DOWN didn't arrive
+			 * for the port.
+			 */
 			port->partner_oper.port_state &= ~AD_STATE_SYNCHRONIZATION;
 			port->sm_vars &= ~AD_PORT_MATCHED;
-			port->partner_oper.port_state |=
-				AD_STATE_LACP_ACTIVITY;
+			port->partner_oper.port_state |= AD_STATE_LACP_ACTIVITY;
 			port->sm_rx_timer_counter = __ad_timer_to_ticks(AD_CURRENT_WHILE_TIMER, (u16)(AD_SHORT_TIMEOUT));
 			port->actor_oper_port_state |= AD_STATE_EXPIRED;
 			break;
@@ -1071,12 +1076,12 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
 			port->actor_oper_port_state &= ~AD_STATE_EXPIRED;
 			break;
 		case AD_RX_CURRENT:
-			// detect loopback situation
-			if (!MAC_ADDRESS_COMPARE(&(lacpdu->actor_system), &(port->actor_system))) {
-				// INFO_RECEIVED_LOOPBACK_FRAMES
-				pr_err("%s: An illegal loopback occurred on adapter (%s).\n"
-				       "Check the configuration to verify that all adapters are connected to 802.3ad compliant switch ports\n",
-				       port->slave->bond->dev->name, port->slave->dev->name);
+			/* detect loopback situation */
+			if (MAC_ADDRESS_EQUAL(&(lacpdu->actor_system),
+					      &(port->actor_system))) {
+				pr_err("%s: An illegal loopback occurred on adapter (%s).\nCheck the configuration to verify that all adapters are connected to 802.3ad compliant switch ports\n",
+				       port->slave->bond->dev->name,
+				       port->slave->dev->name);
 				return;
 			}
 			__update_selected(lacpdu, port);
@@ -1085,7 +1090,7 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
 			port->sm_rx_timer_counter = __ad_timer_to_ticks(AD_CURRENT_WHILE_TIMER, (u16)(port->actor_oper_port_state & AD_STATE_LACP_TIMEOUT));
 			port->actor_oper_port_state &= ~AD_STATE_EXPIRED;
 			break;
-		default:    //to silence the compiler
+		default:
 			break;
 		}
 	}
@@ -1094,13 +1099,14 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
 /**
  * ad_tx_machine - handle a port's tx state machine
  * @port: the port we're looking at
- *
  */
 static void ad_tx_machine(struct port *port)
 {
-	// check if tx timer expired, to verify that we do not send more than 3 packets per second
+	/* check if tx timer expired, to verify that we do not send more than
+	 * 3 packets per second
+	 */
 	if (port->sm_tx_timer_counter && !(--port->sm_tx_timer_counter)) {
-		// check if there is something to send
+		/* check if there is something to send */
 		if (port->ntt && (port->sm_vars & AD_PORT_LACP_ENABLED)) {
 			__update_lacpdu_from_port(port);
 
@@ -1108,14 +1114,16 @@ static void ad_tx_machine(struct port *port)
 				pr_debug("Sent LACPDU on port %d\n",
 					 port->actor_port_number);
 
-				/* mark ntt as false, so it will not be sent again until
-				   demanded */
+				/* mark ntt as false, so it will not be sent
+				 * again until demanded
+				 */
 				port->ntt = false;
 			}
 		}
-		// restart tx timer(to verify that we will not exceed AD_MAX_TX_IN_SECOND
-		port->sm_tx_timer_counter =
-			ad_ticks_per_sec/AD_MAX_TX_IN_SECOND;
+		/* restart tx timer (to verify that we will not exceed
+		 * AD_MAX_TX_IN_SECOND)
+		 */
+		port->sm_tx_timer_counter = ad_ticks_per_sec/AD_MAX_TX_IN_SECOND;
 	}
 }
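
The reload value computed above caps transmission at AD_MAX_TX_IN_SECOND LACPDUs per second: with the usual ~100 ms tick (ad_ticks_per_sec = 10, the same assumption as in the earlier sketch), 10 / 3 = 3 ticks, i.e. roughly one transmit opportunity every 300 ms. In plain C:

	#include <stdio.h>

	#define AD_MAX_TX_IN_SECOND	3

	int main(void)
	{
		unsigned int ad_ticks_per_sec = 10;	/* one tick per ~100 ms */
		unsigned int reload = ad_ticks_per_sec / AD_MAX_TX_IN_SECOND;

		printf("tx timer reload: %u ticks (~%u ms between LACPDUs)\n",
		       reload, reload * 100);
		return 0;
	}
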
 
@@ -1129,76 +1137,79 @@ static void ad_periodic_machine(struct port *port)
 {
 	periodic_states_t last_state;
 
-	// keep current state machine state to compare later if it was changed
+	/* keep current state machine state to compare later if it was changed */
 	last_state = port->sm_periodic_state;
 
-	// check if port was reinitialized
+	/* check if port was reinitialized */
 	if (((port->sm_vars & AD_PORT_BEGIN) || !(port->sm_vars & AD_PORT_LACP_ENABLED) || !port->is_enabled) ||
 	    (!(port->actor_oper_port_state & AD_STATE_LACP_ACTIVITY) && !(port->partner_oper.port_state & AD_STATE_LACP_ACTIVITY))
 	   ) {
-		port->sm_periodic_state = AD_NO_PERIODIC;	     // next state
+		port->sm_periodic_state = AD_NO_PERIODIC;
 	}
-	// check if state machine should change state
+	/* check if state machine should change state */
 	else if (port->sm_periodic_timer_counter) {
-		// check if periodic state machine expired
+		/* check if periodic state machine expired */
 		if (!(--port->sm_periodic_timer_counter)) {
-			// if expired then do tx
-			port->sm_periodic_state = AD_PERIODIC_TX;    // next state
+			/* if expired then do tx */
+			port->sm_periodic_state = AD_PERIODIC_TX;
 		} else {
-			// If not expired, check if there is some new timeout parameter from the partner state
+			/* If not expired, check if there is some new timeout
+			 * parameter from the partner state
+			 */
 			switch (port->sm_periodic_state) {
 			case AD_FAST_PERIODIC:
 				if (!(port->partner_oper.port_state
 				      & AD_STATE_LACP_TIMEOUT))
-					port->sm_periodic_state = AD_SLOW_PERIODIC;  // next state
+					port->sm_periodic_state = AD_SLOW_PERIODIC;
 				break;
 			case AD_SLOW_PERIODIC:
 				if ((port->partner_oper.port_state & AD_STATE_LACP_TIMEOUT)) {
-					// stop current timer
 					port->sm_periodic_timer_counter = 0;
-					port->sm_periodic_state = AD_PERIODIC_TX;	 // next state
+					port->sm_periodic_state = AD_PERIODIC_TX;
 				}
 				break;
-			default:    //to silence the compiler
+			default:
 				break;
 			}
 		}
 	} else {
 		switch (port->sm_periodic_state) {
 		case AD_NO_PERIODIC:
-			port->sm_periodic_state = AD_FAST_PERIODIC;	 // next state
+			port->sm_periodic_state = AD_FAST_PERIODIC;
 			break;
 		case AD_PERIODIC_TX:
-			if (!(port->partner_oper.port_state
-			      & AD_STATE_LACP_TIMEOUT))
-				port->sm_periodic_state = AD_SLOW_PERIODIC;  // next state
+			if (!(port->partner_oper.port_state &
+			    AD_STATE_LACP_TIMEOUT))
+				port->sm_periodic_state = AD_SLOW_PERIODIC;
 			else
-				port->sm_periodic_state = AD_FAST_PERIODIC;  // next state
+				port->sm_periodic_state = AD_FAST_PERIODIC;
 			break;
-		default:    //to silence the compiler
+		default:
 			break;
 		}
 	}
 
-	// check if the state machine was changed
+	/* check if the state machine was changed */
 	if (port->sm_periodic_state != last_state) {
 		pr_debug("Periodic Machine: Port=%d, Last State=%d, Curr State=%d\n",
 			 port->actor_port_number, last_state,
 			 port->sm_periodic_state);
 		switch (port->sm_periodic_state) {
 		case AD_NO_PERIODIC:
-			port->sm_periodic_timer_counter = 0;	   // zero timer
+			port->sm_periodic_timer_counter = 0;
 			break;
 		case AD_FAST_PERIODIC:
-			port->sm_periodic_timer_counter = __ad_timer_to_ticks(AD_PERIODIC_TIMER, (u16)(AD_FAST_PERIODIC_TIME))-1; // decrement 1 tick we lost in the PERIODIC_TX cycle
+			/* decrement 1 tick we lost in the PERIODIC_TX cycle */
+			port->sm_periodic_timer_counter = __ad_timer_to_ticks(AD_PERIODIC_TIMER, (u16)(AD_FAST_PERIODIC_TIME))-1;
 			break;
 		case AD_SLOW_PERIODIC:
-			port->sm_periodic_timer_counter = __ad_timer_to_ticks(AD_PERIODIC_TIMER, (u16)(AD_SLOW_PERIODIC_TIME))-1; // decrement 1 tick we lost in the PERIODIC_TX cycle
+			/* decrement 1 tick we lost in the PERIODIC_TX cycle */
+			port->sm_periodic_timer_counter = __ad_timer_to_ticks(AD_PERIODIC_TIMER, (u16)(AD_SLOW_PERIODIC_TIME))-1;
 			break;
 		case AD_PERIODIC_TX:
 			port->ntt = true;
 			break;
-		default:    //to silence the compiler
+		default:
 			break;
 		}
 	}
@@ -1221,30 +1232,38 @@ static void ad_port_selection_logic(struct port *port)
 	struct slave *slave;
 	int found = 0;
 
-	// if the port is already Selected, do nothing
+	/* if the port is already Selected, do nothing */
 	if (port->sm_vars & AD_PORT_SELECTED)
 		return;
 
 	bond = __get_bond_by_port(port);
 
-	// if the port is connected to other aggregator, detach it
+	/* if the port is connected to other aggregator, detach it */
 	if (port->aggregator) {
-		// detach the port from its former aggregator
+		/* detach the port from its former aggregator */
 		temp_aggregator = port->aggregator;
 		for (curr_port = temp_aggregator->lag_ports; curr_port;
 		     last_port = curr_port,
-			     curr_port = curr_port->next_port_in_aggregator) {
+		     curr_port = curr_port->next_port_in_aggregator) {
 			if (curr_port == port) {
 				temp_aggregator->num_of_ports--;
-				if (!last_port) {// if it is the first port attached to the aggregator
+				/* if it is the first port attached to the
+				 * aggregator
+				 */
+				if (!last_port) {
 					temp_aggregator->lag_ports =
 						port->next_port_in_aggregator;
-				} else {// not the first port attached to the aggregator
+				} else {
+					/* not the first port attached to the
+					 * aggregator
+					 */
 					last_port->next_port_in_aggregator =
 						port->next_port_in_aggregator;
 				}
 
-				// clear the port's relations to this aggregator
+				/* clear the port's relations to this
+				 * aggregator
+				 */
 				port->aggregator = NULL;
 				port->next_port_in_aggregator = NULL;
 				port->actor_port_aggregator_identifier = 0;
@@ -1252,41 +1271,46 @@ static void ad_port_selection_logic(struct port *port)
 				pr_debug("Port %d left LAG %d\n",
 					 port->actor_port_number,
 					 temp_aggregator->aggregator_identifier);
-				// if the aggregator is empty, clear its parameters, and set it ready to be attached
+				/* if the aggregator is empty, clear its
+				 * parameters, and set it ready to be attached
+				 */
 				if (!temp_aggregator->lag_ports)
 					ad_clear_agg(temp_aggregator);
 				break;
 			}
 		}
-		if (!curr_port) { // meaning: the port was related to an aggregator but was not on the aggregator port list
-			pr_warning("%s: Warning: Port %d (on %s) was related to aggregator %d but was not on its port list\n",
-				   port->slave->bond->dev->name,
-				   port->actor_port_number,
-				   port->slave->dev->name,
-				   port->aggregator->aggregator_identifier);
+		if (!curr_port) {
+			/* meaning: the port was related to an aggregator
+			 * but was not on the aggregator port list
+			 */
+			pr_warn("%s: Warning: Port %d (on %s) was related to aggregator %d but was not on its port list\n",
+				port->slave->bond->dev->name,
+				port->actor_port_number,
+				port->slave->dev->name,
+				port->aggregator->aggregator_identifier);
 		}
 	}
-	// search on all aggregators for a suitable aggregator for this port
+	/* search on all aggregators for a suitable aggregator for this port */
 	bond_for_each_slave(bond, slave, iter) {
 		aggregator = &(SLAVE_AD_INFO(slave).aggregator);
 
-		// keep a free aggregator for later use(if needed)
+		/* keep a free aggregator for later use (if needed) */
 		if (!aggregator->lag_ports) {
 			if (!free_aggregator)
 				free_aggregator = aggregator;
 			continue;
 		}
-		// check if current aggregator suits us
-		if (((aggregator->actor_oper_aggregator_key == port->actor_oper_port_key) && // if all parameters match AND
-		     !MAC_ADDRESS_COMPARE(&(aggregator->partner_system), &(port->partner_oper.system)) &&
+		/* check if current aggregator suits us */
+		if (((aggregator->actor_oper_aggregator_key == port->actor_oper_port_key) && /* if all parameters match AND */
+		     MAC_ADDRESS_EQUAL(&(aggregator->partner_system), &(port->partner_oper.system)) &&
 		     (aggregator->partner_system_priority == port->partner_oper.system_priority) &&
 		     (aggregator->partner_oper_aggregator_key == port->partner_oper.key)
 		    ) &&
-		    ((MAC_ADDRESS_COMPARE(&(port->partner_oper.system), &(null_mac_addr)) && // partner answers
-		      !aggregator->is_individual)  // but is not individual OR
+		    ((!MAC_ADDRESS_EQUAL(&(port->partner_oper.system), &(null_mac_addr)) && /* partner answers */
+		      !aggregator->is_individual)  /* but is not individual OR */
 		    )
 		   ) {
-			// attach to the founded aggregator
+			/* attach to the aggregator we found */
 			port->aggregator = aggregator;
 			port->actor_port_aggregator_identifier =
 				port->aggregator->aggregator_identifier;
@@ -1297,23 +1321,26 @@ static void ad_port_selection_logic(struct port *port)
 				 port->actor_port_number,
 				 port->aggregator->aggregator_identifier);
 
-			// mark this port as selected
+			/* mark this port as selected */
 			port->sm_vars |= AD_PORT_SELECTED;
 			found = 1;
 			break;
 		}
 	}
 
-	// the port couldn't find an aggregator - attach it to a new aggregator
+	/* the port couldn't find an aggregator - attach it to a new
+	 * aggregator
+	 */
 	if (!found) {
 		if (free_aggregator) {
-			// assign port a new aggregator
+			/* assign port a new aggregator */
 			port->aggregator = free_aggregator;
 			port->actor_port_aggregator_identifier =
 				port->aggregator->aggregator_identifier;
 
-			// update the new aggregator's parameters
-			// if port was responsed from the end-user
+			/* update the new aggregator's parameters
+			 * if port was responded from the end-user
+			 */
 			if (port->actor_oper_port_key & AD_DUPLEX_KEY_BITS)
 				/* if port is full duplex */
 				port->aggregator->is_individual = false;
@@ -1332,7 +1359,7 @@ static void ad_port_selection_logic(struct port *port)
 			port->aggregator->lag_ports = port;
 			port->aggregator->num_of_ports++;
 
-			// mark this port as selected
+			/* mark this port as selected */
 			port->sm_vars |= AD_PORT_SELECTED;
 
 			pr_debug("Port %d joined LAG %d(new LAG)\n",
@@ -1344,23 +1371,24 @@ static void ad_port_selection_logic(struct port *port)
 			       port->actor_port_number, port->slave->dev->name);
 		}
 	}
-	// if all aggregator's ports are READY_N == TRUE, set ready=TRUE in all aggregator's ports
-	// else set ready=FALSE in all aggregator's ports
-	__set_agg_ports_ready(port->aggregator, __agg_ports_are_ready(port->aggregator));
+	/* if all aggregator's ports are READY_N == TRUE, set ready=TRUE
+	 * in all aggregator's ports, else set ready=FALSE in all
+	 * aggregator's ports
+	 */
+	__set_agg_ports_ready(port->aggregator,
+			      __agg_ports_are_ready(port->aggregator));
 
 	aggregator = __get_first_agg(port);
 	ad_agg_selection_logic(aggregator);
 }
 
-/*
- * Decide if "agg" is a better choice for the new active aggregator that
+/* Decide if "agg" is a better choice for the new active aggregator that
  * the current best, according to the ad_select policy.
  */
 static struct aggregator *ad_agg_selection_test(struct aggregator *best,
 						struct aggregator *curr)
 {
-	/*
-	 * 0. If no best, select current.
+	/* 0. If no best, select current.
 	 *
 	 * 1. If the current agg is not individual, and the best is
 	 *    individual, select current.
@@ -1416,9 +1444,9 @@ static struct aggregator *ad_agg_selection_test(struct aggregator *best,
 		break;
 
 	default:
-		pr_warning("%s: Impossible agg select mode %d\n",
-			   curr->slave->bond->dev->name,
-			   __get_agg_selection_mode(curr->lag_ports));
+		pr_warn("%s: Impossible agg select mode %d\n",
+			curr->slave->bond->dev->name,
+			__get_agg_selection_mode(curr->lag_ports));
 		break;
 	}
 
@@ -1428,10 +1456,12 @@ static struct aggregator *ad_agg_selection_test(struct aggregator *best,
 static int agg_device_up(const struct aggregator *agg)
 {
 	struct port *port = agg->lag_ports;
+
 	if (!port)
 		return 0;
-	return (netif_running(port->slave->dev) &&
-		netif_carrier_ok(port->slave->dev));
+
+	return netif_running(port->slave->dev) &&
+	       netif_carrier_ok(port->slave->dev);
 }
 
 /**
@@ -1467,11 +1497,12 @@ static void ad_agg_selection_logic(struct aggregator *agg)
 	struct slave *slave;
 	struct port *port;
 
+	rcu_read_lock();
 	origin = agg;
 	active = __get_active_agg(agg);
 	best = (active && agg_device_up(active)) ? active : NULL;
 
-	bond_for_each_slave(bond, slave, iter) {
+	bond_for_each_slave_rcu(bond, slave, iter) {
 		agg = &(SLAVE_AD_INFO(slave).aggregator);
 
 		agg->is_active = 0;
@@ -1482,8 +1513,7 @@ static void ad_agg_selection_logic(struct aggregator *agg)
 
 	if (best &&
 	    __get_agg_selection_mode(best->lag_ports) == BOND_AD_STABLE) {
-		/*
-		 * For the STABLE policy, don't replace the old active
+		/* For the STABLE policy, don't replace the old active
 		 * aggregator if it's still active (it has an answering
 		 * partner) or if both the best and active don't have an
 		 * answering partner.
@@ -1491,7 +1521,8 @@ static void ad_agg_selection_logic(struct aggregator *agg)
 		if (active && active->lag_ports &&
 		    active->lag_ports->is_enabled &&
 		    (__agg_has_partner(active) ||
-		     (!__agg_has_partner(active) && !__agg_has_partner(best)))) {
+		     (!__agg_has_partner(active) &&
+		     !__agg_has_partner(best)))) {
 			if (!(!active->actor_oper_aggregator_key &&
 			      best->actor_oper_aggregator_key)) {
 				best = NULL;
@@ -1505,7 +1536,7 @@ static void ad_agg_selection_logic(struct aggregator *agg)
 		active->is_active = 1;
 	}
 
-	// if there is new best aggregator, activate it
+	/* if there is new best aggregator, activate it */
 	if (best) {
 		pr_debug("best Agg=%d; P=%d; a k=%d; p k=%d; Ind=%d; Act=%d\n",
 			 best->aggregator_identifier, best->num_of_ports,
@@ -1516,7 +1547,7 @@ static void ad_agg_selection_logic(struct aggregator *agg)
 			 best->lag_ports, best->slave,
 			 best->slave ? best->slave->dev->name : "NULL");
 
-		bond_for_each_slave(bond, slave, iter) {
+		bond_for_each_slave_rcu(bond, slave, iter) {
 			agg = &(SLAVE_AD_INFO(slave).aggregator);
 
 			pr_debug("Agg=%d; P=%d; a k=%d; p k=%d; Ind=%d; Act=%d\n",
@@ -1526,10 +1557,11 @@ static void ad_agg_selection_logic(struct aggregator *agg)
 				 agg->is_individual, agg->is_active);
 		}
 
-		// check if any partner replys
+		/* check if any partner replies */
 		if (best->is_individual) {
-			pr_warning("%s: Warning: No 802.3ad response from the link partner for any adapters in the bond\n",
-				   best->slave ? best->slave->bond->dev->name : "NULL");
+			pr_warn("%s: Warning: No 802.3ad response from the link partner for any adapters in the bond\n",
+				best->slave ?
+				best->slave->bond->dev->name : "NULL");
 		}
 
 		best->is_active = 1;
@@ -1541,7 +1573,9 @@ static void ad_agg_selection_logic(struct aggregator *agg)
 			 best->partner_oper_aggregator_key,
 			 best->is_individual, best->is_active);
 
-		// disable the ports that were related to the former active_aggregator
+		/* disable the ports that were related to the former
+		 * active_aggregator
+		 */
 		if (active) {
 			for (port = active->lag_ports; port;
 			     port = port->next_port_in_aggregator) {
@@ -1550,8 +1584,7 @@ static void ad_agg_selection_logic(struct aggregator *agg)
 		}
 	}
 
-	/*
-	 * if the selected aggregator is of join individuals
+	/* if the selected aggregator is of join individuals
 	 * (partner_system is NULL), enable their ports
 	 */
 	active = __get_active_agg(origin);
@@ -1565,13 +1598,14 @@ static void ad_agg_selection_logic(struct aggregator *agg)
 		}
 	}
 
+	rcu_read_unlock();
+
 	bond_3ad_set_carrier(bond);
 }
 
 /**
  * ad_clear_agg - clear a given aggregator's parameters
  * @aggregator: the aggregator we're looking at
- *
  */
 static void ad_clear_agg(struct aggregator *aggregator)
 {
@@ -1595,7 +1629,6 @@ static void ad_clear_agg(struct aggregator *aggregator)
 /**
  * ad_initialize_agg - initialize a given aggregator's parameters
  * @aggregator: the aggregator we're looking at
- *
  */
 static void ad_initialize_agg(struct aggregator *aggregator)
 {
@@ -1612,7 +1645,6 @@ static void ad_initialize_agg(struct aggregator *aggregator)
  * ad_initialize_port - initialize a given port's parameters
  * @aggregator: the aggregator we're looking at
  * @lacp_fast: boolean. whether fast periodic should be used
- *
  */
 static void ad_initialize_port(struct port *port, int lacp_fast)
 {
@@ -1644,8 +1676,10 @@ static void ad_initialize_port(struct port *port, int lacp_fast)
 		port->ntt = false;
 		port->actor_admin_port_key = 1;
 		port->actor_oper_port_key  = 1;
-		port->actor_admin_port_state = AD_STATE_AGGREGATION | AD_STATE_LACP_ACTIVITY;
-		port->actor_oper_port_state  = AD_STATE_AGGREGATION | AD_STATE_LACP_ACTIVITY;
+		port->actor_admin_port_state = AD_STATE_AGGREGATION |
+					       AD_STATE_LACP_ACTIVITY;
+		port->actor_oper_port_state  = AD_STATE_AGGREGATION |
+					       AD_STATE_LACP_ACTIVITY;
 
 		if (lacp_fast)
 			port->actor_oper_port_state |= AD_STATE_LACP_TIMEOUT;
@@ -1654,7 +1688,7 @@ static void ad_initialize_port(struct port *port, int lacp_fast)
 		memcpy(&port->partner_oper, &tmpl, sizeof(tmpl));
 
 		port->is_enabled = true;
-		// ****** private parameters ******
+		/* private parameters */
 		port->sm_vars = 0x3;
 		port->sm_rx_state = 0;
 		port->sm_rx_timer_counter = 0;
@@ -1692,11 +1726,12 @@ static void ad_enable_collecting_distributing(struct port *port)
 /**
  * ad_disable_collecting_distributing - disable a port's transmit/receive
  * @port: the port we're looking at
- *
  */
 static void ad_disable_collecting_distributing(struct port *port)
 {
-	if (port->aggregator && MAC_ADDRESS_COMPARE(&(port->aggregator->partner_system), &(null_mac_addr))) {
+	if (port->aggregator &&
+	    !MAC_ADDRESS_EQUAL(&(port->aggregator->partner_system),
+			       &(null_mac_addr))) {
 		pr_debug("Disabling port %d(LAG %d)\n",
 			 port->actor_port_number,
 			 port->aggregator->aggregator_identifier);
@@ -1704,66 +1739,22 @@ static void ad_disable_collecting_distributing(struct port *port)
 	}
 }
 
-#if 0
-/**
- * ad_marker_info_send - send a marker information frame
- * @port: the port we're looking at
- *
- * This function does nothing since we decided not to implement send and handle
- * response for marker PDU's, in this stage, but only to respond to marker
- * information.
- */
-static void ad_marker_info_send(struct port *port)
-{
-	struct bond_marker marker;
-	u16 index;
-
-	// fill the marker PDU with the appropriate values
-	marker.subtype = 0x02;
-	marker.version_number = 0x01;
-	marker.tlv_type = AD_MARKER_INFORMATION_SUBTYPE;
-	marker.marker_length = 0x16;
-	// convert requester_port to Big Endian
-	marker.requester_port = (((port->actor_port_number & 0xFF) << 8) |((u16)(port->actor_port_number & 0xFF00) >> 8));
-	marker.requester_system = port->actor_system;
-	// convert requester_port(u32) to Big Endian
-	marker.requester_transaction_id =
-		(((++port->transaction_id & 0xFF) << 24)
-		 | ((port->transaction_id & 0xFF00) << 8)
-		 | ((port->transaction_id & 0xFF0000) >> 8)
-		 | ((port->transaction_id & 0xFF000000) >> 24));
-	marker.pad = 0;
-	marker.tlv_type_terminator = 0x00;
-	marker.terminator_length = 0x00;
-	for (index = 0; index < 90; index++)
-		marker.reserved_90[index] = 0;
-
-	// send the marker information
-	if (ad_marker_send(port, &marker) >= 0) {
-		pr_debug("Sent Marker Information on port %d\n",
-			 port->actor_port_number);
-	}
-}
-#endif
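
The dead ad_marker_info_send() removed above open-coded the conversion of requester_port and requester_transaction_id to big-endian byte order. For reference, a small user-space sketch of the same conversion using the standard helpers; note that the open-coded version is an unconditional byte swap, so it only matches htons()/htonl() on little-endian hosts:

	#include <stdio.h>
	#include <stdint.h>
	#include <arpa/inet.h>

	int main(void)
	{
		uint16_t port_number = 0x0102;		/* host order */
		uint32_t transaction_id = 0x01020304;	/* host order */

		/* open-coded 16-bit swap, as in the removed function */
		uint16_t swapped = ((port_number & 0xFF) << 8) |
				   ((port_number & 0xFF00) >> 8);

		printf("open-coded swap: 0x%04x\n", (unsigned int)swapped);
		printf("htons():         0x%04x\n", (unsigned int)htons(port_number));
		printf("htonl():         0x%08x\n", (unsigned int)htonl(transaction_id));
		return 0;
	}
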
-
 /**
  * ad_marker_info_received - handle receive of a Marker information frame
  * @marker_info: Marker info received
  * @port: the port we're looking at
- *
  */
 static void ad_marker_info_received(struct bond_marker *marker_info,
 	struct port *port)
 {
 	struct bond_marker marker;
 
-	// copy the received marker data to the response marker
-	//marker = *marker_info;
+	/* copy the received marker data to the response marker */
 	memcpy(&marker, marker_info, sizeof(struct bond_marker));
-	// change the marker subtype to marker response
+	/* change the marker subtype to marker response */
 	marker.tlv_type = AD_MARKER_RESPONSE_SUBTYPE;
-	// send the marker response
 
+	/* send the marker response */
 	if (ad_marker_send(port, &marker) >= 0) {
 		pr_debug("Sent Marker Response on port %d\n",
 			 port->actor_port_number);
@@ -1780,22 +1771,21 @@ static void ad_marker_info_received(struct bond_marker *marker_info,
  * information.
  */
 static void ad_marker_response_received(struct bond_marker *marker,
-	struct port *port)
+					struct port *port)
 {
-	marker = NULL; /* just to satisfy the compiler */
-	port = NULL;  /* just to satisfy the compiler */
-	// DO NOTHING, SINCE WE DECIDED NOT TO IMPLEMENT THIS FEATURE FOR NOW
+	marker = NULL;
+	port = NULL;
+	/* DO NOTHING, SINCE WE DECIDED NOT TO IMPLEMENT THIS FEATURE FOR NOW */
 }
 
-//////////////////////////////////////////////////////////////////////////////////////
-// ================= AD exported functions to the main bonding code ==================
-//////////////////////////////////////////////////////////////////////////////////////
+/* ========= AD exported functions to the main bonding code ========= */
 
-// Check aggregators status in team every T seconds
+/* Check aggregators status in team every T seconds */
 #define AD_AGGREGATOR_SELECTION_TIMER  8
 
-/*
- * bond_3ad_initiate_agg_selection(struct bonding *bond)
+/**
+ * bond_3ad_initiate_agg_selection - initiate aggregator selection
+ * @bond: bonding struct
  *
  * Set the aggregation selection timer, to initiate an agg selection in
  * the very near future.  Called during first initialization, and during
@@ -1817,8 +1807,8 @@ static u16 aggregator_identifier;
  */
 void bond_3ad_initialize(struct bonding *bond, u16 tick_resolution)
 {
-	// check that the bond is not initialized yet
-	if (MAC_ADDRESS_COMPARE(&(BOND_AD_INFO(bond).system.sys_mac_addr),
+	/* check that the bond is not initialized yet */
+	if (!MAC_ADDRESS_EQUAL(&(BOND_AD_INFO(bond).system.sys_mac_addr),
 				bond->dev->dev_addr)) {
 
 		aggregator_identifier = 0;
@@ -1826,7 +1816,9 @@ void bond_3ad_initialize(struct bonding *bond, u16 tick_resolution)
 		BOND_AD_INFO(bond).system.sys_priority = 0xFFFF;
 		BOND_AD_INFO(bond).system.sys_mac_addr = *((struct mac_addr *)bond->dev->dev_addr);
 
-		// initialize how many times this module is called in one second(should be about every 100ms)
+		/* initialize how many times this module is called in one
+		 * second (should be about every 100ms)
+		 */
 		ad_ticks_per_sec = tick_resolution;
 
 		bond_3ad_initiate_agg_selection(bond,
@@ -1842,22 +1834,16 @@ void bond_3ad_initialize(struct bonding *bond, u16 tick_resolution)
  * Returns:   0 on success
  *          < 0 on error
  */
-int bond_3ad_bind_slave(struct slave *slave)
+void bond_3ad_bind_slave(struct slave *slave)
 {
 	struct bonding *bond = bond_get_bond_by_slave(slave);
 	struct port *port;
 	struct aggregator *aggregator;
 
-	if (bond == NULL) {
-		pr_err("%s: The slave %s is not attached to its bond\n",
-		       slave->bond->dev->name, slave->dev->name);
-		return -1;
-	}
-
-	//check that the slave has not been initialized yet.
+	/* check that the slave has not been initialized yet. */
 	if (SLAVE_AD_INFO(slave).port.slave != slave) {
 
-		// port initialization
+		/* port initialization */
 		port = &(SLAVE_AD_INFO(slave).port);
 
 		ad_initialize_port(port, bond->params.lacp_fast);
@@ -1865,28 +1851,30 @@ int bond_3ad_bind_slave(struct slave *slave)
 		__initialize_port_locks(slave);
 		port->slave = slave;
 		port->actor_port_number = SLAVE_AD_INFO(slave).id;
-		// key is determined according to the link speed, duplex and user key(which is yet not supported)
-		//              ------------------------------------------------------------
-		// Port key :   | User key                       |      Speed       |Duplex|
-		//              ------------------------------------------------------------
-		//              16                               6               1 0
-		port->actor_admin_port_key = 0;	// initialize this parameter
+		/* key is determined according to the link speed, duplex and
+		 * user key (which is not yet supported)
+		 */
+		port->actor_admin_port_key = 0;
 		port->actor_admin_port_key |= __get_duplex(port);
 		port->actor_admin_port_key |= (__get_link_speed(port) << 1);
 		port->actor_oper_port_key = port->actor_admin_port_key;
-		// if the port is not full duplex, then the port should be not lacp Enabled
+		/* if the port is not full duplex, then the port should not be
+		 * LACP enabled
+		 */
 		if (!(port->actor_oper_port_key & AD_DUPLEX_KEY_BITS))
 			port->sm_vars &= ~AD_PORT_LACP_ENABLED;
-		// actor system is the bond's system
+		/* actor system is the bond's system */
 		port->actor_system = BOND_AD_INFO(bond).system.sys_mac_addr;
-		// tx timer(to verify that no more than MAX_TX_IN_SECOND lacpdu's are sent in one second)
+		/* tx timer (to verify that no more than MAX_TX_IN_SECOND
+		 * lacpdus are sent in one second)
+		 */
 		port->sm_tx_timer_counter = ad_ticks_per_sec/AD_MAX_TX_IN_SECOND;
 		port->aggregator = NULL;
 		port->next_port_in_aggregator = NULL;
 
 		__disable_port(port);
 
-		// aggregator initialization
+		/* aggregator initialization */
 		aggregator = &(SLAVE_AD_INFO(slave).aggregator);
 
 		ad_initialize_agg(aggregator);
@@ -1897,8 +1885,6 @@ int bond_3ad_bind_slave(struct slave *slave)
 		aggregator->is_active = 0;
 		aggregator->num_of_ports = 0;
 	}
-
-	return 0;
 }
 
 /**
@@ -1918,16 +1904,13 @@ void bond_3ad_unbind_slave(struct slave *slave)
 	struct slave *slave_iter;
 	struct list_head *iter;
 
-	// find the aggregator related to this slave
 	aggregator = &(SLAVE_AD_INFO(slave).aggregator);
-
-	// find the port related to this slave
 	port = &(SLAVE_AD_INFO(slave).port);
 
-	// if slave is null, the whole port is not initialized
+	/* if slave is null, the whole port is not initialized */
 	if (!port->slave) {
-		pr_warning("Warning: %s: Trying to unbind an uninitialized port on %s\n",
-			   slave->bond->dev->name, slave->dev->name);
+		pr_warn("Warning: %s: Trying to unbind an uninitialized port on %s\n",
+			slave->bond->dev->name, slave->dev->name);
 		return;
 	}
 
@@ -1939,34 +1922,42 @@ void bond_3ad_unbind_slave(struct slave *slave)
 	__update_lacpdu_from_port(port);
 	ad_lacpdu_send(port);
 
-	// check if this aggregator is occupied
+	/* check if this aggregator is occupied */
 	if (aggregator->lag_ports) {
-		// check if there are other ports related to this aggregator except
-		// the port related to this slave(thats ensure us that there is a
-		// reason to search for new aggregator, and that we will find one
-		if ((aggregator->lag_ports != port) || (aggregator->lag_ports->next_port_in_aggregator)) {
-			// find new aggregator for the related port(s)
+		/* check if there are other ports related to this aggregator
+		 * except the port related to this slave (which ensures that
+		 * there is a reason to search for a new aggregator, and that
+		 * we will find one)
+		 */
+		if ((aggregator->lag_ports != port) ||
+		    (aggregator->lag_ports->next_port_in_aggregator)) {
+			/* find new aggregator for the related port(s) */
 			bond_for_each_slave(bond, slave_iter, iter) {
 				new_aggregator = &(SLAVE_AD_INFO(slave_iter).aggregator);
-				// if the new aggregator is empty, or it is connected to our port only
-				if (!new_aggregator->lag_ports
-				    || ((new_aggregator->lag_ports == port)
-					&& !new_aggregator->lag_ports->next_port_in_aggregator))
+				/* if the new aggregator is empty, or it is
+				 * connected to our port only
+				 */
+				if (!new_aggregator->lag_ports ||
+				    ((new_aggregator->lag_ports == port) &&
+				     !new_aggregator->lag_ports->next_port_in_aggregator))
 					break;
 			}
 			if (!slave_iter)
 				new_aggregator = NULL;
-			// if new aggregator found, copy the aggregator's parameters
-			// and connect the related lag_ports to the new aggregator
+
+			/* if new aggregator found, copy the aggregator's
+			 * parameters and connect the related lag_ports to the
+			 * new aggregator
+			 */
 			if ((new_aggregator) && ((!new_aggregator->lag_ports) || ((new_aggregator->lag_ports == port) && !new_aggregator->lag_ports->next_port_in_aggregator))) {
 				pr_debug("Some port(s) related to LAG %d - replaceing with LAG %d\n",
 					 aggregator->aggregator_identifier,
 					 new_aggregator->aggregator_identifier);
 
-				if ((new_aggregator->lag_ports == port) && new_aggregator->is_active) {
+				if ((new_aggregator->lag_ports == port) &&
+				    new_aggregator->is_active) {
 					pr_info("%s: Removing an active aggregator\n",
 						aggregator->slave->bond->dev->name);
-					// select new active aggregator
 					 select_new_active_agg = 1;
 				}
 
@@ -1982,30 +1973,33 @@ void bond_3ad_unbind_slave(struct slave *slave)
 				new_aggregator->is_active = aggregator->is_active;
 				new_aggregator->num_of_ports = aggregator->num_of_ports;
 
-				// update the information that is written on the ports about the aggregator
+				/* update the information that is written on
+				 * the ports about the aggregator
+				 */
 				for (temp_port = aggregator->lag_ports; temp_port;
 				     temp_port = temp_port->next_port_in_aggregator) {
 					temp_port->aggregator = new_aggregator;
 					temp_port->actor_port_aggregator_identifier = new_aggregator->aggregator_identifier;
 				}
 
-				// clear the aggregator
 				ad_clear_agg(aggregator);
 
 				if (select_new_active_agg)
 					ad_agg_selection_logic(__get_first_agg(port));
 			} else {
-				pr_warning("%s: Warning: unbinding aggregator, and could not find a new aggregator for its ports\n",
-					   slave->bond->dev->name);
+				pr_warn("%s: Warning: unbinding aggregator, and could not find a new aggregator for its ports\n",
+					slave->bond->dev->name);
 			}
-		} else { // in case that the only port related to this aggregator is the one we want to remove
+		} else {
+			/* in case that the only port related to this
+			 * aggregator is the one we want to remove
+			 */
 			select_new_active_agg = aggregator->is_active;
-			// clear the aggregator
 			ad_clear_agg(aggregator);
 			if (select_new_active_agg) {
 				pr_info("%s: Removing an active aggregator\n",
 					slave->bond->dev->name);
-				// select new active aggregator
+				/* select new active aggregator */
 				temp_aggregator = __get_first_agg(port);
 				if (temp_aggregator)
 					ad_agg_selection_logic(temp_aggregator);
@@ -2014,15 +2008,19 @@ void bond_3ad_unbind_slave(struct slave *slave)
 	}
 
 	pr_debug("Unbinding port %d\n", port->actor_port_number);
-	// find the aggregator that this port is connected to
+
+	/* find the aggregator that this port is connected to */
 	bond_for_each_slave(bond, slave_iter, iter) {
 		temp_aggregator = &(SLAVE_AD_INFO(slave_iter).aggregator);
 		prev_port = NULL;
-		// search the port in the aggregator's related ports
+		/* search the port in the aggregator's related ports */
 		for (temp_port = temp_aggregator->lag_ports; temp_port;
 		     prev_port = temp_port,
-			     temp_port = temp_port->next_port_in_aggregator) {
-			if (temp_port == port) { // the aggregator found - detach the port from this aggregator
+		     temp_port = temp_port->next_port_in_aggregator) {
+			if (temp_port == port) {
+				/* the aggregator found - detach the port from
+				 * this aggregator
+				 */
 				if (prev_port)
 					prev_port->next_port_in_aggregator = temp_port->next_port_in_aggregator;
 				else
@@ -2030,12 +2028,11 @@ void bond_3ad_unbind_slave(struct slave *slave)
 				temp_aggregator->num_of_ports--;
 				if (temp_aggregator->num_of_ports == 0) {
 					select_new_active_agg = temp_aggregator->is_active;
-					// clear the aggregator
 					ad_clear_agg(temp_aggregator);
 					if (select_new_active_agg) {
 						pr_info("%s: Removing an active aggregator\n",
 							slave->bond->dev->name);
-						// select new active aggregator
+						/* select new active aggregator */
 						ad_agg_selection_logic(__get_first_agg(port));
 					}
 				}
@@ -2069,21 +2066,23 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
 	struct port *port;
 
 	read_lock(&bond->lock);
+	rcu_read_lock();
 
-	//check if there are any slaves
+	/* check if there are any slaves */
 	if (!bond_has_slaves(bond))
 		goto re_arm;
 
-	// check if agg_select_timer timer after initialize is timed out
-	if (BOND_AD_INFO(bond).agg_select_timer && !(--BOND_AD_INFO(bond).agg_select_timer)) {
-		slave = bond_first_slave(bond);
+	/* check if the agg_select_timer has timed out after initialization */
+	if (BOND_AD_INFO(bond).agg_select_timer &&
+	    !(--BOND_AD_INFO(bond).agg_select_timer)) {
+		slave = bond_first_slave_rcu(bond);
 		port = slave ? &(SLAVE_AD_INFO(slave).port) : NULL;
 
-		// select the active aggregator for the bond
+		/* select the active aggregator for the bond */
 		if (port) {
 			if (!port->slave) {
-				pr_warning("%s: Warning: bond's first port is uninitialized\n",
-					   bond->dev->name);
+				pr_warn("%s: Warning: bond's first port is uninitialized\n",
+					bond->dev->name);
 				goto re_arm;
 			}
 
@@ -2093,12 +2092,12 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
 		bond_3ad_set_carrier(bond);
 	}
 
-	// for each port run the state machines
-	bond_for_each_slave(bond, slave, iter) {
+	/* for each port run the state machines */
+	bond_for_each_slave_rcu(bond, slave, iter) {
 		port = &(SLAVE_AD_INFO(slave).port);
 		if (!port->slave) {
-			pr_warning("%s: Warning: Found an uninitialized port\n",
-				   bond->dev->name);
+			pr_warn("%s: Warning: Found an uninitialized port\n",
+				bond->dev->name);
 			goto re_arm;
 		}
 
@@ -2114,7 +2113,7 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
 		ad_mux_machine(port);
 		ad_tx_machine(port);
 
-		// turn off the BEGIN bit, since we already handled it
+		/* turn off the BEGIN bit, since we already handled it */
 		if (port->sm_vars & AD_PORT_BEGIN)
 			port->sm_vars &= ~AD_PORT_BEGIN;
 
@@ -2122,9 +2121,9 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
 	}
 
 re_arm:
-	queue_delayed_work(bond->wq, &bond->ad_work, ad_delta_in_ticks);
-
+	rcu_read_unlock();
 	read_unlock(&bond->lock);
+	queue_delayed_work(bond->wq, &bond->ad_work, ad_delta_in_ticks);
 }
 
 /**
@@ -2137,7 +2136,8 @@ re_arm:
  * received frames (loopback). Since only the payload is given to this
  * function, it checks for loopback.
  */
-static int bond_3ad_rx_indication(struct lacpdu *lacpdu, struct slave *slave, u16 length)
+static int bond_3ad_rx_indication(struct lacpdu *lacpdu, struct slave *slave,
+				  u16 length)
 {
 	struct port *port;
 	int ret = RX_HANDLER_ANOTHER;
@@ -2147,8 +2147,8 @@ static int bond_3ad_rx_indication(struct lacpdu *lacpdu, struct slave *slave, u1
 		port = &(SLAVE_AD_INFO(slave).port);
 
 		if (!port->slave) {
-			pr_warning("%s: Warning: port of slave %s is uninitialized\n",
-				   slave->dev->name, slave->bond->dev->name);
+			pr_warn("%s: Warning: port of slave %s is uninitialized\n",
+				slave->dev->name, slave->bond->dev->name);
 			return ret;
 		}
 
@@ -2165,7 +2165,9 @@ static int bond_3ad_rx_indication(struct lacpdu *lacpdu, struct slave *slave, u1
 
 		case AD_TYPE_MARKER:
 			ret = RX_HANDLER_CONSUMED;
-			// No need to convert fields to Little Endian since we don't use the marker's fields.
+			/* No need to convert fields to Little Endian since we
+			 * don't use the marker's fields.
+			 */
 
 			switch (((struct bond_marker *)lacpdu)->tlv_type) {
 			case AD_MARKER_INFORMATION_SUBTYPE:
@@ -2203,8 +2205,8 @@ void bond_3ad_adapter_speed_changed(struct slave *slave)
 
 	/* if slave is null, the whole port is not initialized */
 	if (!port->slave) {
-		pr_warning("Warning: %s: speed changed for uninitialized port on %s\n",
-			   slave->bond->dev->name, slave->dev->name);
+		pr_warn("Warning: %s: speed changed for uninitialized port on %s\n",
+			slave->bond->dev->name, slave->dev->name);
 		return;
 	}
 
@@ -2236,8 +2238,8 @@ void bond_3ad_adapter_duplex_changed(struct slave *slave)
 
 	/* if slave is null, the whole port is not initialized */
 	if (!port->slave) {
-		pr_warning("%s: Warning: duplex changed for uninitialized port on %s\n",
-			   slave->bond->dev->name, slave->dev->name);
+		pr_warn("%s: Warning: duplex changed for uninitialized port on %s\n",
+			slave->bond->dev->name, slave->dev->name);
 		return;
 	}
 
@@ -2270,8 +2272,8 @@ void bond_3ad_handle_link_change(struct slave *slave, char link)
 
 	/* if slave is null, the whole port is not initialized */
 	if (!port->slave) {
-		pr_warning("Warning: %s: link status changed for uninitialized port on %s\n",
-			   slave->bond->dev->name, slave->dev->name);
+		pr_warn("Warning: %s: link status changed for uninitialized port on %s\n",
+			slave->bond->dev->name, slave->dev->name);
 		return;
 	}
 
@@ -2309,10 +2311,13 @@ void bond_3ad_handle_link_change(struct slave *slave, char link)
 	__release_state_machine_lock(port);
 }
 
-/*
- * set link state for bonding master: if we have an active
- * aggregator, we're up, if not, we're down.  Presumes that we cannot
- * have an active aggregator if there are no slaves with link up.
+/**
+ * bond_3ad_set_carrier - set link state for bonding master
+ * @bond: bonding structure
+ *
+ * If we have an active aggregator, we're up; if not, we're down.
+ * Presumes that we cannot have an active aggregator if there are
+ * no slaves with link up.
  *
  * This behavior complies with IEEE 802.3 section 43.3.9.
  *
@@ -2323,30 +2328,32 @@ int bond_3ad_set_carrier(struct bonding *bond)
 {
 	struct aggregator *active;
 	struct slave *first_slave;
+	int ret = 1;
 
-	first_slave = bond_first_slave(bond);
-	if (!first_slave)
-		return 0;
+	rcu_read_lock();
+	first_slave = bond_first_slave_rcu(bond);
+	if (!first_slave) {
+		ret = 0;
+		goto out;
+	}
 	active = __get_active_agg(&(SLAVE_AD_INFO(first_slave).aggregator));
 	if (active) {
 		/* are enough slaves available to consider link up? */
 		if (active->num_of_ports < bond->params.min_links) {
 			if (netif_carrier_ok(bond->dev)) {
 				netif_carrier_off(bond->dev);
-				return 1;
+				goto out;
 			}
 		} else if (!netif_carrier_ok(bond->dev)) {
 			netif_carrier_on(bond->dev);
-			return 1;
+			goto out;
 		}
-		return 0;
-	}
-
-	if (netif_carrier_ok(bond->dev)) {
+	} else if (netif_carrier_ok(bond->dev)) {
 		netif_carrier_off(bond->dev);
-		return 1;
 	}
-	return 0;
+out:
+	rcu_read_unlock();
+	return ret;
 }
 
 /**
@@ -2378,7 +2385,8 @@ int __bond_3ad_get_active_agg_info(struct bonding *bond,
 		ad_info->ports = aggregator->num_of_ports;
 		ad_info->actor_key = aggregator->actor_oper_aggregator_key;
 		ad_info->partner_key = aggregator->partner_oper_aggregator_key;
-		memcpy(ad_info->partner_system, aggregator->partner_system.mac_addr_value, ETH_ALEN);
+		memcpy(ad_info->partner_system,
+		       aggregator->partner_system.mac_addr_value, ETH_ALEN);
 		return 0;
 	}
 
@@ -2406,13 +2414,12 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
 	struct list_head *iter;
 	int slaves_in_agg;
 	int slave_agg_no;
-	int res = 1;
 	int agg_id;
 
 	if (__bond_3ad_get_active_agg_info(bond, &ad_info)) {
 		pr_debug("%s: Error: __bond_3ad_get_active_agg_info failed\n",
 			 dev->name);
-		goto out;
+		goto err_free;
 	}
 
 	slaves_in_agg = ad_info.ports;
@@ -2420,7 +2427,7 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
 
 	if (slaves_in_agg == 0) {
 		pr_debug("%s: Error: active aggregator is empty\n", dev->name);
-		goto out;
+		goto err_free;
 	}
 
 	slave_agg_no = bond_xmit_hash(bond, skb, slaves_in_agg);
@@ -2439,7 +2446,7 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
 		}
 
 		if (SLAVE_IS_OK(slave)) {
-			res = bond_dev_queue_xmit(bond, skb, slave->dev);
+			bond_dev_queue_xmit(bond, skb, slave->dev);
 			goto out;
 		}
 	}
@@ -2447,21 +2454,23 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
 	if (slave_agg_no >= 0) {
 		pr_err("%s: Error: Couldn't find a slave to tx on for aggregator ID %d\n",
 		       dev->name, agg_id);
-		goto out;
+		goto err_free;
 	}
 
 	/* we couldn't find any suitable slave after the agg_no, so use the
-	 * first suitable found, if found. */
+	 * first suitable found, if found.
+	 */
 	if (first_ok_slave)
-		res = bond_dev_queue_xmit(bond, skb, first_ok_slave->dev);
+		bond_dev_queue_xmit(bond, skb, first_ok_slave->dev);
+	else
+		goto err_free;
 
 out:
-	if (res) {
-		/* no suitable interface, frame not sent */
-		kfree_skb(skb);
-	}
-
 	return NETDEV_TX_OK;
+err_free:
+	/* no suitable interface, frame not sent */
+	kfree_skb(skb);
+	goto out;
 }
 
 int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding *bond,
@@ -2483,7 +2492,10 @@ int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding *bond,
 	return ret;
 }
 
-/*
+/**
+ * bond_3ad_update_lacp_rate - change the lacp rate
+ * @bond: bonding struct
+ *
  * When the lacp_rate parameter is modified via sysfs,
  * update actor_oper_port_state of each port.
  *
diff --git a/drivers/net/bonding/bond_3ad.h b/drivers/net/bonding/bond_3ad.h
index 5d91ad0cc041..13dc9d3c5e34 100644
--- a/drivers/net/bonding/bond_3ad.h
+++ b/drivers/net/bonding/bond_3ad.h
@@ -265,7 +265,7 @@ struct ad_slave_info {
 
 // ================= AD Exported functions to the main bonding code ==================
 void bond_3ad_initialize(struct bonding *bond, u16 tick_resolution);
-int  bond_3ad_bind_slave(struct slave *slave);
+void bond_3ad_bind_slave(struct slave *slave);
 void bond_3ad_unbind_slave(struct slave *slave);
 void bond_3ad_state_machine_handler(struct work_struct *);
 void bond_3ad_initiate_agg_selection(struct bonding *bond, int timeout);
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 02872405d35d..a2c47476804d 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -12,8 +12,7 @@
  * for more details.
  *
  * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
  *
  * The full GNU General Public License is included in this distribution in the
  * file called LICENSE.
@@ -470,7 +469,7 @@ static void rlb_teach_disabled_mac_on_primary(struct bonding *bond, u8 addr[])
 
 /* slave being removed should not be active at this point
  *
- * Caller must hold bond lock for read
+ * Caller must hold rtnl.
  */
 static void rlb_clear_slave(struct bonding *bond, struct slave *slave)
 {
@@ -816,7 +815,7 @@ static void rlb_rebalance(struct bonding *bond)
 	for (; hash_index != RLB_NULL_INDEX;
 	     hash_index = client_info->used_next) {
 		client_info = &(bond_info->rx_hashtbl[hash_index]);
-		assigned_slave = rlb_next_rx_slave(bond);
+		assigned_slave = __rlb_next_rx_slave(bond);
 		if (assigned_slave && (client_info->slave != assigned_slave)) {
 			client_info->slave = assigned_slave;
 			client_info->ntt = 1;
@@ -1372,7 +1371,6 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
 	int do_tx_balance = 1;
 	u32 hash_index = 0;
 	const u8 *hash_start = NULL;
-	int res = 1;
 	struct ipv6hdr *ip6hdr;
 
 	skb_reset_mac_header(skb);
@@ -1470,7 +1468,8 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
 			       ETH_ALEN);
 		}
 
-		res = bond_dev_queue_xmit(bond, skb, tx_slave->dev);
+		bond_dev_queue_xmit(bond, skb, tx_slave->dev);
+		goto out;
 	} else {
 		if (tx_slave) {
 			_lock_tx_hashtbl(bond);
@@ -1479,11 +1478,9 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
 		}
 	}
 
-	if (res) {
-		/* no suitable interface, frame not sent */
-		kfree_skb(skb);
-	}
-
+	/* no suitable interface, frame not sent */
+	kfree_skb(skb);
+out:
 	return NETDEV_TX_OK;
 }
 
@@ -1495,14 +1492,14 @@ void bond_alb_monitor(struct work_struct *work)
 	struct list_head *iter;
 	struct slave *slave;
 
-	read_lock(&bond->lock);
-
 	if (!bond_has_slaves(bond)) {
 		bond_info->tx_rebalance_counter = 0;
 		bond_info->lp_counter = 0;
 		goto re_arm;
 	}
 
+	rcu_read_lock();
+
 	bond_info->tx_rebalance_counter++;
 	bond_info->lp_counter++;
 
@@ -1515,7 +1512,7 @@ void bond_alb_monitor(struct work_struct *work)
 		 */
 		read_lock(&bond->curr_slave_lock);
 
-		bond_for_each_slave(bond, slave, iter)
+		bond_for_each_slave_rcu(bond, slave, iter)
 			alb_send_learning_packets(slave, slave->dev->dev_addr);
 
 		read_unlock(&bond->curr_slave_lock);
@@ -1528,7 +1525,7 @@ void bond_alb_monitor(struct work_struct *work)
 
 		read_lock(&bond->curr_slave_lock);
 
-		bond_for_each_slave(bond, slave, iter) {
+		bond_for_each_slave_rcu(bond, slave, iter) {
 			tlb_clear_slave(bond, slave, 1);
 			if (slave == bond->curr_active_slave) {
 				SLAVE_TLB_INFO(slave).load =
@@ -1552,11 +1549,9 @@ void bond_alb_monitor(struct work_struct *work)
 			 * dev_set_promiscuity requires rtnl and
 			 * nothing else.  Avoid race with bond_close.
 			 */
-			read_unlock(&bond->lock);
-			if (!rtnl_trylock()) {
-				read_lock(&bond->lock);
+			rcu_read_unlock();
+			if (!rtnl_trylock())
 				goto re_arm;
-			}
 
 			bond_info->rlb_promisc_timeout_counter = 0;
 
@@ -1568,7 +1563,7 @@ void bond_alb_monitor(struct work_struct *work)
 			bond_info->primary_is_promisc = 0;
 
 			rtnl_unlock();
-			read_lock(&bond->lock);
+			rcu_read_lock();
 		}
 
 		if (bond_info->rlb_rebalance) {
@@ -1590,11 +1585,9 @@ void bond_alb_monitor(struct work_struct *work)
 			}
 		}
 	}
-
+	rcu_read_unlock();
 re_arm:
 	queue_delayed_work(bond->wq, &bond->alb_work, alb_delta_in_ticks);
-
-	read_unlock(&bond->lock);
 }
 
 /* assumption: called before the slave is attached to the bond
@@ -1680,14 +1673,11 @@ void bond_alb_handle_link_change(struct bonding *bond, struct slave *slave, char
  * If new_slave is NULL, caller must hold curr_slave_lock or
  * bond->lock for write.
  *
- * If new_slave is not NULL, caller must hold RTNL, bond->lock for
- * read and curr_slave_lock for write.  Processing here may sleep, so
- * no other locks may be held.
+ * If new_slave is not NULL, caller must hold RTNL, curr_slave_lock
+ * for write.  Processing here may sleep, so no other locks may be held.
  */
 void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave)
 	__releases(&bond->curr_slave_lock)
-	__releases(&bond->lock)
-	__acquires(&bond->lock)
 	__acquires(&bond->curr_slave_lock)
 {
 	struct slave *swap_slave;
@@ -1723,7 +1713,6 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
 	tlb_clear_slave(bond, new_slave, 1);
 
 	write_unlock_bh(&bond->curr_slave_lock);
-	read_unlock(&bond->lock);
 
 	ASSERT_RTNL();
 
@@ -1749,11 +1738,9 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
 		/* swap mac address */
 		alb_swap_mac_addr(swap_slave, new_slave);
 		alb_fasten_mac_swap(bond, swap_slave, new_slave);
-		read_lock(&bond->lock);
 	} else {
 		/* set the new_slave to the bond mac address */
 		alb_set_slave_mac_addr(new_slave, bond->dev->dev_addr);
-		read_lock(&bond->lock);
 		alb_send_learning_packets(new_slave, bond->dev->dev_addr);
 	}
 
diff --git a/drivers/net/bonding/bond_alb.h b/drivers/net/bonding/bond_alb.h
index 4226044efd08..e09dd4bfafff 100644
--- a/drivers/net/bonding/bond_alb.h
+++ b/drivers/net/bonding/bond_alb.h
@@ -12,8 +12,7 @@
  * for more details.
  *
  * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
  *
  * The full GNU General Public License is included in this distribution in the
  * file called LICENSE.
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 6191b551a0e8..a7db819bca92 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -79,7 +79,6 @@
 #include <net/pkt_sched.h>
 #include <linux/rculist.h>
 #include <net/flow_keys.h>
-#include <linux/reciprocal_div.h>
 #include "bonding.h"
 #include "bond_3ad.h"
 #include "bond_alb.h"
@@ -87,13 +86,11 @@
 /*---------------------------- Module parameters ----------------------------*/
 
 /* monitor all links that often (in milliseconds). <=0 disables monitoring */
-#define BOND_LINK_MON_INTERV	0
-#define BOND_LINK_ARP_INTERV	0
 
 static int max_bonds	= BOND_DEFAULT_MAX_BONDS;
 static int tx_queues	= BOND_DEFAULT_TX_QUEUES;
 static int num_peer_notif = 1;
-static int miimon	= BOND_LINK_MON_INTERV;
+static int miimon;
 static int updelay;
 static int downdelay;
 static int use_carrier	= 1;
@@ -104,7 +101,7 @@ static char *lacp_rate;
 static int min_links;
 static char *ad_select;
 static char *xmit_hash_policy;
-static int arp_interval = BOND_LINK_ARP_INTERV;
+static int arp_interval;
 static char *arp_ip_target[BOND_MAX_ARP_TARGETS];
 static char *arp_validate;
 static char *arp_all_targets;
@@ -113,6 +110,7 @@ static int all_slaves_active;
 static struct bond_params bonding_defaults;
 static int resend_igmp = BOND_DEFAULT_RESEND_IGMP;
 static int packets_per_slave = 1;
+static int lp_interval = BOND_ALB_DEFAULT_LP_INTERVAL;
 
 module_param(max_bonds, int, 0);
 MODULE_PARM_DESC(max_bonds, "Max number of bonded devices");
@@ -189,6 +187,10 @@ module_param(packets_per_slave, int, 0);
 MODULE_PARM_DESC(packets_per_slave, "Packets to send per slave in balance-rr "
 				    "mode; 0 for a random slave, 1 packet per "
 				    "slave (default), >1 packets per slave.");
+module_param(lp_interval, uint, 0);
+MODULE_PARM_DESC(lp_interval, "The number of seconds between instances where "
+			      "the bonding driver sends learning packets to "
+			      "each slaves peer switch. The default is 1.");
 
 /*----------------------------- Global variables ----------------------------*/
 
@@ -204,67 +206,6 @@ static int bond_mode	= BOND_MODE_ROUNDROBIN;
 static int xmit_hashtype = BOND_XMIT_POLICY_LAYER2;
 static int lacp_fast;
 
-const struct bond_parm_tbl bond_lacp_tbl[] = {
-{	"slow",		AD_LACP_SLOW},
-{	"fast",		AD_LACP_FAST},
-{	NULL,		-1},
-};
-
-const struct bond_parm_tbl bond_mode_tbl[] = {
-{	"balance-rr",		BOND_MODE_ROUNDROBIN},
-{	"active-backup",	BOND_MODE_ACTIVEBACKUP},
-{	"balance-xor",		BOND_MODE_XOR},
-{	"broadcast",		BOND_MODE_BROADCAST},
-{	"802.3ad",		BOND_MODE_8023AD},
-{	"balance-tlb",		BOND_MODE_TLB},
-{	"balance-alb",		BOND_MODE_ALB},
-{	NULL,			-1},
-};
-
-const struct bond_parm_tbl xmit_hashtype_tbl[] = {
-{	"layer2",		BOND_XMIT_POLICY_LAYER2},
-{	"layer3+4",		BOND_XMIT_POLICY_LAYER34},
-{	"layer2+3",		BOND_XMIT_POLICY_LAYER23},
-{	"encap2+3",		BOND_XMIT_POLICY_ENCAP23},
-{	"encap3+4",		BOND_XMIT_POLICY_ENCAP34},
-{	NULL,			-1},
-};
-
-const struct bond_parm_tbl arp_all_targets_tbl[] = {
-{	"any",			BOND_ARP_TARGETS_ANY},
-{	"all",			BOND_ARP_TARGETS_ALL},
-{	NULL,			-1},
-};
-
-const struct bond_parm_tbl arp_validate_tbl[] = {
-{	"none",			BOND_ARP_VALIDATE_NONE},
-{	"active",		BOND_ARP_VALIDATE_ACTIVE},
-{	"backup",		BOND_ARP_VALIDATE_BACKUP},
-{	"all",			BOND_ARP_VALIDATE_ALL},
-{	NULL,			-1},
-};
-
-const struct bond_parm_tbl fail_over_mac_tbl[] = {
-{	"none",			BOND_FOM_NONE},
-{	"active",		BOND_FOM_ACTIVE},
-{	"follow",		BOND_FOM_FOLLOW},
-{	NULL,			-1},
-};
-
-const struct bond_parm_tbl pri_reselect_tbl[] = {
-{	"always",		BOND_PRI_RESELECT_ALWAYS},
-{	"better",		BOND_PRI_RESELECT_BETTER},
-{	"failure",		BOND_PRI_RESELECT_FAILURE},
-{	NULL,			-1},
-};
-
-struct bond_parm_tbl ad_select_tbl[] = {
-{	"stable",	BOND_AD_STABLE},
-{	"bandwidth",	BOND_AD_BANDWIDTH},
-{	"count",	BOND_AD_COUNT},
-{	NULL,		-1},
-};
-
 /*-------------------------- Forward declarations ---------------------------*/
 
 static int bond_init(struct net_device *bond_dev);
@@ -299,7 +240,7 @@ const char *bond_mode_name(int mode)
  * @skb: hw accel VLAN tagged skb to transmit
  * @slave_dev: slave that is supposed to xmit this skbuff
  */
-int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
+void bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
 			struct net_device *slave_dev)
 {
 	skb->dev = slave_dev;
@@ -312,8 +253,6 @@ int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
 		bond_netpoll_send_skb(bond_get_slave_by_dev(bond, slave_dev), skb);
 	else
 		dev_queue_xmit(skb);
-
-	return 0;
 }
 
 /*
@@ -463,6 +402,22 @@ static void bond_update_speed_duplex(struct slave *slave)
 	return;
 }
 
+const char *bond_slave_link_status(s8 link)
+{
+	switch (link) {
+	case BOND_LINK_UP:
+		return "up";
+	case BOND_LINK_FAIL:
+		return "going down";
+	case BOND_LINK_DOWN:
+		return "down";
+	case BOND_LINK_BACK:
+		return "going back";
+	default:
+		return "unknown";
+	}
+}
+
 /*
  * if <dev> supports MII link status reporting, check its link status.
  *
@@ -591,33 +546,22 @@ static int bond_set_allmulti(struct bonding *bond, int inc)
  * device and retransmit an IGMP JOIN request to the current active
  * slave.
  */
-static void bond_resend_igmp_join_requests(struct bonding *bond)
+static void bond_resend_igmp_join_requests_delayed(struct work_struct *work)
 {
+	struct bonding *bond = container_of(work, struct bonding,
+					    mcast_work.work);
+
 	if (!rtnl_trylock()) {
 		queue_delayed_work(bond->wq, &bond->mcast_work, 1);
 		return;
 	}
 	call_netdevice_notifiers(NETDEV_RESEND_IGMP, bond->dev);
-	rtnl_unlock();
 
-	/* We use curr_slave_lock to protect against concurrent access to
-	 * igmp_retrans from multiple running instances of this function and
-	 * bond_change_active_slave
-	 */
-	write_lock_bh(&bond->curr_slave_lock);
 	if (bond->igmp_retrans > 1) {
 		bond->igmp_retrans--;
 		queue_delayed_work(bond->wq, &bond->mcast_work, HZ/5);
 	}
-	write_unlock_bh(&bond->curr_slave_lock);
-}
-
-static void bond_resend_igmp_join_requests_delayed(struct work_struct *work)
-{
-	struct bonding *bond = container_of(work, struct bonding,
-					    mcast_work.work);
-
-	bond_resend_igmp_join_requests(bond);
+	rtnl_unlock();
 }
 
 /* Flush bond's hardware addresses from slave
@@ -697,14 +641,12 @@ static void bond_set_dev_addr(struct net_device *bond_dev,
  *
  * Perform special MAC address swapping for fail_over_mac settings
  *
- * Called with RTNL, bond->lock for read, curr_slave_lock for write_bh.
+ * Called with RTNL, curr_slave_lock for write_bh.
  */
 static void bond_do_fail_over_mac(struct bonding *bond,
 				  struct slave *new_active,
 				  struct slave *old_active)
 	__releases(&bond->curr_slave_lock)
-	__releases(&bond->lock)
-	__acquires(&bond->lock)
 	__acquires(&bond->curr_slave_lock)
 {
 	u8 tmp_mac[ETH_ALEN];
@@ -715,9 +657,7 @@ static void bond_do_fail_over_mac(struct bonding *bond,
 	case BOND_FOM_ACTIVE:
 		if (new_active) {
 			write_unlock_bh(&bond->curr_slave_lock);
-			read_unlock(&bond->lock);
 			bond_set_dev_addr(bond->dev, new_active->dev);
-			read_lock(&bond->lock);
 			write_lock_bh(&bond->curr_slave_lock);
 		}
 		break;
@@ -731,7 +671,6 @@ static void bond_do_fail_over_mac(struct bonding *bond,
 			return;
 
 		write_unlock_bh(&bond->curr_slave_lock);
-		read_unlock(&bond->lock);
 
 		if (old_active) {
 			memcpy(tmp_mac, new_active->dev->dev_addr, ETH_ALEN);
@@ -761,7 +700,6 @@ static void bond_do_fail_over_mac(struct bonding *bond,
 			pr_err("%s: Error %d setting MAC of slave %s\n",
 			       bond->dev->name, -rv, new_active->dev->name);
 out:
-		read_lock(&bond->lock);
 		write_lock_bh(&bond->curr_slave_lock);
 		break;
 	default:
@@ -821,7 +759,11 @@ static struct slave *bond_find_best_slave(struct bonding *bond)
 
 static bool bond_should_notify_peers(struct bonding *bond)
 {
-	struct slave *slave = bond->curr_active_slave;
+	struct slave *slave;
+
+	rcu_read_lock();
+	slave = rcu_dereference(bond->curr_active_slave);
+	rcu_read_unlock();
 
 	pr_debug("bond_should_notify_peers: bond %s slave %s\n",
 		 bond->dev->name, slave ? slave->dev->name : "NULL");
@@ -846,8 +788,7 @@ static bool bond_should_notify_peers(struct bonding *bond)
  * because it is apparently the best available slave we have, even though its
  * updelay hasn't timed out yet.
  *
- * If new_active is not NULL, caller must hold bond->lock for read and
- * curr_slave_lock for write_bh.
+ * If new_active is not NULL, caller must hold curr_slave_lock for write_bh.
  */
 void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
 {
@@ -916,14 +857,12 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
 			}
 
 			write_unlock_bh(&bond->curr_slave_lock);
-			read_unlock(&bond->lock);
 
 			call_netdevice_notifiers(NETDEV_BONDING_FAILOVER, bond->dev);
 			if (should_notify_peers)
 				call_netdevice_notifiers(NETDEV_NOTIFY_PEERS,
 							 bond->dev);
 
-			read_lock(&bond->lock);
 			write_lock_bh(&bond->curr_slave_lock);
 		}
 	}
@@ -949,7 +888,7 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
  * - The primary_slave has got its link back.
  * - A slave has got its link back and there's no old curr_active_slave.
  *
- * Caller must hold bond->lock for read and curr_slave_lock for write_bh.
+ * Caller must hold curr_slave_lock for write_bh.
  */
 void bond_select_active_slave(struct bonding *bond)
 {
@@ -1589,16 +1528,20 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 		goto err_unregister;
 	}
 
+	res = bond_sysfs_slave_add(new_slave);
+	if (res) {
+		pr_debug("Error %d calling bond_sysfs_slave_add\n", res);
+		goto err_upper_unlink;
+	}
+
 	bond->slave_cnt++;
 	bond_compute_features(bond);
 	bond_set_carrier(bond);
 
 	if (USES_PRIMARY(bond->params.mode)) {
-		read_lock(&bond->lock);
 		write_lock_bh(&bond->curr_slave_lock);
 		bond_select_active_slave(bond);
 		write_unlock_bh(&bond->curr_slave_lock);
-		read_unlock(&bond->lock);
 	}
 
 	pr_info("%s: enslaving %s as a%s interface with a%s link.\n",
@@ -1610,6 +1553,9 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 	return 0;
 
 /* Undo stages on error */
+err_upper_unlink:
+	bond_upper_dev_unlink(bond_dev, slave_dev);
+
 err_unregister:
 	netdev_rx_handler_unregister(slave_dev);
 
@@ -1618,19 +1564,13 @@ err_detach:
 		bond_hw_addr_flush(bond_dev, slave_dev);
 
 	vlan_vids_del_by_dev(slave_dev, bond_dev);
-	write_lock_bh(&bond->lock);
 	if (bond->primary_slave == new_slave)
 		bond->primary_slave = NULL;
 	if (bond->curr_active_slave == new_slave) {
-		bond_change_active_slave(bond, NULL);
-		write_unlock_bh(&bond->lock);
-		read_lock(&bond->lock);
 		write_lock_bh(&bond->curr_slave_lock);
+		bond_change_active_slave(bond, NULL);
 		bond_select_active_slave(bond);
 		write_unlock_bh(&bond->curr_slave_lock);
-		read_unlock(&bond->lock);
-	} else {
-		write_unlock_bh(&bond->lock);
 	}
 	slave_disable_netpoll(new_slave);
 
@@ -1658,7 +1598,7 @@ err_free:
 err_undo_flags:
 	/* Enslave of first slave has failed and we need to fix master's mac */
 	if (!bond_has_slaves(bond) &&
-	    ether_addr_equal(bond_dev->dev_addr, slave_dev->dev_addr))
+	    ether_addr_equal_64bits(bond_dev->dev_addr, slave_dev->dev_addr))
 		eth_hw_addr_random(bond_dev);
 
 	return res;
@@ -1695,23 +1635,21 @@ static int __bond_release_one(struct net_device *bond_dev,
 	}
 
 	block_netpoll_tx();
-	write_lock_bh(&bond->lock);
 
 	slave = bond_get_slave_by_dev(bond, slave_dev);
 	if (!slave) {
 		/* not a slave of this bond */
 		pr_info("%s: %s not enslaved\n",
 			bond_dev->name, slave_dev->name);
-		write_unlock_bh(&bond->lock);
 		unblock_netpoll_tx();
 		return -EINVAL;
 	}
 
-	write_unlock_bh(&bond->lock);
-
 	/* release the slave from its bond */
 	bond->slave_cnt--;
 
+	bond_sysfs_slave_del(slave);
+
 	bond_upper_dev_unlink(bond_dev, slave_dev);
 	/* unregister rx_handler early so bond_handle_frame wouldn't be called
 	 * for this slave anymore.
@@ -1720,12 +1658,10 @@ static int __bond_release_one(struct net_device *bond_dev,
 	write_lock_bh(&bond->lock);
 
 	/* Inform AD package of unbinding of slave. */
-	if (bond->params.mode == BOND_MODE_8023AD) {
-		/* must be called before the slave is
-		 * detached from the list
-		 */
+	if (bond->params.mode == BOND_MODE_8023AD)
 		bond_3ad_unbind_slave(slave);
-	}
+
+	write_unlock_bh(&bond->lock);
 
 	pr_info("%s: releasing %s interface %s\n",
 		bond_dev->name,
@@ -1737,7 +1673,7 @@ static int __bond_release_one(struct net_device *bond_dev,
 	bond->current_arp_slave = NULL;
 
 	if (!all && !bond->params.fail_over_mac) {
-		if (ether_addr_equal(bond_dev->dev_addr, slave->perm_hwaddr) &&
+		if (ether_addr_equal_64bits(bond_dev->dev_addr, slave->perm_hwaddr) &&
 		    bond_has_slaves(bond))
 			pr_warn("%s: Warning: the permanent HWaddr of %s - %pM - is still in use by %s. Set the HWaddr of %s to a different address to avoid conflicts.\n",
 				   bond_dev->name, slave_dev->name,
@@ -1748,8 +1684,11 @@ static int __bond_release_one(struct net_device *bond_dev,
 	if (bond->primary_slave == slave)
 		bond->primary_slave = NULL;
 
-	if (oldcurrent == slave)
+	if (oldcurrent == slave) {
+		write_lock_bh(&bond->curr_slave_lock);
 		bond_change_active_slave(bond, NULL);
+		write_unlock_bh(&bond->curr_slave_lock);
+	}
 
 	if (bond_is_lb(bond)) {
 		/* Must be called only after the slave has been
@@ -1757,9 +1696,7 @@ static int __bond_release_one(struct net_device *bond_dev,
 		 * has been cleared (if our_slave == old_current),
 		 * but before a new active slave is selected.
 		 */
-		write_unlock_bh(&bond->lock);
 		bond_alb_deinit_slave(bond, slave);
-		write_lock_bh(&bond->lock);
 	}
 
 	if (all) {
@@ -1770,15 +1707,11 @@ static int __bond_release_one(struct net_device *bond_dev,
 		 * is no concern that another slave add/remove event
 		 * will interfere.
 		 */
-		write_unlock_bh(&bond->lock);
-		read_lock(&bond->lock);
 		write_lock_bh(&bond->curr_slave_lock);
 
 		bond_select_active_slave(bond);
 
 		write_unlock_bh(&bond->curr_slave_lock);
-		read_unlock(&bond->lock);
-		write_lock_bh(&bond->lock);
 	}
 
 	if (!bond_has_slaves(bond)) {
@@ -1793,7 +1726,6 @@ static int __bond_release_one(struct net_device *bond_dev,
 		}
 	}
 
-	write_unlock_bh(&bond->lock);
 	unblock_netpoll_tx();
 	synchronize_rcu();
 
@@ -1928,7 +1860,7 @@ static int bond_miimon_inspect(struct bonding *bond)
 
 	ignore_updelay = !bond->curr_active_slave ? true : false;
 
-	bond_for_each_slave(bond, slave, iter) {
+	bond_for_each_slave_rcu(bond, slave, iter) {
 		slave->new_link = BOND_LINK_NOCHANGE;
 
 		link_state = bond_check_dev_link(bond, slave->dev, 0);
@@ -2119,48 +2051,42 @@ do_failover:
  * an acquisition of appropriate locks followed by a commit phase to
  * implement whatever link state changes are indicated.
  */
-void bond_mii_monitor(struct work_struct *work)
+static void bond_mii_monitor(struct work_struct *work)
 {
 	struct bonding *bond = container_of(work, struct bonding,
 					    mii_work.work);
 	bool should_notify_peers = false;
 	unsigned long delay;
 
-	read_lock(&bond->lock);
-
 	delay = msecs_to_jiffies(bond->params.miimon);
 
 	if (!bond_has_slaves(bond))
 		goto re_arm;
 
+	rcu_read_lock();
+
 	should_notify_peers = bond_should_notify_peers(bond);
 
 	if (bond_miimon_inspect(bond)) {
-		read_unlock(&bond->lock);
+		rcu_read_unlock();
 
 		/* Race avoidance with bond_close cancel of workqueue */
 		if (!rtnl_trylock()) {
-			read_lock(&bond->lock);
 			delay = 1;
 			should_notify_peers = false;
 			goto re_arm;
 		}
 
-		read_lock(&bond->lock);
-
 		bond_miimon_commit(bond);
 
-		read_unlock(&bond->lock);
 		rtnl_unlock();	/* might sleep, hold no other locks */
-		read_lock(&bond->lock);
-	}
+	} else
+		rcu_read_unlock();
 
 re_arm:
 	if (bond->params.miimon)
 		queue_delayed_work(bond->wq, &bond->mii_work, delay);
 
-	read_unlock(&bond->lock);
-
 	if (should_notify_peers) {
 		if (!rtnl_trylock())
 			return;
@@ -2414,7 +2340,7 @@ static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act,
  * arp is transmitted to generate traffic. see activebackup_arp_monitor for
  * arp monitoring in active backup mode.
  */
-void bond_loadbalance_arp_mon(struct work_struct *work)
+static void bond_loadbalance_arp_mon(struct work_struct *work)
 {
 	struct bonding *bond = container_of(work, struct bonding,
 					    arp_work.work);
@@ -2422,12 +2348,12 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
 	struct list_head *iter;
 	int do_failover = 0;
 
-	read_lock(&bond->lock);
-
 	if (!bond_has_slaves(bond))
 		goto re_arm;
 
-	oldcurrent = bond->curr_active_slave;
+	rcu_read_lock();
+
+	oldcurrent = ACCESS_ONCE(bond->curr_active_slave);
 	/* see if any of the previous devices are up now (i.e. they have
 	 * xmt and rcv traffic). the curr_active_slave does not come into
 	 * the picture unless it is null. also, slave->jiffies is not needed
@@ -2436,7 +2362,7 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
 	 * TODO: what about up/down delay in arp mode? it wasn't here before
 	 *       so it can wait
 	 */
-	bond_for_each_slave(bond, slave, iter) {
+	bond_for_each_slave_rcu(bond, slave, iter) {
 		unsigned long trans_start = dev_trans_start(slave->dev);
 
 		if (slave->link != BOND_LINK_UP) {
@@ -2498,7 +2424,14 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
 			bond_arp_send_all(bond, slave);
 	}
 
+	rcu_read_unlock();
+
 	if (do_failover) {
+		/* the bond_select_active_slave must hold RTNL
+		 * and curr_slave_lock for write.
+		 */
+		if (!rtnl_trylock())
+			goto re_arm;
 		block_netpoll_tx();
 		write_lock_bh(&bond->curr_slave_lock);
 
@@ -2506,14 +2439,13 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
 
 		write_unlock_bh(&bond->curr_slave_lock);
 		unblock_netpoll_tx();
+		rtnl_unlock();
 	}
 
 re_arm:
 	if (bond->params.arp_interval)
 		queue_delayed_work(bond->wq, &bond->arp_work,
 				   msecs_to_jiffies(bond->params.arp_interval));
-
-	read_unlock(&bond->lock);
 }
 
 /*
@@ -2522,7 +2454,7 @@ re_arm:
  * place for the slave.  Returns 0 if no changes are found, >0 if changes
  * to link states must be committed.
  *
- * Called with bond->lock held for read.
+ * Called with rcu_read_lock() held.
  */
 static int bond_ab_arp_inspect(struct bonding *bond)
 {
@@ -2531,7 +2463,7 @@ static int bond_ab_arp_inspect(struct bonding *bond)
 	struct slave *slave;
 	int commit = 0;
 
-	bond_for_each_slave(bond, slave, iter) {
+	bond_for_each_slave_rcu(bond, slave, iter) {
 		slave->new_link = BOND_LINK_NOCHANGE;
 		last_rx = slave_last_rx(bond, slave);
 
@@ -2593,7 +2525,7 @@ static int bond_ab_arp_inspect(struct bonding *bond)
  * Called to commit link state changes noted by inspection step of
  * active-backup mode ARP monitor.
  *
- * Called with RTNL and bond->lock for read.
+ * Called with RTNL hold.
  */
 static void bond_ab_arp_commit(struct bonding *bond)
 {
@@ -2668,19 +2600,20 @@ do_failover:
 /*
  * Send ARP probes for active-backup mode ARP monitor.
  *
- * Called with bond->lock held for read.
+ * Called with rcu_read_lock() held.
  */
 static void bond_ab_arp_probe(struct bonding *bond)
 {
-	struct slave *slave, *before = NULL, *new_slave = NULL;
+	struct slave *slave, *before = NULL, *new_slave = NULL,
+		     *curr_arp_slave = rcu_dereference(bond->current_arp_slave);
 	struct list_head *iter;
 	bool found = false;
 
 	read_lock(&bond->curr_slave_lock);
 
-	if (bond->current_arp_slave && bond->curr_active_slave)
+	if (curr_arp_slave && bond->curr_active_slave)
 		pr_info("PROBE: c_arp %s && cas %s BAD\n",
-			bond->current_arp_slave->dev->name,
+			curr_arp_slave->dev->name,
 			bond->curr_active_slave->dev->name);
 
 	if (bond->curr_active_slave) {
@@ -2696,15 +2629,15 @@ static void bond_ab_arp_probe(struct bonding *bond)
 	 * for becoming the curr_active_slave
 	 */
 
-	if (!bond->current_arp_slave) {
-		bond->current_arp_slave = bond_first_slave(bond);
-		if (!bond->current_arp_slave)
+	if (!curr_arp_slave) {
+		curr_arp_slave = bond_first_slave_rcu(bond);
+		if (!curr_arp_slave)
 			return;
 	}
 
-	bond_set_slave_inactive_flags(bond->current_arp_slave);
+	bond_set_slave_inactive_flags(curr_arp_slave);
 
-	bond_for_each_slave(bond, slave, iter) {
+	bond_for_each_slave_rcu(bond, slave, iter) {
 		if (!found && !before && IS_UP(slave->dev))
 			before = slave;
 
@@ -2727,7 +2660,7 @@ static void bond_ab_arp_probe(struct bonding *bond)
 			pr_info("%s: backup interface %s is now down.\n",
 				bond->dev->name, slave->dev->name);
 		}
-		if (slave == bond->current_arp_slave)
+		if (slave == curr_arp_slave)
 			found = true;
 	}
 
@@ -2741,54 +2674,48 @@ static void bond_ab_arp_probe(struct bonding *bond)
 	bond_set_slave_active_flags(new_slave);
 	bond_arp_send_all(bond, new_slave);
 	new_slave->jiffies = jiffies;
-	bond->current_arp_slave = new_slave;
-
+	rcu_assign_pointer(bond->current_arp_slave, new_slave);
 }
 
-void bond_activebackup_arp_mon(struct work_struct *work)
+static void bond_activebackup_arp_mon(struct work_struct *work)
 {
 	struct bonding *bond = container_of(work, struct bonding,
 					    arp_work.work);
 	bool should_notify_peers = false;
 	int delta_in_ticks;
 
-	read_lock(&bond->lock);
-
 	delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval);
 
 	if (!bond_has_slaves(bond))
 		goto re_arm;
 
+	rcu_read_lock();
+
 	should_notify_peers = bond_should_notify_peers(bond);
 
 	if (bond_ab_arp_inspect(bond)) {
-		read_unlock(&bond->lock);
+		rcu_read_unlock();
 
 		/* Race avoidance with bond_close flush of workqueue */
 		if (!rtnl_trylock()) {
-			read_lock(&bond->lock);
 			delta_in_ticks = 1;
 			should_notify_peers = false;
 			goto re_arm;
 		}
 
-		read_lock(&bond->lock);
-
 		bond_ab_arp_commit(bond);
 
-		read_unlock(&bond->lock);
 		rtnl_unlock();
-		read_lock(&bond->lock);
+		rcu_read_lock();
 	}
 
 	bond_ab_arp_probe(bond);
+	rcu_read_unlock();
 
 re_arm:
 	if (bond->params.arp_interval)
 		queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks);
 
-	read_unlock(&bond->lock);
-
 	if (should_notify_peers) {
 		if (!rtnl_trylock())
 			return;
@@ -2896,9 +2823,27 @@ static int bond_slave_netdev_event(unsigned long event,
 		 */
 		break;
 	case NETDEV_CHANGENAME:
-		/*
-		 * TODO: handle changing the primary's name
-		 */
+		/* we don't care if we don't have primary set */
+		if (!USES_PRIMARY(bond->params.mode) ||
+		    !bond->params.primary[0])
+			break;
+
+		if (slave == bond->primary_slave) {
+			/* slave's name changed - it's no longer primary */
+			bond->primary_slave = NULL;
+		} else if (!strcmp(slave_dev->name, bond->params.primary)) {
+			/* we have a new primary slave */
+			bond->primary_slave = slave;
+		} else { /* we didn't change primary - exit */
+			break;
+		}
+
+		pr_info("%s: Primary slave changed to %s, reselecting active slave.\n",
+			bond->dev->name, bond->primary_slave ? slave_dev->name :
+							       "none");
+		write_lock_bh(&bond->curr_slave_lock);
+		bond_select_active_slave(bond);
+		write_unlock_bh(&bond->curr_slave_lock);
 		break;
 	case NETDEV_FEAT_CHANGE:
 		bond_compute_features(bond);
@@ -3178,6 +3123,7 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd
 	struct ifslave k_sinfo;
 	struct ifslave __user *u_sinfo = NULL;
 	struct mii_ioctl_data *mii = NULL;
+	struct bond_opt_value newval;
 	struct net *net;
 	int res = 0;
 
@@ -3249,37 +3195,35 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd
 	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
 		return -EPERM;
 
-	slave_dev = dev_get_by_name(net, ifr->ifr_slave);
+	slave_dev = __dev_get_by_name(net, ifr->ifr_slave);
 
 	pr_debug("slave_dev=%p:\n", slave_dev);
 
 	if (!slave_dev)
-		res = -ENODEV;
-	else {
-		pr_debug("slave_dev->name=%s:\n", slave_dev->name);
-		switch (cmd) {
-		case BOND_ENSLAVE_OLD:
-		case SIOCBONDENSLAVE:
-			res = bond_enslave(bond_dev, slave_dev);
-			break;
-		case BOND_RELEASE_OLD:
-		case SIOCBONDRELEASE:
-			res = bond_release(bond_dev, slave_dev);
-			break;
-		case BOND_SETHWADDR_OLD:
-		case SIOCBONDSETHWADDR:
-			bond_set_dev_addr(bond_dev, slave_dev);
-			res = 0;
-			break;
-		case BOND_CHANGE_ACTIVE_OLD:
-		case SIOCBONDCHANGEACTIVE:
-			res = bond_option_active_slave_set(bond, slave_dev);
-			break;
-		default:
-			res = -EOPNOTSUPP;
-		}
+		return -ENODEV;
 
-		dev_put(slave_dev);
+	pr_debug("slave_dev->name=%s:\n", slave_dev->name);
+	switch (cmd) {
+	case BOND_ENSLAVE_OLD:
+	case SIOCBONDENSLAVE:
+		res = bond_enslave(bond_dev, slave_dev);
+		break;
+	case BOND_RELEASE_OLD:
+	case SIOCBONDRELEASE:
+		res = bond_release(bond_dev, slave_dev);
+		break;
+	case BOND_SETHWADDR_OLD:
+	case SIOCBONDSETHWADDR:
+		bond_set_dev_addr(bond_dev, slave_dev);
+		res = 0;
+		break;
+	case BOND_CHANGE_ACTIVE_OLD:
+	case SIOCBONDCHANGEACTIVE:
+		bond_opt_initstr(&newval, slave_dev->name);
+		res = __bond_opt_set(bond, BOND_OPT_ACTIVE_SLAVE, &newval);
+		break;
+	default:
+		res = -EOPNOTSUPP;
 	}
 
 	return res;
@@ -3550,7 +3494,7 @@ unwind:
  * it fails, it tries to find the first available slave for transmission.
  * The skb is consumed in all cases, thus the function is void.
  */
-void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int slave_id)
+static void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int slave_id)
 {
 	struct list_head *iter;
 	struct slave *slave;
@@ -3590,8 +3534,9 @@ void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int slave_id)
  */
 static u32 bond_rr_gen_slave_id(struct bonding *bond)
 {
-	int packets_per_slave = bond->params.packets_per_slave;
 	u32 slave_id;
+	struct reciprocal_value reciprocal_packets_per_slave;
+	int packets_per_slave = bond->params.packets_per_slave;
 
 	switch (packets_per_slave) {
 	case 0:
@@ -3601,8 +3546,10 @@ static u32 bond_rr_gen_slave_id(struct bonding *bond)
 		slave_id = bond->rr_tx_counter;
 		break;
 	default:
+		reciprocal_packets_per_slave =
+			bond->params.reciprocal_packets_per_slave;
 		slave_id = reciprocal_divide(bond->rr_tx_counter,
-					     packets_per_slave);
+					     reciprocal_packets_per_slave);
 		break;
 	}
 	bond->rr_tx_counter++;
@@ -3707,28 +3654,24 @@ static inline int bond_slave_override(struct bonding *bond,
 				      struct sk_buff *skb)
 {
 	struct slave *slave = NULL;
-	struct slave *check_slave;
 	struct list_head *iter;
-	int res = 1;
 
 	if (!skb->queue_mapping)
 		return 1;
 
 	/* Find out if any slaves have the same mapping as this skb. */
-	bond_for_each_slave_rcu(bond, check_slave, iter) {
-		if (check_slave->queue_id == skb->queue_mapping) {
-			slave = check_slave;
+	bond_for_each_slave_rcu(bond, slave, iter) {
+		if (slave->queue_id == skb->queue_mapping) {
+			if (slave_can_tx(slave)) {
+				bond_dev_queue_xmit(bond, skb, slave->dev);
+				return 0;
+			}
+			/* If the slave isn't UP, use default transmit policy. */
 			break;
 		}
 	}
 
-	/* If the slave isn't UP, use default transmit policy. */
-	if (slave && slave->queue_id && IS_UP(slave->dev) &&
-	    (slave->link == BOND_LINK_UP)) {
-		res = bond_dev_queue_xmit(bond, skb, slave->dev);
-	}
-
-	return res;
+	return 1;
 }
 
 
@@ -3941,6 +3884,9 @@ void bond_setup(struct net_device *bond_dev)
 	 * capable
 	 */
 
+	/* Don't allow bond devices to change network namespaces. */
+	bond_dev->features |= NETIF_F_NETNS_LOCAL;
+
 	bond_dev->hw_features = BOND_VLAN_FEATURES |
 				NETIF_F_HW_VLAN_CTAG_TX |
 				NETIF_F_HW_VLAN_CTAG_RX |
@@ -3974,6 +3920,29 @@ static void bond_uninit(struct net_device *bond_dev)
 
 /*------------------------- Module initialization ---------------------------*/
 
+int bond_parm_tbl_lookup(int mode, const struct bond_parm_tbl *tbl)
+{
+	int i;
+
+	for (i = 0; tbl[i].modename; i++)
+		if (mode == tbl[i].mode)
+			return tbl[i].mode;
+
+	return -1;
+}
+
+static int bond_parm_tbl_lookup_name(const char *modename,
+				     const struct bond_parm_tbl *tbl)
+{
+	int i;
+
+	for (i = 0; tbl[i].modename; i++)
+		if (strcmp(modename, tbl[i].modename) == 0)
+			return tbl[i].mode;
+
+	return -1;
+}
+
 /*
  * Convert string input module parms.  Accept either the
  * number of the mode or its string name.  A bit complicated because
@@ -3982,27 +3951,17 @@ static void bond_uninit(struct net_device *bond_dev)
  */
 int bond_parse_parm(const char *buf, const struct bond_parm_tbl *tbl)
 {
-	int modeint = -1, i, rv;
-	char *p, modestr[BOND_MAX_MODENAME_LEN + 1] = { 0, };
+	int modeint;
+	char *p, modestr[BOND_MAX_MODENAME_LEN + 1];
 
 	for (p = (char *)buf; *p; p++)
 		if (!(isdigit(*p) || isspace(*p)))
 			break;
 
-	if (*p)
-		rv = sscanf(buf, "%20s", modestr);
-	else
-		rv = sscanf(buf, "%d", &modeint);
-
-	if (!rv)
-		return -1;
-
-	for (i = 0; tbl[i].modename; i++) {
-		if (modeint == tbl[i].mode)
-			return tbl[i].mode;
-		if (strcmp(modestr, tbl[i].modename) == 0)
-			return tbl[i].mode;
-	}
+	if (*p && sscanf(buf, "%20s", modestr) != 0)
+		return bond_parm_tbl_lookup_name(modestr, tbl);
+	else if (sscanf(buf, "%d", &modeint) != 0)
+		return bond_parm_tbl_lookup(modeint, tbl);
 
 	return -1;
 }
@@ -4010,18 +3969,20 @@ int bond_parse_parm(const char *buf, const struct bond_parm_tbl *tbl)
 static int bond_check_params(struct bond_params *params)
 {
 	int arp_validate_value, fail_over_mac_value, primary_reselect_value, i;
+	struct bond_opt_value newval, *valptr;
 	int arp_all_targets_value;
 
 	/*
 	 * Convert string parameters.
 	 */
 	if (mode) {
-		bond_mode = bond_parse_parm(mode, bond_mode_tbl);
-		if (bond_mode == -1) {
-			pr_err("Error: Invalid bonding mode \"%s\"\n",
-			       mode == NULL ? "NULL" : mode);
+		bond_opt_initstr(&newval, mode);
+		valptr = bond_opt_parse(bond_opt_get(BOND_OPT_MODE), &newval);
+		if (!valptr) {
+			pr_err("Error: Invalid bonding mode \"%s\"\n", mode);
 			return -EINVAL;
 		}
+		bond_mode = valptr->value;
 	}
 
 	if (xmit_hash_policy) {
@@ -4030,14 +3991,15 @@ static int bond_check_params(struct bond_params *params)
 			pr_info("xmit_hash_policy param is irrelevant in mode %s\n",
 			       bond_mode_name(bond_mode));
 		} else {
-			xmit_hashtype = bond_parse_parm(xmit_hash_policy,
-							xmit_hashtype_tbl);
-			if (xmit_hashtype == -1) {
+			bond_opt_initstr(&newval, xmit_hash_policy);
+			valptr = bond_opt_parse(bond_opt_get(BOND_OPT_XMIT_HASH),
+						&newval);
+			if (!valptr) {
 				pr_err("Error: Invalid xmit_hash_policy \"%s\"\n",
-				       xmit_hash_policy == NULL ? "NULL" :
 				       xmit_hash_policy);
 				return -EINVAL;
 			}
+			xmit_hashtype = valptr->value;
 		}
 	}
 
@@ -4046,26 +4008,29 @@ static int bond_check_params(struct bond_params *params)
 			pr_info("lacp_rate param is irrelevant in mode %s\n",
 				bond_mode_name(bond_mode));
 		} else {
-			lacp_fast = bond_parse_parm(lacp_rate, bond_lacp_tbl);
-			if (lacp_fast == -1) {
+			bond_opt_initstr(&newval, lacp_rate);
+			valptr = bond_opt_parse(bond_opt_get(BOND_OPT_LACP_RATE),
+						&newval);
+			if (!valptr) {
 				pr_err("Error: Invalid lacp rate \"%s\"\n",
-				       lacp_rate == NULL ? "NULL" : lacp_rate);
+				       lacp_rate);
 				return -EINVAL;
 			}
+			lacp_fast = valptr->value;
 		}
 	}
 
 	if (ad_select) {
-		params->ad_select = bond_parse_parm(ad_select, ad_select_tbl);
-		if (params->ad_select == -1) {
-			pr_err("Error: Invalid ad_select \"%s\"\n",
-			       ad_select == NULL ? "NULL" : ad_select);
+		bond_opt_initstr(&newval, ad_select);
+		valptr = bond_opt_parse(bond_opt_get(BOND_OPT_AD_SELECT),
+					&newval);
+		if (!valptr) {
+			pr_err("Error: Invalid ad_select \"%s\"\n", ad_select);
 			return -EINVAL;
 		}
-
-		if (bond_mode != BOND_MODE_8023AD) {
+		params->ad_select = valptr->value;
+		if (bond_mode != BOND_MODE_8023AD)
 			pr_warning("ad_select param only affects 802.3ad mode\n");
-		}
 	} else {
 		params->ad_select = BOND_AD_STABLE;
 	}
@@ -4077,9 +4042,9 @@ static int bond_check_params(struct bond_params *params)
 	}
 
 	if (miimon < 0) {
-		pr_warning("Warning: miimon module parameter (%d), not in range 0-%d, so it was reset to %d\n",
-			   miimon, INT_MAX, BOND_LINK_MON_INTERV);
-		miimon = BOND_LINK_MON_INTERV;
+		pr_warning("Warning: miimon module parameter (%d), not in range 0-%d, so it was reset to 0\n",
+			   miimon, INT_MAX);
+		miimon = 0;
 	}
 
 	if (updelay < 0) {
@@ -4106,8 +4071,8 @@ static int bond_check_params(struct bond_params *params)
 		num_peer_notif = 1;
 	}
 
-	/* reset values for 802.3ad */
-	if (bond_mode == BOND_MODE_8023AD) {
+	/* reset values for 802.3ad/TLB/ALB */
+	if (BOND_NO_USES_ARP(bond_mode)) {
 		if (!miimon) {
 			pr_warning("Warning: miimon must be specified, otherwise bonding will not detect link failure, speed and duplex which are essential for 802.3ad operation\n");
 			pr_warning("Forcing miimon to 100msec\n");
@@ -4136,22 +4101,13 @@ static int bond_check_params(struct bond_params *params)
 		resend_igmp = BOND_DEFAULT_RESEND_IGMP;
 	}
 
-	if (packets_per_slave < 0 || packets_per_slave > USHRT_MAX) {
+	bond_opt_initval(&newval, packets_per_slave);
+	if (!bond_opt_parse(bond_opt_get(BOND_OPT_PACKETS_PER_SLAVE), &newval)) {
 		pr_warn("Warning: packets_per_slave (%d) should be between 0 and %u resetting to 1\n",
 			packets_per_slave, USHRT_MAX);
 		packets_per_slave = 1;
 	}
 
-	/* reset values for TLB/ALB */
-	if ((bond_mode == BOND_MODE_TLB) ||
-	    (bond_mode == BOND_MODE_ALB)) {
-		if (!miimon) {
-			pr_warning("Warning: miimon must be specified, otherwise bonding will not detect link failure and link speed which are essential for TLB/ALB load balancing\n");
-			pr_warning("Forcing miimon to 100msec\n");
-			miimon = BOND_DEFAULT_MIIMON;
-		}
-	}
-
 	if (bond_mode == BOND_MODE_ALB) {
 		pr_notice("In ALB mode you might experience client disconnections upon reconnection of a link if the bonding module updelay parameter (%d msec) is incompatible with the forwarding delay time of the switch\n",
 			  updelay);
@@ -4191,9 +4147,9 @@ static int bond_check_params(struct bond_params *params)
 	}
 
 	if (arp_interval < 0) {
-		pr_warning("Warning: arp_interval module parameter (%d) , not in range 0-%d, so it was reset to %d\n",
-			   arp_interval, INT_MAX, BOND_LINK_ARP_INTERV);
-		arp_interval = BOND_LINK_ARP_INTERV;
+		pr_warning("Warning: arp_interval module parameter (%d), not in range 0-%d, so it was reset to 0\n",
+			   arp_interval, INT_MAX);
+		arp_interval = 0;
 	}
 
 	for (arp_ip_count = 0, i = 0;
@@ -4232,35 +4188,40 @@ static int bond_check_params(struct bond_params *params)
 			return -EINVAL;
 		}
 
-		arp_validate_value = bond_parse_parm(arp_validate,
-						     arp_validate_tbl);
-		if (arp_validate_value == -1) {
+		bond_opt_initstr(&newval, arp_validate);
+		valptr = bond_opt_parse(bond_opt_get(BOND_OPT_ARP_VALIDATE),
+					&newval);
+		if (!valptr) {
 			pr_err("Error: invalid arp_validate \"%s\"\n",
-			       arp_validate == NULL ? "NULL" : arp_validate);
+			       arp_validate);
 			return -EINVAL;
 		}
-	} else
+		arp_validate_value = valptr->value;
+	} else {
 		arp_validate_value = 0;
+	}
 
 	arp_all_targets_value = 0;
 	if (arp_all_targets) {
-		arp_all_targets_value = bond_parse_parm(arp_all_targets,
-							arp_all_targets_tbl);
-
-		if (arp_all_targets_value == -1) {
+		bond_opt_initstr(&newval, arp_all_targets);
+		valptr = bond_opt_parse(bond_opt_get(BOND_OPT_ARP_ALL_TARGETS),
+					&newval);
+		if (!valptr) {
 			pr_err("Error: invalid arp_all_targets_value \"%s\"\n",
 			       arp_all_targets);
 			arp_all_targets_value = 0;
+		} else {
+			arp_all_targets_value = valptr->value;
 		}
 	}
 
 	if (miimon) {
 		pr_info("MII link monitoring set to %d ms\n", miimon);
 	} else if (arp_interval) {
+		valptr = bond_opt_get_val(BOND_OPT_ARP_VALIDATE,
+					  arp_validate_value);
 		pr_info("ARP monitoring set to %d ms, validate %s, with %d target(s):",
-			arp_interval,
-			arp_validate_tbl[arp_validate_value].modename,
-			arp_ip_count);
+			arp_interval, valptr->string, arp_ip_count);
 
 		for (i = 0; i < arp_ip_count; i++)
 			pr_info(" %s", arp_ip_target[i]);
@@ -4284,33 +4245,41 @@ static int bond_check_params(struct bond_params *params)
 	}
 
 	if (primary && primary_reselect) {
-		primary_reselect_value = bond_parse_parm(primary_reselect,
-							 pri_reselect_tbl);
-		if (primary_reselect_value == -1) {
+		bond_opt_initstr(&newval, primary_reselect);
+		valptr = bond_opt_parse(bond_opt_get(BOND_OPT_PRIMARY_RESELECT),
+					&newval);
+		if (!valptr) {
 			pr_err("Error: Invalid primary_reselect \"%s\"\n",
-			       primary_reselect ==
-					NULL ? "NULL" : primary_reselect);
+			       primary_reselect);
 			return -EINVAL;
 		}
+		primary_reselect_value = valptr->value;
 	} else {
 		primary_reselect_value = BOND_PRI_RESELECT_ALWAYS;
 	}
 
 	if (fail_over_mac) {
-		fail_over_mac_value = bond_parse_parm(fail_over_mac,
-						      fail_over_mac_tbl);
-		if (fail_over_mac_value == -1) {
+		bond_opt_initstr(&newval, fail_over_mac);
+		valptr = bond_opt_parse(bond_opt_get(BOND_OPT_FAIL_OVER_MAC),
+					&newval);
+		if (!valptr) {
 			pr_err("Error: invalid fail_over_mac \"%s\"\n",
-			       arp_validate == NULL ? "NULL" : arp_validate);
+			       fail_over_mac);
 			return -EINVAL;
 		}
-
+		fail_over_mac_value = valptr->value;
 		if (bond_mode != BOND_MODE_ACTIVEBACKUP)
 			pr_warning("Warning: fail_over_mac only affects active-backup mode.\n");
 	} else {
 		fail_over_mac_value = BOND_FOM_NONE;
 	}
 
+	if (lp_interval == 0) {
+		pr_warning("Warning: lp_interval must be between 1 and %d, so it was reset to %d\n",
+			   INT_MAX, BOND_ALB_DEFAULT_LP_INTERVAL);
+		lp_interval = BOND_ALB_DEFAULT_LP_INTERVAL;
+	}
+
 	/* fill params struct with the proper values */
 	params->mode = bond_mode;
 	params->xmit_policy = xmit_hashtype;
@@ -4330,11 +4299,19 @@ static int bond_check_params(struct bond_params *params)
 	params->all_slaves_active = all_slaves_active;
 	params->resend_igmp = resend_igmp;
 	params->min_links = min_links;
-	params->lp_interval = BOND_ALB_DEFAULT_LP_INTERVAL;
-	if (packets_per_slave > 1)
-		params->packets_per_slave = reciprocal_value(packets_per_slave);
-	else
-		params->packets_per_slave = packets_per_slave;
+	params->lp_interval = lp_interval;
+	params->packets_per_slave = packets_per_slave;
+	if (packets_per_slave > 0) {
+		params->reciprocal_packets_per_slave =
+			reciprocal_value(packets_per_slave);
+	} else {
+		/* reciprocal_packets_per_slave is unused if
+		 * packets_per_slave is 0 or 1, just initialize it
+		 */
+		params->reciprocal_packets_per_slave =
+			(struct reciprocal_value) { 0 };
+	}
+
 	if (primary) {
 		strncpy(params->primary, primary, IFNAMSIZ);
 		params->primary[IFNAMSIZ - 1] = 0;
diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
index 40e7b1cb4aea..70651f8e8e3b 100644
--- a/drivers/net/bonding/bond_netlink.c
+++ b/drivers/net/bonding/bond_netlink.c
@@ -1,6 +1,7 @@
 /*
  * drivers/net/bond/bond_netlink.c - Netlink interface for bonding
  * Copyright (c) 2013 Jiri Pirko <jiri@resnulli.us>
+ * Copyright (c) 2013 Scott Feldman <sfeldma@cumulusnetworks.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -20,9 +21,81 @@
 #include <net/rtnetlink.h>
 #include "bonding.h"
 
+static size_t bond_get_slave_size(const struct net_device *bond_dev,
+				  const struct net_device *slave_dev)
+{
+	return nla_total_size(sizeof(u8)) +	/* IFLA_BOND_SLAVE_STATE */
+		nla_total_size(sizeof(u8)) +	/* IFLA_BOND_SLAVE_MII_STATUS */
+		nla_total_size(sizeof(u32)) +	/* IFLA_BOND_SLAVE_LINK_FAILURE_COUNT */
+		nla_total_size(MAX_ADDR_LEN) +	/* IFLA_BOND_SLAVE_PERM_HWADDR */
+		nla_total_size(sizeof(u16)) +	/* IFLA_BOND_SLAVE_QUEUE_ID */
+		nla_total_size(sizeof(u16)) +	/* IFLA_BOND_SLAVE_AD_AGGREGATOR_ID */
+		0;
+}
+
+static int bond_fill_slave_info(struct sk_buff *skb,
+				const struct net_device *bond_dev,
+				const struct net_device *slave_dev)
+{
+	struct slave *slave = bond_slave_get_rtnl(slave_dev);
+
+	if (nla_put_u8(skb, IFLA_BOND_SLAVE_STATE, bond_slave_state(slave)))
+		goto nla_put_failure;
+
+	if (nla_put_u8(skb, IFLA_BOND_SLAVE_MII_STATUS, slave->link))
+		goto nla_put_failure;
+
+	if (nla_put_u32(skb, IFLA_BOND_SLAVE_LINK_FAILURE_COUNT,
+			slave->link_failure_count))
+		goto nla_put_failure;
+
+	if (nla_put(skb, IFLA_BOND_SLAVE_PERM_HWADDR,
+		    slave_dev->addr_len, slave->perm_hwaddr))
+		goto nla_put_failure;
+
+	if (nla_put_u16(skb, IFLA_BOND_SLAVE_QUEUE_ID, slave->queue_id))
+		goto nla_put_failure;
+
+	if (slave->bond->params.mode == BOND_MODE_8023AD) {
+		const struct aggregator *agg;
+
+		agg = SLAVE_AD_INFO(slave).port.aggregator;
+		if (agg)
+			if (nla_put_u16(skb, IFLA_BOND_SLAVE_AD_AGGREGATOR_ID,
+					agg->aggregator_identifier))
+				goto nla_put_failure;
+	}
+
+	return 0;
+
+nla_put_failure:
+	return -EMSGSIZE;
+}
+
 static const struct nla_policy bond_policy[IFLA_BOND_MAX + 1] = {
 	[IFLA_BOND_MODE]		= { .type = NLA_U8 },
 	[IFLA_BOND_ACTIVE_SLAVE]	= { .type = NLA_U32 },
+	[IFLA_BOND_MIIMON]		= { .type = NLA_U32 },
+	[IFLA_BOND_UPDELAY]		= { .type = NLA_U32 },
+	[IFLA_BOND_DOWNDELAY]		= { .type = NLA_U32 },
+	[IFLA_BOND_USE_CARRIER]		= { .type = NLA_U8 },
+	[IFLA_BOND_ARP_INTERVAL]	= { .type = NLA_U32 },
+	[IFLA_BOND_ARP_IP_TARGET]	= { .type = NLA_NESTED },
+	[IFLA_BOND_ARP_VALIDATE]	= { .type = NLA_U32 },
+	[IFLA_BOND_ARP_ALL_TARGETS]	= { .type = NLA_U32 },
+	[IFLA_BOND_PRIMARY]		= { .type = NLA_U32 },
+	[IFLA_BOND_PRIMARY_RESELECT]	= { .type = NLA_U8 },
+	[IFLA_BOND_FAIL_OVER_MAC]	= { .type = NLA_U8 },
+	[IFLA_BOND_XMIT_HASH_POLICY]	= { .type = NLA_U8 },
+	[IFLA_BOND_RESEND_IGMP]		= { .type = NLA_U32 },
+	[IFLA_BOND_NUM_PEER_NOTIF]	= { .type = NLA_U8 },
+	[IFLA_BOND_ALL_SLAVES_ACTIVE]	= { .type = NLA_U8 },
+	[IFLA_BOND_MIN_LINKS]		= { .type = NLA_U32 },
+	[IFLA_BOND_LP_INTERVAL]		= { .type = NLA_U32 },
+	[IFLA_BOND_PACKETS_PER_SLAVE]	= { .type = NLA_U32 },
+	[IFLA_BOND_AD_LACP_RATE]	= { .type = NLA_U8 },
+	[IFLA_BOND_AD_SELECT]		= { .type = NLA_U8 },
+	[IFLA_BOND_AD_INFO]		= { .type = NLA_NESTED },
 };
 
 static int bond_validate(struct nlattr *tb[], struct nlattr *data[])
@@ -40,28 +113,238 @@ static int bond_changelink(struct net_device *bond_dev,
 			   struct nlattr *tb[], struct nlattr *data[])
 {
 	struct bonding *bond = netdev_priv(bond_dev);
+	struct bond_opt_value newval;
+	int miimon = 0;
 	int err;
 
-	if (data && data[IFLA_BOND_MODE]) {
+	if (!data)
+		return 0;
+
+	if (data[IFLA_BOND_MODE]) {
 		int mode = nla_get_u8(data[IFLA_BOND_MODE]);
 
-		err = bond_option_mode_set(bond, mode);
+		bond_opt_initval(&newval, mode);
+		err = __bond_opt_set(bond, BOND_OPT_MODE, &newval);
 		if (err)
 			return err;
 	}
-	if (data && data[IFLA_BOND_ACTIVE_SLAVE]) {
+	if (data[IFLA_BOND_ACTIVE_SLAVE]) {
 		int ifindex = nla_get_u32(data[IFLA_BOND_ACTIVE_SLAVE]);
 		struct net_device *slave_dev;
+		char *active_slave = "";
 
-		if (ifindex == 0) {
-			slave_dev = NULL;
-		} else {
+		if (ifindex != 0) {
 			slave_dev = __dev_get_by_index(dev_net(bond_dev),
 						       ifindex);
 			if (!slave_dev)
 				return -ENODEV;
+			active_slave = slave_dev->name;
+		}
+		bond_opt_initstr(&newval, active_slave);
+		err = __bond_opt_set(bond, BOND_OPT_ACTIVE_SLAVE, &newval);
+		if (err)
+			return err;
+	}
+	if (data[IFLA_BOND_MIIMON]) {
+		miimon = nla_get_u32(data[IFLA_BOND_MIIMON]);
+
+		bond_opt_initval(&newval, miimon);
+		err = __bond_opt_set(bond, BOND_OPT_MIIMON, &newval);
+		if (err)
+			return err;
+	}
+	if (data[IFLA_BOND_UPDELAY]) {
+		int updelay = nla_get_u32(data[IFLA_BOND_UPDELAY]);
+
+		bond_opt_initval(&newval, updelay);
+		err = __bond_opt_set(bond, BOND_OPT_UPDELAY, &newval);
+		if (err)
+			return err;
+	}
+	if (data[IFLA_BOND_DOWNDELAY]) {
+		int downdelay = nla_get_u32(data[IFLA_BOND_DOWNDELAY]);
+
+		bond_opt_initval(&newval, downdelay);
+		err = __bond_opt_set(bond, BOND_OPT_DOWNDELAY, &newval);
+		if (err)
+			return err;
+	}
+	if (data[IFLA_BOND_USE_CARRIER]) {
+		int use_carrier = nla_get_u8(data[IFLA_BOND_USE_CARRIER]);
+
+		bond_opt_initval(&newval, use_carrier);
+		err = __bond_opt_set(bond, BOND_OPT_USE_CARRIER, &newval);
+		if (err)
+			return err;
+	}
+	if (data[IFLA_BOND_ARP_INTERVAL]) {
+		int arp_interval = nla_get_u32(data[IFLA_BOND_ARP_INTERVAL]);
+
+		if (arp_interval && miimon) {
+			pr_err("%s: ARP monitoring cannot be used with MII monitoring.\n",
+			       bond->dev->name);
+			return -EINVAL;
 		}
-		err = bond_option_active_slave_set(bond, slave_dev);
+
+		bond_opt_initval(&newval, arp_interval);
+		err = __bond_opt_set(bond, BOND_OPT_ARP_INTERVAL, &newval);
+		if (err)
+			return err;
+	}
+	if (data[IFLA_BOND_ARP_IP_TARGET]) {
+		struct nlattr *attr;
+		int i = 0, rem;
+
+		bond_option_arp_ip_targets_clear(bond);
+		nla_for_each_nested(attr, data[IFLA_BOND_ARP_IP_TARGET], rem) {
+			__be32 target = nla_get_be32(attr);
+
+			bond_opt_initval(&newval, target);
+			err = __bond_opt_set(bond, BOND_OPT_ARP_TARGETS,
+					     &newval);
+			if (err)
+				break;
+			i++;
+		}
+		if (i == 0 && bond->params.arp_interval)
+			pr_warn("%s: removing last arp target with arp_interval on\n",
+				bond->dev->name);
+		if (err)
+			return err;
+	}
+	if (data[IFLA_BOND_ARP_VALIDATE]) {
+		int arp_validate = nla_get_u32(data[IFLA_BOND_ARP_VALIDATE]);
+
+		if (arp_validate && miimon) {
+			pr_err("%s: ARP validating cannot be used with MII monitoring.\n",
+			       bond->dev->name);
+			return -EINVAL;
+		}
+
+		bond_opt_initval(&newval, arp_validate);
+		err = __bond_opt_set(bond, BOND_OPT_ARP_VALIDATE, &newval);
+		if (err)
+			return err;
+	}
+	if (data[IFLA_BOND_ARP_ALL_TARGETS]) {
+		int arp_all_targets =
+			nla_get_u32(data[IFLA_BOND_ARP_ALL_TARGETS]);
+
+		bond_opt_initval(&newval, arp_all_targets);
+		err = __bond_opt_set(bond, BOND_OPT_ARP_ALL_TARGETS, &newval);
+		if (err)
+			return err;
+	}
+	if (data[IFLA_BOND_PRIMARY]) {
+		int ifindex = nla_get_u32(data[IFLA_BOND_PRIMARY]);
+		struct net_device *dev;
+		char *primary = "";
+
+		dev = __dev_get_by_index(dev_net(bond_dev), ifindex);
+		if (dev)
+			primary = dev->name;
+
+		bond_opt_initstr(&newval, primary);
+		err = __bond_opt_set(bond, BOND_OPT_PRIMARY, &newval);
+		if (err)
+			return err;
+	}
+	if (data[IFLA_BOND_PRIMARY_RESELECT]) {
+		int primary_reselect =
+			nla_get_u8(data[IFLA_BOND_PRIMARY_RESELECT]);
+
+		bond_opt_initval(&newval, primary_reselect);
+		err = __bond_opt_set(bond, BOND_OPT_PRIMARY_RESELECT, &newval);
+		if (err)
+			return err;
+	}
+	if (data[IFLA_BOND_FAIL_OVER_MAC]) {
+		int fail_over_mac =
+			nla_get_u8(data[IFLA_BOND_FAIL_OVER_MAC]);
+
+		bond_opt_initval(&newval, fail_over_mac);
+		err = __bond_opt_set(bond, BOND_OPT_FAIL_OVER_MAC, &newval);
+		if (err)
+			return err;
+	}
+	if (data[IFLA_BOND_XMIT_HASH_POLICY]) {
+		int xmit_hash_policy =
+			nla_get_u8(data[IFLA_BOND_XMIT_HASH_POLICY]);
+
+		bond_opt_initval(&newval, xmit_hash_policy);
+		err = __bond_opt_set(bond, BOND_OPT_XMIT_HASH, &newval);
+		if (err)
+			return err;
+	}
+	if (data[IFLA_BOND_RESEND_IGMP]) {
+		int resend_igmp =
+			nla_get_u32(data[IFLA_BOND_RESEND_IGMP]);
+
+		bond_opt_initval(&newval, resend_igmp);
+		err = __bond_opt_set(bond, BOND_OPT_RESEND_IGMP, &newval);
+		if (err)
+			return err;
+	}
+	if (data[IFLA_BOND_NUM_PEER_NOTIF]) {
+		int num_peer_notif =
+			nla_get_u8(data[IFLA_BOND_NUM_PEER_NOTIF]);
+
+		bond_opt_initval(&newval, num_peer_notif);
+		err = __bond_opt_set(bond, BOND_OPT_NUM_PEER_NOTIF, &newval);
+		if (err)
+			return err;
+	}
+	if (data[IFLA_BOND_ALL_SLAVES_ACTIVE]) {
+		int all_slaves_active =
+			nla_get_u8(data[IFLA_BOND_ALL_SLAVES_ACTIVE]);
+
+		bond_opt_initval(&newval, all_slaves_active);
+		err = __bond_opt_set(bond, BOND_OPT_ALL_SLAVES_ACTIVE, &newval);
+		if (err)
+			return err;
+	}
+	if (data[IFLA_BOND_MIN_LINKS]) {
+		int min_links =
+			nla_get_u32(data[IFLA_BOND_MIN_LINKS]);
+
+		bond_opt_initval(&newval, min_links);
+		err = __bond_opt_set(bond, BOND_OPT_MINLINKS, &newval);
+		if (err)
+			return err;
+	}
+	if (data[IFLA_BOND_LP_INTERVAL]) {
+		int lp_interval =
+			nla_get_u32(data[IFLA_BOND_LP_INTERVAL]);
+
+		bond_opt_initval(&newval, lp_interval);
+		err = __bond_opt_set(bond, BOND_OPT_LP_INTERVAL, &newval);
+		if (err)
+			return err;
+	}
+	if (data[IFLA_BOND_PACKETS_PER_SLAVE]) {
+		int packets_per_slave =
+			nla_get_u32(data[IFLA_BOND_PACKETS_PER_SLAVE]);
+
+		bond_opt_initval(&newval, packets_per_slave);
+		err = __bond_opt_set(bond, BOND_OPT_PACKETS_PER_SLAVE, &newval);
+		if (err)
+			return err;
+	}
+	if (data[IFLA_BOND_AD_LACP_RATE]) {
+		int lacp_rate =
+			nla_get_u8(data[IFLA_BOND_AD_LACP_RATE]);
+
+		bond_opt_initval(&newval, lacp_rate);
+		err = __bond_opt_set(bond, BOND_OPT_LACP_RATE, &newval);
+		if (err)
+			return err;
+	}
+	if (data[IFLA_BOND_AD_SELECT]) {
+		int ad_select =
+			nla_get_u8(data[IFLA_BOND_AD_SELECT]);
+
+		bond_opt_initval(&newval, ad_select);
+		err = __bond_opt_set(bond, BOND_OPT_AD_SELECT, &newval);
 		if (err)
 			return err;
 	}
@@ -83,7 +366,36 @@ static int bond_newlink(struct net *src_net, struct net_device *bond_dev,
 static size_t bond_get_size(const struct net_device *bond_dev)
 {
 	return nla_total_size(sizeof(u8)) +	/* IFLA_BOND_MODE */
-		nla_total_size(sizeof(u32));	/* IFLA_BOND_ACTIVE_SLAVE */
+		nla_total_size(sizeof(u32)) +	/* IFLA_BOND_ACTIVE_SLAVE */
+		nla_total_size(sizeof(u32)) +	/* IFLA_BOND_MIIMON */
+		nla_total_size(sizeof(u32)) +	/* IFLA_BOND_UPDELAY */
+		nla_total_size(sizeof(u32)) +	/* IFLA_BOND_DOWNDELAY */
+		nla_total_size(sizeof(u8)) +	/* IFLA_BOND_USE_CARRIER */
+		nla_total_size(sizeof(u32)) +	/* IFLA_BOND_ARP_INTERVAL */
+						/* IFLA_BOND_ARP_IP_TARGET */
+		nla_total_size(sizeof(struct nlattr)) +
+		nla_total_size(sizeof(u32)) * BOND_MAX_ARP_TARGETS +
+		nla_total_size(sizeof(u32)) +	/* IFLA_BOND_ARP_VALIDATE */
+		nla_total_size(sizeof(u32)) +	/* IFLA_BOND_ARP_ALL_TARGETS */
+		nla_total_size(sizeof(u32)) +	/* IFLA_BOND_PRIMARY */
+		nla_total_size(sizeof(u8)) +	/* IFLA_BOND_PRIMARY_RESELECT */
+		nla_total_size(sizeof(u8)) +	/* IFLA_BOND_FAIL_OVER_MAC */
+		nla_total_size(sizeof(u8)) +	/* IFLA_BOND_XMIT_HASH_POLICY */
+		nla_total_size(sizeof(u32)) +	/* IFLA_BOND_RESEND_IGMP */
+		nla_total_size(sizeof(u8)) +	/* IFLA_BOND_NUM_PEER_NOTIF */
+		nla_total_size(sizeof(u8)) +   /* IFLA_BOND_ALL_SLAVES_ACTIVE */
+		nla_total_size(sizeof(u32)) +	/* IFLA_BOND_MIN_LINKS */
+		nla_total_size(sizeof(u32)) +	/* IFLA_BOND_LP_INTERVAL */
+		nla_total_size(sizeof(u32)) +  /* IFLA_BOND_PACKETS_PER_SLAVE */
+		nla_total_size(sizeof(u8)) +	/* IFLA_BOND_AD_LACP_RATE */
+		nla_total_size(sizeof(u8)) +	/* IFLA_BOND_AD_SELECT */
+		nla_total_size(sizeof(struct nlattr)) + /* IFLA_BOND_AD_INFO */
+		nla_total_size(sizeof(u16)) + /* IFLA_BOND_AD_INFO_AGGREGATOR */
+		nla_total_size(sizeof(u16)) + /* IFLA_BOND_AD_INFO_NUM_PORTS */
+		nla_total_size(sizeof(u16)) + /* IFLA_BOND_AD_INFO_ACTOR_KEY */
+		nla_total_size(sizeof(u16)) + /* IFLA_BOND_AD_INFO_PARTNER_KEY*/
+		nla_total_size(ETH_ALEN) +    /* IFLA_BOND_AD_INFO_PARTNER_MAC*/
+		0;
 }
 
 static int bond_fill_info(struct sk_buff *skb,
@@ -91,11 +403,139 @@ static int bond_fill_info(struct sk_buff *skb,
 {
 	struct bonding *bond = netdev_priv(bond_dev);
 	struct net_device *slave_dev = bond_option_active_slave_get(bond);
+	struct nlattr *targets;
+	unsigned int packets_per_slave;
+	int i, targets_added;
+
+	if (nla_put_u8(skb, IFLA_BOND_MODE, bond->params.mode))
+		goto nla_put_failure;
+
+	if (slave_dev &&
+	    nla_put_u32(skb, IFLA_BOND_ACTIVE_SLAVE, slave_dev->ifindex))
+		goto nla_put_failure;
+
+	if (nla_put_u32(skb, IFLA_BOND_MIIMON, bond->params.miimon))
+		goto nla_put_failure;
+
+	if (nla_put_u32(skb, IFLA_BOND_UPDELAY,
+			bond->params.updelay * bond->params.miimon))
+		goto nla_put_failure;
+
+	if (nla_put_u32(skb, IFLA_BOND_DOWNDELAY,
+			bond->params.downdelay * bond->params.miimon))
+		goto nla_put_failure;
+
+	if (nla_put_u8(skb, IFLA_BOND_USE_CARRIER, bond->params.use_carrier))
+		goto nla_put_failure;
 
-	if (nla_put_u8(skb, IFLA_BOND_MODE, bond->params.mode) ||
-	    (slave_dev &&
-	     nla_put_u32(skb, IFLA_BOND_ACTIVE_SLAVE, slave_dev->ifindex)))
+	if (nla_put_u32(skb, IFLA_BOND_ARP_INTERVAL, bond->params.arp_interval))
 		goto nla_put_failure;
+
+	targets = nla_nest_start(skb, IFLA_BOND_ARP_IP_TARGET);
+	if (!targets)
+		goto nla_put_failure;
+
+	targets_added = 0;
+	for (i = 0; i < BOND_MAX_ARP_TARGETS; i++) {
+		if (bond->params.arp_targets[i]) {
+			nla_put_be32(skb, i, bond->params.arp_targets[i]);
+			targets_added = 1;
+		}
+	}
+
+	if (targets_added)
+		nla_nest_end(skb, targets);
+	else
+		nla_nest_cancel(skb, targets);
+
+	if (nla_put_u32(skb, IFLA_BOND_ARP_VALIDATE, bond->params.arp_validate))
+		goto nla_put_failure;
+
+	if (nla_put_u32(skb, IFLA_BOND_ARP_ALL_TARGETS,
+			bond->params.arp_all_targets))
+		goto nla_put_failure;
+
+	if (bond->primary_slave &&
+	    nla_put_u32(skb, IFLA_BOND_PRIMARY,
+			bond->primary_slave->dev->ifindex))
+		goto nla_put_failure;
+
+	if (nla_put_u8(skb, IFLA_BOND_PRIMARY_RESELECT,
+		       bond->params.primary_reselect))
+		goto nla_put_failure;
+
+	if (nla_put_u8(skb, IFLA_BOND_FAIL_OVER_MAC,
+		       bond->params.fail_over_mac))
+		goto nla_put_failure;
+
+	if (nla_put_u8(skb, IFLA_BOND_XMIT_HASH_POLICY,
+		       bond->params.xmit_policy))
+		goto nla_put_failure;
+
+	if (nla_put_u32(skb, IFLA_BOND_RESEND_IGMP,
+		        bond->params.resend_igmp))
+		goto nla_put_failure;
+
+	if (nla_put_u8(skb, IFLA_BOND_NUM_PEER_NOTIF,
+		       bond->params.num_peer_notif))
+		goto nla_put_failure;
+
+	if (nla_put_u8(skb, IFLA_BOND_ALL_SLAVES_ACTIVE,
+		       bond->params.all_slaves_active))
+		goto nla_put_failure;
+
+	if (nla_put_u32(skb, IFLA_BOND_MIN_LINKS,
+			bond->params.min_links))
+		goto nla_put_failure;
+
+	if (nla_put_u32(skb, IFLA_BOND_LP_INTERVAL,
+			bond->params.lp_interval))
+		goto nla_put_failure;
+
+	packets_per_slave = bond->params.packets_per_slave;
+	if (nla_put_u32(skb, IFLA_BOND_PACKETS_PER_SLAVE,
+			packets_per_slave))
+		goto nla_put_failure;
+
+	if (nla_put_u8(skb, IFLA_BOND_AD_LACP_RATE,
+		       bond->params.lacp_fast))
+		goto nla_put_failure;
+
+	if (nla_put_u8(skb, IFLA_BOND_AD_SELECT,
+		       bond->params.ad_select))
+		goto nla_put_failure;
+
+	if (bond->params.mode == BOND_MODE_8023AD) {
+		struct ad_info info;
+
+		if (!bond_3ad_get_active_agg_info(bond, &info)) {
+			struct nlattr *nest;
+
+			nest = nla_nest_start(skb, IFLA_BOND_AD_INFO);
+			if (!nest)
+				goto nla_put_failure;
+
+			if (nla_put_u16(skb, IFLA_BOND_AD_INFO_AGGREGATOR,
+					info.aggregator_id))
+				goto nla_put_failure;
+			if (nla_put_u16(skb, IFLA_BOND_AD_INFO_NUM_PORTS,
+					info.ports))
+				goto nla_put_failure;
+			if (nla_put_u16(skb, IFLA_BOND_AD_INFO_ACTOR_KEY,
+					info.actor_key))
+				goto nla_put_failure;
+			if (nla_put_u16(skb, IFLA_BOND_AD_INFO_PARTNER_KEY,
+					info.partner_key))
+				goto nla_put_failure;
+			if (nla_put(skb, IFLA_BOND_AD_INFO_PARTNER_MAC,
+				    sizeof(info.partner_system),
+				    &info.partner_system))
+				goto nla_put_failure;
+
+			nla_nest_end(skb, nest);
+		}
+	}
+
 	return 0;
 
 nla_put_failure:
@@ -116,6 +556,8 @@ struct rtnl_link_ops bond_link_ops __read_mostly = {
 	.get_num_tx_queues	= bond_get_num_tx_queues,
 	.get_num_rx_queues	= bond_get_num_tx_queues, /* Use the same number
 							     as for TX queues */
+	.get_slave_size		= bond_get_slave_size,
+	.fill_slave_info	= bond_fill_slave_info,
 };
 
 int __init bond_netlink_init(void)
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index ea6f640782b7..11cb943222d5 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -1,6 +1,7 @@
 /*
  * drivers/net/bond/bond_options.c - bonding options
  * Copyright (c) 2013 Jiri Pirko <jiri@resnulli.us>
+ * Copyright (c) 2013 Scott Feldman <sfeldma@cumulusnetworks.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -15,39 +16,575 @@
 #include <linux/netdevice.h>
 #include <linux/rwlock.h>
 #include <linux/rcupdate.h>
+#include <linux/ctype.h>
+#include <linux/inet.h>
 #include "bonding.h"
 
-static bool bond_mode_is_valid(int mode)
+static struct bond_opt_value bond_mode_tbl[] = {
+	{ "balance-rr",    BOND_MODE_ROUNDROBIN,   BOND_VALFLAG_DEFAULT},
+	{ "active-backup", BOND_MODE_ACTIVEBACKUP, 0},
+	{ "balance-xor",   BOND_MODE_XOR,          0},
+	{ "broadcast",     BOND_MODE_BROADCAST,    0},
+	{ "802.3ad",       BOND_MODE_8023AD,       0},
+	{ "balance-tlb",   BOND_MODE_TLB,          0},
+	{ "balance-alb",   BOND_MODE_ALB,          0},
+	{ NULL,            -1,                     0},
+};
+
+static struct bond_opt_value bond_pps_tbl[] = {
+	{ "default", 1,         BOND_VALFLAG_DEFAULT},
+	{ "maxval",  USHRT_MAX, BOND_VALFLAG_MAX},
+	{ NULL,      -1,        0},
+};
+
+static struct bond_opt_value bond_xmit_hashtype_tbl[] = {
+	{ "layer2",   BOND_XMIT_POLICY_LAYER2, BOND_VALFLAG_DEFAULT},
+	{ "layer3+4", BOND_XMIT_POLICY_LAYER34, 0},
+	{ "layer2+3", BOND_XMIT_POLICY_LAYER23, 0},
+	{ "encap2+3", BOND_XMIT_POLICY_ENCAP23, 0},
+	{ "encap3+4", BOND_XMIT_POLICY_ENCAP34, 0},
+	{ NULL,       -1,                       0},
+};
+
+static struct bond_opt_value bond_arp_validate_tbl[] = {
+	{ "none",   BOND_ARP_VALIDATE_NONE,   BOND_VALFLAG_DEFAULT},
+	{ "active", BOND_ARP_VALIDATE_ACTIVE, 0},
+	{ "backup", BOND_ARP_VALIDATE_BACKUP, 0},
+	{ "all",    BOND_ARP_VALIDATE_ALL,    0},
+	{ NULL,     -1,                       0},
+};
+
+static struct bond_opt_value bond_arp_all_targets_tbl[] = {
+	{ "any", BOND_ARP_TARGETS_ANY, BOND_VALFLAG_DEFAULT},
+	{ "all", BOND_ARP_TARGETS_ALL, 0},
+	{ NULL,  -1,                   0},
+};
+
+static struct bond_opt_value bond_fail_over_mac_tbl[] = {
+	{ "none",   BOND_FOM_NONE,   BOND_VALFLAG_DEFAULT},
+	{ "active", BOND_FOM_ACTIVE, 0},
+	{ "follow", BOND_FOM_FOLLOW, 0},
+	{ NULL,     -1,              0},
+};
+
+static struct bond_opt_value bond_intmax_tbl[] = {
+	{ "off",     0,       BOND_VALFLAG_DEFAULT},
+	{ "maxval",  INT_MAX, BOND_VALFLAG_MAX},
+};
+
+static struct bond_opt_value bond_lacp_rate_tbl[] = {
+	{ "slow", AD_LACP_SLOW, 0},
+	{ "fast", AD_LACP_FAST, 0},
+	{ NULL,   -1,           0},
+};
+
+static struct bond_opt_value bond_ad_select_tbl[] = {
+	{ "stable",    BOND_AD_STABLE,    BOND_VALFLAG_DEFAULT},
+	{ "bandwidth", BOND_AD_BANDWIDTH, 0},
+	{ "count",     BOND_AD_COUNT,     0},
+	{ NULL,        -1,                0},
+};
+
+static struct bond_opt_value bond_num_peer_notif_tbl[] = {
+	{ "off",     0,   0},
+	{ "maxval",  255, BOND_VALFLAG_MAX},
+	{ "default", 1,   BOND_VALFLAG_DEFAULT},
+	{ NULL,      -1,  0}
+};
+
+static struct bond_opt_value bond_primary_reselect_tbl[] = {
+	{ "always",  BOND_PRI_RESELECT_ALWAYS,  BOND_VALFLAG_DEFAULT},
+	{ "better",  BOND_PRI_RESELECT_BETTER,  0},
+	{ "failure", BOND_PRI_RESELECT_FAILURE, 0},
+	{ NULL,      -1},
+};
+
+static struct bond_opt_value bond_use_carrier_tbl[] = {
+	{ "off", 0,  0},
+	{ "on",  1,  BOND_VALFLAG_DEFAULT},
+	{ NULL,  -1, 0}
+};
+
+static struct bond_opt_value bond_all_slaves_active_tbl[] = {
+	{ "off", 0,  BOND_VALFLAG_DEFAULT},
+	{ "on",  1,  0},
+	{ NULL,  -1, 0}
+};
+
+static struct bond_opt_value bond_resend_igmp_tbl[] = {
+	{ "off",     0,   0},
+	{ "maxval",  255, BOND_VALFLAG_MAX},
+	{ "default", 1,   BOND_VALFLAG_DEFAULT},
+	{ NULL,      -1,  0}
+};
+
+static struct bond_opt_value bond_lp_interval_tbl[] = {
+	{ "minval",  1,       BOND_VALFLAG_MIN | BOND_VALFLAG_DEFAULT},
+	{ "maxval",  INT_MAX, BOND_VALFLAG_MAX},
+};
+
+static struct bond_option bond_opts[] = {
+	[BOND_OPT_MODE] = {
+		.id = BOND_OPT_MODE,
+		.name = "mode",
+		.desc = "bond device mode",
+		.flags = BOND_OPTFLAG_NOSLAVES | BOND_OPTFLAG_IFDOWN,
+		.values = bond_mode_tbl,
+		.set = bond_option_mode_set
+	},
+	[BOND_OPT_PACKETS_PER_SLAVE] = {
+		.id = BOND_OPT_PACKETS_PER_SLAVE,
+		.name = "packets_per_slave",
+		.desc = "Packets to send per slave in RR mode",
+		.unsuppmodes = BOND_MODE_ALL_EX(BIT(BOND_MODE_ROUNDROBIN)),
+		.values = bond_pps_tbl,
+		.set = bond_option_pps_set
+	},
+	[BOND_OPT_XMIT_HASH] = {
+		.id = BOND_OPT_XMIT_HASH,
+		.name = "xmit_hash_policy",
+		.desc = "balance-xor and 802.3ad hashing method",
+		.values = bond_xmit_hashtype_tbl,
+		.set = bond_option_xmit_hash_policy_set
+	},
+	[BOND_OPT_ARP_VALIDATE] = {
+		.id = BOND_OPT_ARP_VALIDATE,
+		.name = "arp_validate",
+		.desc = "validate src/dst of ARP probes",
+		.unsuppmodes = BOND_MODE_ALL_EX(BIT(BOND_MODE_ACTIVEBACKUP)),
+		.values = bond_arp_validate_tbl,
+		.set = bond_option_arp_validate_set
+	},
+	[BOND_OPT_ARP_ALL_TARGETS] = {
+		.id = BOND_OPT_ARP_ALL_TARGETS,
+		.name = "arp_all_targets",
+		.desc = "fail on any/all arp targets timeout",
+		.values = bond_arp_all_targets_tbl,
+		.set = bond_option_arp_all_targets_set
+	},
+	[BOND_OPT_FAIL_OVER_MAC] = {
+		.id = BOND_OPT_FAIL_OVER_MAC,
+		.name = "fail_over_mac",
+		.desc = "For active-backup, do not set all slaves to the same MAC",
+		.flags = BOND_OPTFLAG_NOSLAVES,
+		.values = bond_fail_over_mac_tbl,
+		.set = bond_option_fail_over_mac_set
+	},
+	[BOND_OPT_ARP_INTERVAL] = {
+		.id = BOND_OPT_ARP_INTERVAL,
+		.name = "arp_interval",
+		.desc = "arp interval in milliseconds",
+		.unsuppmodes = BIT(BOND_MODE_8023AD) | BIT(BOND_MODE_TLB) |
+			       BIT(BOND_MODE_ALB),
+		.values = bond_intmax_tbl,
+		.set = bond_option_arp_interval_set
+	},
+	[BOND_OPT_ARP_TARGETS] = {
+		.id = BOND_OPT_ARP_TARGETS,
+		.name = "arp_ip_target",
+		.desc = "arp targets in n.n.n.n form",
+		.flags = BOND_OPTFLAG_RAWVAL,
+		.set = bond_option_arp_ip_targets_set
+	},
+	[BOND_OPT_DOWNDELAY] = {
+		.id = BOND_OPT_DOWNDELAY,
+		.name = "downdelay",
+		.desc = "Delay before considering link down, in milliseconds",
+		.values = bond_intmax_tbl,
+		.set = bond_option_downdelay_set
+	},
+	[BOND_OPT_UPDELAY] = {
+		.id = BOND_OPT_UPDELAY,
+		.name = "updelay",
+		.desc = "Delay before considering link up, in milliseconds",
+		.values = bond_intmax_tbl,
+		.set = bond_option_updelay_set
+	},
+	[BOND_OPT_LACP_RATE] = {
+		.id = BOND_OPT_LACP_RATE,
+		.name = "lacp_rate",
+		.desc = "LACPDU tx rate to request from 802.3ad partner",
+		.flags = BOND_OPTFLAG_IFDOWN,
+		.unsuppmodes = BOND_MODE_ALL_EX(BIT(BOND_MODE_8023AD)),
+		.values = bond_lacp_rate_tbl,
+		.set = bond_option_lacp_rate_set
+	},
+	[BOND_OPT_MINLINKS] = {
+		.id = BOND_OPT_MINLINKS,
+		.name = "min_links",
+		.desc = "Minimum number of available links before turning on carrier",
+		.values = bond_intmax_tbl,
+		.set = bond_option_min_links_set
+	},
+	[BOND_OPT_AD_SELECT] = {
+		.id = BOND_OPT_AD_SELECT,
+		.name = "ad_select",
+		.desc = "802.3ad aggregation selection logic",
+		.flags = BOND_OPTFLAG_IFDOWN,
+		.values = bond_ad_select_tbl,
+		.set = bond_option_ad_select_set
+	},
+	[BOND_OPT_NUM_PEER_NOTIF] = {
+		.id = BOND_OPT_NUM_PEER_NOTIF,
+		.name = "num_unsol_na",
+		.desc = "Number of peer notifications to send on failover event",
+		.values = bond_num_peer_notif_tbl,
+		.set = bond_option_num_peer_notif_set
+	},
+	[BOND_OPT_MIIMON] = {
+		.id = BOND_OPT_MIIMON,
+		.name = "miimon",
+		.desc = "Link check interval in milliseconds",
+		.values = bond_intmax_tbl,
+		.set = bond_option_miimon_set
+	},
+	[BOND_OPT_PRIMARY] = {
+		.id = BOND_OPT_PRIMARY,
+		.name = "primary",
+		.desc = "Primary network device to use",
+		.flags = BOND_OPTFLAG_RAWVAL,
+		.unsuppmodes = BOND_MODE_ALL_EX(BIT(BOND_MODE_ACTIVEBACKUP) |
+						BIT(BOND_MODE_TLB) |
+						BIT(BOND_MODE_ALB)),
+		.set = bond_option_primary_set
+	},
+	[BOND_OPT_PRIMARY_RESELECT] = {
+		.id = BOND_OPT_PRIMARY_RESELECT,
+		.name = "primary_reselect",
+		.desc = "Reselect primary slave once it comes up",
+		.values = bond_primary_reselect_tbl,
+		.set = bond_option_primary_reselect_set
+	},
+	[BOND_OPT_USE_CARRIER] = {
+		.id = BOND_OPT_USE_CARRIER,
+		.name = "use_carrier",
+		.desc = "Use netif_carrier_ok (vs MII ioctls) in miimon",
+		.values = bond_use_carrier_tbl,
+		.set = bond_option_use_carrier_set
+	},
+	[BOND_OPT_ACTIVE_SLAVE] = {
+		.id = BOND_OPT_ACTIVE_SLAVE,
+		.name = "active_slave",
+		.desc = "Currently active slave",
+		.flags = BOND_OPTFLAG_RAWVAL,
+		.unsuppmodes = BOND_MODE_ALL_EX(BIT(BOND_MODE_ACTIVEBACKUP) |
+						BIT(BOND_MODE_TLB) |
+						BIT(BOND_MODE_ALB)),
+		.set = bond_option_active_slave_set
+	},
+	[BOND_OPT_QUEUE_ID] = {
+		.id = BOND_OPT_QUEUE_ID,
+		.name = "queue_id",
+		.desc = "Set queue id of a slave",
+		.flags = BOND_OPTFLAG_RAWVAL,
+		.set = bond_option_queue_id_set
+	},
+	[BOND_OPT_ALL_SLAVES_ACTIVE] = {
+		.id = BOND_OPT_ALL_SLAVES_ACTIVE,
+		.name = "all_slaves_active",
+		.desc = "Keep all frames received on an interface by setting active flag for all slaves",
+		.values = bond_all_slaves_active_tbl,
+		.set = bond_option_all_slaves_active_set
+	},
+	[BOND_OPT_RESEND_IGMP] = {
+		.id = BOND_OPT_RESEND_IGMP,
+		.name = "resend_igmp",
+		.desc = "Number of IGMP membership reports to send on link failure",
+		.values = bond_resend_igmp_tbl,
+		.set = bond_option_resend_igmp_set
+	},
+	[BOND_OPT_LP_INTERVAL] = {
+		.id = BOND_OPT_LP_INTERVAL,
+		.name = "lp_interval",
+		.desc = "The number of seconds between instances where the bonding driver sends learning packets to each slave's peer switch",
+		.values = bond_lp_interval_tbl,
+		.set = bond_option_lp_interval_set
+	},
+	[BOND_OPT_SLAVES] = {
+		.id = BOND_OPT_SLAVES,
+		.name = "slaves",
+		.desc = "Slave membership management",
+		.flags = BOND_OPTFLAG_RAWVAL,
+		.set = bond_option_slaves_set
+	},
+	{ }
+};
+
+/* Searches for a value in opt's values[] table */
+struct bond_opt_value *bond_opt_get_val(unsigned int option, u64 val)
 {
+	struct bond_option *opt;
 	int i;
 
-	for (i = 0; bond_mode_tbl[i].modename; i++);
+	opt = bond_opt_get(option);
+	if (WARN_ON(!opt))
+		return NULL;
+	for (i = 0; opt->values && opt->values[i].string; i++)
+		if (opt->values[i].value == val)
+			return &opt->values[i];
 
-	return mode >= 0 && mode < i;
+	return NULL;
 }
 
-int bond_option_mode_set(struct bonding *bond, int mode)
+/* Searches for a value in opt's values[] table which matches the flagmask */
+static struct bond_opt_value *bond_opt_get_flags(const struct bond_option *opt,
+						 u32 flagmask)
 {
-	if (!bond_mode_is_valid(mode)) {
-		pr_err("invalid mode value %d.\n", mode);
-		return -EINVAL;
+	int i;
+
+	for (i = 0; opt->values && opt->values[i].string; i++)
+		if (opt->values[i].flags & flagmask)
+			return &opt->values[i];
+
+	return NULL;
+}
+
+/* If maxval is missing then there's no range to check. In case minval is
+ * missing then it's considered to be 0.
+ */
+static bool bond_opt_check_range(const struct bond_option *opt, u64 val)
+{
+	struct bond_opt_value *minval, *maxval;
+
+	minval = bond_opt_get_flags(opt, BOND_VALFLAG_MIN);
+	maxval = bond_opt_get_flags(opt, BOND_VALFLAG_MAX);
+	if (!maxval || (minval && val < minval->value) || val > maxval->value)
+		return false;
+
+	return true;
+}
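+
+/* For example, with bond_intmax_tbl above ("off" = 0 is the default and
+ * "maxval" = INT_MAX carries BOND_VALFLAG_MAX, with no BOND_VALFLAG_MIN
+ * entry), any value in the range [0, INT_MAX] passes this check.
+ */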
+
+/**
+ * bond_opt_parse - parse option value
+ * @opt: the option to parse against
+ * @val: value to parse
+ *
+ * This function tries to extract the value from @val and check if it's
+ * a possible match for the option and returns NULL if a match isn't found,
+ * or the struct_opt_value that matched. It also strips the new line from
+ * @val->string if it's present.
+ */
+struct bond_opt_value *bond_opt_parse(const struct bond_option *opt,
+				      struct bond_opt_value *val)
+{
+	char *p, valstr[BOND_OPT_MAX_NAMELEN + 1] = { 0, };
+	struct bond_opt_value *tbl, *ret = NULL;
+	bool checkval;
+	int i, rv;
+
+	/* No parsing if the option wants a raw val */
+	if (opt->flags & BOND_OPTFLAG_RAWVAL)
+		return val;
+
+	tbl = opt->values;
+	if (!tbl)
+		goto out;
+
+	/* ULLONG_MAX is used to bypass string processing */
+	checkval = val->value != ULLONG_MAX;
+	if (!checkval) {
+		if (!val->string)
+			goto out;
+		p = strchr(val->string, '\n');
+		if (p)
+			*p = '\0';
+		for (p = val->string; *p; p++)
+			if (!(isdigit(*p) || isspace(*p)))
+				break;
+		/* The following code extracts the string to match or the value
+		 * and sets checkval appropriately
+		 */
+		if (*p) {
+			rv = sscanf(val->string, "%32s", valstr);
+		} else {
+			rv = sscanf(val->string, "%llu", &val->value);
+			checkval = true;
+		}
+		if (!rv)
+			goto out;
 	}
 
-	if (bond->dev->flags & IFF_UP) {
-		pr_err("%s: unable to update mode because interface is up.\n",
-		       bond->dev->name);
-		return -EPERM;
+	for (i = 0; tbl[i].string; i++) {
+		/* Check for exact match */
+		if (checkval) {
+			if (val->value == tbl[i].value)
+				ret = &tbl[i];
+		} else {
+			if (!strcmp(valstr, "default") &&
+			    (tbl[i].flags & BOND_VALFLAG_DEFAULT))
+				ret = &tbl[i];
+
+			if (!strcmp(valstr, tbl[i].string))
+				ret = &tbl[i];
+		}
+		/* Found an exact match */
+		if (ret)
+			goto out;
 	}
+	/* Possible range match */
+	if (checkval && bond_opt_check_range(opt, val->value))
+		ret = val;
+out:
+	return ret;
+}
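+
+/* Minimal usage sketch: translating either a string or a number into a
+ * table entry, mirroring how bond_check_params() does it in this series
+ * (bond_opt_initstr()/bond_opt_initval() fill the bond_opt_value first):
+ *
+ *	struct bond_opt_value newval, *res;
+ *	char mode[] = "802.3ad";
+ *
+ *	bond_opt_initstr(&newval, mode);
+ *	res = bond_opt_parse(bond_opt_get(BOND_OPT_MODE), &newval);
+ *	if (res)
+ *		pr_info("mode %s maps to %llu\n", res->string, res->value);
+ */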
 
-	if (bond_has_slaves(bond)) {
-		pr_err("%s: unable to update mode because bond has slaves.\n",
-			bond->dev->name);
-		return -EPERM;
+/* Check opt's dependencies against bond mode and currently set options */
+static int bond_opt_check_deps(struct bonding *bond,
+			       const struct bond_option *opt)
+{
+	struct bond_params *params = &bond->params;
+
+	if (test_bit(params->mode, &opt->unsuppmodes))
+		return -EACCES;
+	if ((opt->flags & BOND_OPTFLAG_NOSLAVES) && bond_has_slaves(bond))
+		return -ENOTEMPTY;
+	if ((opt->flags & BOND_OPTFLAG_IFDOWN) && (bond->dev->flags & IFF_UP))
+		return -EBUSY;
+
+	return 0;
+}
+
+static void bond_opt_dep_print(struct bonding *bond,
+			       const struct bond_option *opt)
+{
+	struct bond_opt_value *modeval;
+	struct bond_params *params;
+
+	params = &bond->params;
+	modeval = bond_opt_get_val(BOND_OPT_MODE, params->mode);
+	if (test_bit(params->mode, &opt->unsuppmodes))
+		pr_err("%s: option %s: mode dependency failed, not supported in mode %s(%llu)\n",
+		       bond->dev->name, opt->name,
+		       modeval->string, modeval->value);
+}
+
+static void bond_opt_error_interpret(struct bonding *bond,
+				     const struct bond_option *opt,
+				     int error, struct bond_opt_value *val)
+{
+	struct bond_opt_value *minval, *maxval;
+	char *p;
+
+	switch (error) {
+	case -EINVAL:
+		if (val) {
+			if (val->string) {
+				/* sometimes RAWVAL opts may have new lines */
+				p = strchr(val->string, '\n');
+				if (p)
+					*p = '\0';
+				pr_err("%s: option %s: invalid value (%s).\n",
+				       bond->dev->name, opt->name, val->string);
+			} else {
+				pr_err("%s: option %s: invalid value (%llu).\n",
+				       bond->dev->name, opt->name, val->value);
+			}
+		}
+		minval = bond_opt_get_flags(opt, BOND_VALFLAG_MIN);
+		maxval = bond_opt_get_flags(opt, BOND_VALFLAG_MAX);
+		if (!maxval)
+			break;
+		pr_err("%s: option %s: allowed values %llu - %llu.\n",
+		       bond->dev->name, opt->name, minval ? minval->value : 0,
+		       maxval->value);
+		break;
+	case -EACCES:
+		bond_opt_dep_print(bond, opt);
+		break;
+	case -ENOTEMPTY:
+		pr_err("%s: option %s: unable to set because the bond device has slaves.\n",
+		       bond->dev->name, opt->name);
+		break;
+	case -EBUSY:
+		pr_err("%s: option %s: unable to set because the bond device is up.\n",
+		       bond->dev->name, opt->name);
+		break;
+	default:
+		break;
+	}
+}
+
+/**
+ * __bond_opt_set - set a bonding option
+ * @bond: target bond device
+ * @option: option to set
+ * @val: value to set it to
+ *
+ * This function is used to change the bond's option value, it can be
+ * used for both enabling/changing an option and for disabling it. RTNL lock
+ * must be obtained before calling this function.
+ */
+int __bond_opt_set(struct bonding *bond,
+		   unsigned int option, struct bond_opt_value *val)
+{
+	struct bond_opt_value *retval = NULL;
+	const struct bond_option *opt;
+	int ret = -ENOENT;
+
+	ASSERT_RTNL();
+
+	opt = bond_opt_get(option);
+	if (WARN_ON(!val) || WARN_ON(!opt))
+		goto out;
+	ret = bond_opt_check_deps(bond, opt);
+	if (ret)
+		goto out;
+	retval = bond_opt_parse(opt, val);
+	if (!retval) {
+		ret = -EINVAL;
+		goto out;
 	}
+	ret = opt->set(bond, retval);
+out:
+	if (ret)
+		bond_opt_error_interpret(bond, opt, ret, val);
 
-	if (BOND_NO_USES_ARP(mode) && bond->params.arp_interval) {
+	return ret;
+}
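+
+/* Minimal sketch of a caller, mirroring the netlink path in this series;
+ * RTNL must already be held and the caller handles the error:
+ *
+ *	struct bond_opt_value newval;
+ *	int err;
+ *
+ *	ASSERT_RTNL();
+ *	bond_opt_initval(&newval, 100);
+ *	err = __bond_opt_set(bond, BOND_OPT_MIIMON, &newval);
+ */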
+
+/**
+ * bond_opt_tryset_rtnl - try to acquire rtnl and call __bond_opt_set
+ * @bond: target bond device
+ * @option: option to set
+ * @buf: value to set it to
+ *
+ * This function tries to acquire RTNL without blocking and if successful
+ * calls __bond_opt_set. It is mainly used for sysfs option manipulation.
+ */
+int bond_opt_tryset_rtnl(struct bonding *bond, unsigned int option, char *buf)
+{
+	struct bond_opt_value optval;
+	int ret;
+
+	if (!rtnl_trylock())
+		return restart_syscall();
+	bond_opt_initstr(&optval, buf);
+	ret = __bond_opt_set(bond, option, &optval);
+	rtnl_unlock();
+
+	return ret;
+}
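+
+/* Illustrative sketch of a sysfs store callback built on this helper
+ * (the handler name and the to_bond() accessor are assumed from
+ * bond_sysfs.c, not shown here):
+ *
+ *	static ssize_t bonding_store_miimon(struct device *d,
+ *					    struct device_attribute *attr,
+ *					    const char *buf, size_t count)
+ *	{
+ *		struct bonding *bond = to_bond(d);
+ *		int ret;
+ *
+ *		ret = bond_opt_tryset_rtnl(bond, BOND_OPT_MIIMON, (char *)buf);
+ *		return ret ? ret : count;
+ *	}
+ */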
+
+/**
+ * bond_opt_get - get a pointer to an option
+ * @option: option for which to return a pointer
+ *
+ * This function checks if option is valid and if so returns a pointer
+ * to its entry in the bond_opts[] option array.
+ */
+struct bond_option *bond_opt_get(unsigned int option)
+{
+	if (!BOND_OPT_VALID(option))
+		return NULL;
+
+	return &bond_opts[option];
+}
+
+int bond_option_mode_set(struct bonding *bond, struct bond_opt_value *newval)
+{
+	if (BOND_NO_USES_ARP(newval->value) && bond->params.arp_interval) {
 		pr_info("%s: %s mode is incompatible with arp monitoring, start mii monitoring\n",
-			bond->dev->name, bond_mode_tbl[mode].modename);
+			bond->dev->name, newval->string);
 		/* disable arp monitoring */
 		bond->params.arp_interval = 0;
 		/* set miimon to default value */
@@ -58,7 +595,8 @@ int bond_option_mode_set(struct bonding *bond, int mode)
 
 	/* don't cache arp_validate between modes */
 	bond->params.arp_validate = BOND_ARP_VALIDATE_NONE;
-	bond->params.mode = mode;
+	bond->params.mode = newval->value;
+
 	return 0;
 }
 
@@ -81,10 +619,21 @@ struct net_device *bond_option_active_slave_get(struct bonding *bond)
 }
 
 int bond_option_active_slave_set(struct bonding *bond,
-				 struct net_device *slave_dev)
+				 struct bond_opt_value *newval)
 {
+	char ifname[IFNAMSIZ] = { 0, };
+	struct net_device *slave_dev;
 	int ret = 0;
 
+	sscanf(newval->string, "%15s", ifname); /* IFNAMSIZ */
+	if (!strlen(ifname) || newval->string[0] == '\n') {
+		slave_dev = NULL;
+	} else {
+		slave_dev = __dev_get_by_name(dev_net(bond->dev), ifname);
+		if (!slave_dev)
+			return -ENODEV;
+	}
+
 	if (slave_dev) {
 		if (!netif_is_bond_slave(slave_dev)) {
 			pr_err("Device %s is not bonding slave.\n",
@@ -99,14 +648,7 @@ int bond_option_active_slave_set(struct bonding *bond,
 		}
 	}
 
-	if (!USES_PRIMARY(bond->params.mode)) {
-		pr_err("%s: Unable to change active slave; %s is in mode %d\n",
-		       bond->dev->name, bond->dev->name, bond->params.mode);
-		return -EINVAL;
-	}
-
 	block_netpoll_tx();
-	read_lock(&bond->lock);
 	write_lock_bh(&bond->curr_slave_lock);
 
 	/* check to see if we are clearing active */
@@ -141,7 +683,604 @@ int bond_option_active_slave_set(struct bonding *bond,
 	}
 
 	write_unlock_bh(&bond->curr_slave_lock);
+	unblock_netpoll_tx();
+
+	return ret;
+}
+
+int bond_option_miimon_set(struct bonding *bond, struct bond_opt_value *newval)
+{
+	pr_info("%s: Setting MII monitoring interval to %llu.\n",
+		bond->dev->name, newval->value);
+	bond->params.miimon = newval->value;
+	if (bond->params.updelay)
+		pr_info("%s: Note: Updating updelay (to %d) since it is a multiple of the miimon value.\n",
+			bond->dev->name,
+			bond->params.updelay * bond->params.miimon);
+	if (bond->params.downdelay)
+		pr_info("%s: Note: Updating downdelay (to %d) since it is a multiple of the miimon value.\n",
+			bond->dev->name,
+			bond->params.downdelay * bond->params.miimon);
+	if (newval->value && bond->params.arp_interval) {
+		pr_info("%s: MII monitoring cannot be used with ARP monitoring. Disabling ARP monitoring...\n",
+			bond->dev->name);
+		bond->params.arp_interval = 0;
+		if (bond->params.arp_validate)
+			bond->params.arp_validate = BOND_ARP_VALIDATE_NONE;
+	}
+	if (bond->dev->flags & IFF_UP) {
+		/* If the interface is up, we may need to fire off
+		 * the MII timer. If the interface is down, the
+		 * timer will get fired off when the open function
+		 * is called.
+		 */
+		if (!newval->value) {
+			cancel_delayed_work_sync(&bond->mii_work);
+		} else {
+			cancel_delayed_work_sync(&bond->arp_work);
+			queue_delayed_work(bond->wq, &bond->mii_work, 0);
+		}
+	}
+
+	return 0;
+}
+
+int bond_option_updelay_set(struct bonding *bond, struct bond_opt_value *newval)
+{
+	int value = newval->value;
+
+	if (!bond->params.miimon) {
+		pr_err("%s: Unable to set up delay as MII monitoring is disabled\n",
+		       bond->dev->name);
+		return -EPERM;
+	}
+	if ((value % bond->params.miimon) != 0) {
+		pr_warn("%s: Warning: up delay (%d) is not a multiple of miimon (%d), updelay rounded to %d ms\n",
+			bond->dev->name, value,
+			bond->params.miimon,
+			(value / bond->params.miimon) *
+			bond->params.miimon);
+	}
+	bond->params.updelay = value / bond->params.miimon;
+	pr_info("%s: Setting up delay to %d.\n",
+		bond->dev->name,
+		bond->params.updelay * bond->params.miimon);
+
+	return 0;
+}
+
+int bond_option_downdelay_set(struct bonding *bond,
+			      struct bond_opt_value *newval)
+{
+	int value = newval->value;
+
+	if (!bond->params.miimon) {
+		pr_err("%s: Unable to set down delay as MII monitoring is disabled\n",
+		       bond->dev->name);
+		return -EPERM;
+	}
+	if ((value % bond->params.miimon) != 0) {
+		pr_warn("%s: Warning: down delay (%d) is not a multiple of miimon (%d), delay rounded to %d ms\n",
+			bond->dev->name, value,
+			bond->params.miimon,
+			(value / bond->params.miimon) *
+			bond->params.miimon);
+	}
+	bond->params.downdelay = value / bond->params.miimon;
+	pr_info("%s: Setting down delay to %d.\n",
+		bond->dev->name,
+		bond->params.downdelay * bond->params.miimon);
+
+	return 0;
+}
+
+int bond_option_use_carrier_set(struct bonding *bond,
+				struct bond_opt_value *newval)
+{
+	pr_info("%s: Setting use_carrier to %llu.\n",
+		bond->dev->name, newval->value);
+	bond->params.use_carrier = newval->value;
+
+	return 0;
+}
+
+int bond_option_arp_interval_set(struct bonding *bond,
+				 struct bond_opt_value *newval)
+{
+	pr_info("%s: Setting ARP monitoring interval to %llu.\n",
+		bond->dev->name, newval->value);
+	bond->params.arp_interval = newval->value;
+	if (newval->value) {
+		if (bond->params.miimon) {
+			pr_info("%s: ARP monitoring cannot be used with MII monitoring. Disabling MII monitoring.\n",
+				bond->dev->name);
+			bond->params.miimon = 0;
+		}
+		if (!bond->params.arp_targets[0])
+			pr_info("%s: ARP monitoring has been set up, but no ARP targets have been specified.\n",
+				bond->dev->name);
+	}
+	if (bond->dev->flags & IFF_UP) {
+		/* If the interface is up, we may need to fire off
+		 * the ARP timer.  If the interface is down, the
+		 * timer will get fired off when the open function
+		 * is called.
+		 */
+		if (!newval->value) {
+			if (bond->params.arp_validate)
+				bond->recv_probe = NULL;
+			cancel_delayed_work_sync(&bond->arp_work);
+		} else {
+			/* arp_validate can be set only in active-backup mode */
+			if (bond->params.arp_validate)
+				bond->recv_probe = bond_arp_rcv;
+			cancel_delayed_work_sync(&bond->mii_work);
+			queue_delayed_work(bond->wq, &bond->arp_work, 0);
+		}
+	}
+
+	return 0;
+}
+
+static void _bond_options_arp_ip_target_set(struct bonding *bond, int slot,
+					    __be32 target,
+					    unsigned long last_rx)
+{
+	__be32 *targets = bond->params.arp_targets;
+	struct list_head *iter;
+	struct slave *slave;
+
+	if (slot >= 0 && slot < BOND_MAX_ARP_TARGETS) {
+		bond_for_each_slave(bond, slave, iter)
+			slave->target_last_arp_rx[slot] = last_rx;
+		targets[slot] = target;
+	}
+}
+
+static int _bond_option_arp_ip_target_add(struct bonding *bond, __be32 target)
+{
+	__be32 *targets = bond->params.arp_targets;
+	int ind;
+
+	if (IS_IP_TARGET_UNUSABLE_ADDRESS(target)) {
+		pr_err("%s: invalid ARP target %pI4 specified for addition\n",
+		       bond->dev->name, &target);
+		return -EINVAL;
+	}
+
+	if (bond_get_targets_ip(targets, target) != -1) { /* dup */
+		pr_err("%s: ARP target %pI4 is already present\n",
+		       bond->dev->name, &target);
+		return -EINVAL;
+	}
+
+	ind = bond_get_targets_ip(targets, 0); /* first free slot */
+	if (ind == -1) {
+		pr_err("%s: ARP target table is full!\n",
+		       bond->dev->name);
+		return -EINVAL;
+	}
+
+	pr_info("%s: adding ARP target %pI4.\n", bond->dev->name, &target);
+
+	_bond_options_arp_ip_target_set(bond, ind, target, jiffies);
+
+	return 0;
+}
+
+int bond_option_arp_ip_target_add(struct bonding *bond, __be32 target)
+{
+	int ret;
+
+	/* not to race with bond_arp_rcv */
+	write_lock_bh(&bond->lock);
+	ret = _bond_option_arp_ip_target_add(bond, target);
+	write_unlock_bh(&bond->lock);
+
+	return ret;
+}
+
+int bond_option_arp_ip_target_rem(struct bonding *bond, __be32 target)
+{
+	__be32 *targets = bond->params.arp_targets;
+	struct list_head *iter;
+	struct slave *slave;
+	unsigned long *targets_rx;
+	int ind, i;
+
+	if (IS_IP_TARGET_UNUSABLE_ADDRESS(target)) {
+		pr_err("%s: invalid ARP target %pI4 specified for removal\n",
+		       bond->dev->name, &target);
+		return -EINVAL;
+	}
+
+	ind = bond_get_targets_ip(targets, target);
+	if (ind == -1) {
+		pr_err("%s: unable to remove nonexistent ARP target %pI4.\n",
+		       bond->dev->name, &target);
+		return -EINVAL;
+	}
+
+	if (ind == 0 && !targets[1] && bond->params.arp_interval)
+		pr_warn("%s: removing last arp target with arp_interval on\n",
+			bond->dev->name);
+
+	pr_info("%s: removing ARP target %pI4.\n", bond->dev->name,
+		&target);
+
+	/* not to race with bond_arp_rcv */
+	write_lock_bh(&bond->lock);
+
+	bond_for_each_slave(bond, slave, iter) {
+		targets_rx = slave->target_last_arp_rx;
+		for (i = ind; (i < BOND_MAX_ARP_TARGETS-1) && targets[i+1]; i++)
+			targets_rx[i] = targets_rx[i+1];
+		targets_rx[i] = 0;
+	}
+	for (i = ind; (i < BOND_MAX_ARP_TARGETS-1) && targets[i+1]; i++)
+		targets[i] = targets[i+1];
+	targets[i] = 0;
+
+	write_unlock_bh(&bond->lock);
+
+	return 0;
+}
+
+void bond_option_arp_ip_targets_clear(struct bonding *bond)
+{
+	int i;
+
+	/* not to race with bond_arp_rcv */
+	write_lock_bh(&bond->lock);
+	for (i = 0; i < BOND_MAX_ARP_TARGETS; i++)
+		_bond_options_arp_ip_target_set(bond, i, 0, 0);
+	write_unlock_bh(&bond->lock);
+}
+
+int bond_option_arp_ip_targets_set(struct bonding *bond,
+				   struct bond_opt_value *newval)
+{
+	int ret = -EPERM;
+	__be32 target;
+
+	if (newval->string) {
+		if (!in4_pton(newval->string+1, -1, (u8 *)&target, -1, NULL)) {
+			pr_err("%s: invalid ARP target %pI4 specified\n",
+			       bond->dev->name, &target);
+			return ret;
+		}
+		if (newval->string[0] == '+')
+			ret = bond_option_arp_ip_target_add(bond, target);
+		else if (newval->string[0] == '-')
+			ret = bond_option_arp_ip_target_rem(bond, target);
+		else
+			pr_err("no command found in arp_ip_targets file for bond %s. Use +<addr> or -<addr>.\n",
+			       bond->dev->name);
+	} else {
+		target = newval->value;
+		ret = bond_option_arp_ip_target_add(bond, target);
+	}
+
+	return ret;
+}
+
+int bond_option_arp_validate_set(struct bonding *bond,
+				 struct bond_opt_value *newval)
+{
+	pr_info("%s: setting arp_validate to %s (%llu).\n",
+		bond->dev->name, newval->string, newval->value);
+
+	if (bond->dev->flags & IFF_UP) {
+		if (!newval->value)
+			bond->recv_probe = NULL;
+		else if (bond->params.arp_interval)
+			bond->recv_probe = bond_arp_rcv;
+	}
+	bond->params.arp_validate = newval->value;
+
+	return 0;
+}
+
+int bond_option_arp_all_targets_set(struct bonding *bond,
+				    struct bond_opt_value *newval)
+{
+	pr_info("%s: setting arp_all_targets to %s (%llu).\n",
+		bond->dev->name, newval->string, newval->value);
+	bond->params.arp_all_targets = newval->value;
+
+	return 0;
+}
+
+int bond_option_primary_set(struct bonding *bond, struct bond_opt_value *newval)
+{
+	char *p, *primary = newval->string;
+	struct list_head *iter;
+	struct slave *slave;
+
+	block_netpoll_tx();
+	read_lock(&bond->lock);
+	write_lock_bh(&bond->curr_slave_lock);
+
+	p = strchr(primary, '\n');
+	if (p)
+		*p = '\0';
+	/* check to see if we are clearing primary */
+	if (!strlen(primary)) {
+		pr_info("%s: Setting primary slave to None.\n",
+			bond->dev->name);
+		bond->primary_slave = NULL;
+		memset(bond->params.primary, 0, sizeof(bond->params.primary));
+		bond_select_active_slave(bond);
+		goto out;
+	}
+
+	bond_for_each_slave(bond, slave, iter) {
+		if (strncmp(slave->dev->name, primary, IFNAMSIZ) == 0) {
+			pr_info("%s: Setting %s as primary slave.\n",
+				bond->dev->name, slave->dev->name);
+			bond->primary_slave = slave;
+			strcpy(bond->params.primary, slave->dev->name);
+			bond_select_active_slave(bond);
+			goto out;
+		}
+	}
+
+	if (bond->primary_slave) {
+		pr_info("%s: Setting primary slave to None.\n",
+			bond->dev->name);
+		bond->primary_slave = NULL;
+		bond_select_active_slave(bond);
+	}
+	strncpy(bond->params.primary, primary, IFNAMSIZ);
+	bond->params.primary[IFNAMSIZ - 1] = 0;
+
+	pr_info("%s: Recording %s as primary, but it has not been enslaved to %s yet.\n",
+		bond->dev->name, primary, bond->dev->name);
+
+out:
+	write_unlock_bh(&bond->curr_slave_lock);
 	read_unlock(&bond->lock);
 	unblock_netpoll_tx();
+
+	return 0;
+}
+
+int bond_option_primary_reselect_set(struct bonding *bond,
+				     struct bond_opt_value *newval)
+{
+	pr_info("%s: setting primary_reselect to %s (%llu).\n",
+		bond->dev->name, newval->string, newval->value);
+	bond->params.primary_reselect = newval->value;
+
+	block_netpoll_tx();
+	write_lock_bh(&bond->curr_slave_lock);
+	bond_select_active_slave(bond);
+	write_unlock_bh(&bond->curr_slave_lock);
+	unblock_netpoll_tx();
+
+	return 0;
+}
+
+int bond_option_fail_over_mac_set(struct bonding *bond,
+				  struct bond_opt_value *newval)
+{
+	pr_info("%s: Setting fail_over_mac to %s (%llu).\n",
+		bond->dev->name, newval->string, newval->value);
+	bond->params.fail_over_mac = newval->value;
+
+	return 0;
+}
+
+int bond_option_xmit_hash_policy_set(struct bonding *bond,
+				     struct bond_opt_value *newval)
+{
+	pr_info("%s: setting xmit hash policy to %s (%llu).\n",
+		bond->dev->name, newval->string, newval->value);
+	bond->params.xmit_policy = newval->value;
+
+	return 0;
+}
+
+int bond_option_resend_igmp_set(struct bonding *bond,
+				struct bond_opt_value *newval)
+{
+	pr_info("%s: Setting resend_igmp to %llu.\n",
+		bond->dev->name, newval->value);
+	bond->params.resend_igmp = newval->value;
+
+	return 0;
+}
+
+int bond_option_num_peer_notif_set(struct bonding *bond,
+				   struct bond_opt_value *newval)
+{
+	bond->params.num_peer_notif = newval->value;
+
+	return 0;
+}
+
+int bond_option_all_slaves_active_set(struct bonding *bond,
+				      struct bond_opt_value *newval)
+{
+	struct list_head *iter;
+	struct slave *slave;
+
+	if (newval->value == bond->params.all_slaves_active)
+		return 0;
+	bond->params.all_slaves_active = newval->value;
+	bond_for_each_slave(bond, slave, iter) {
+		if (!bond_is_active_slave(slave)) {
+			if (newval->value)
+				slave->inactive = 0;
+			else
+				slave->inactive = 1;
+		}
+	}
+
+	return 0;
+}
+
+int bond_option_min_links_set(struct bonding *bond,
+			      struct bond_opt_value *newval)
+{
+	pr_info("%s: Setting min links value to %llu\n",
+		bond->dev->name, newval->value);
+	bond->params.min_links = newval->value;
+
+	return 0;
+}
+
+int bond_option_lp_interval_set(struct bonding *bond,
+				struct bond_opt_value *newval)
+{
+	bond->params.lp_interval = newval->value;
+
+	return 0;
+}
+
+int bond_option_pps_set(struct bonding *bond, struct bond_opt_value *newval)
+{
+	bond->params.packets_per_slave = newval->value;
+	if (newval->value > 0) {
+		bond->params.reciprocal_packets_per_slave =
+			reciprocal_value(newval->value);
+	} else {
+		/* reciprocal_packets_per_slave is unused if
+		 * packets_per_slave is 0 or 1, just initialize it
+		 */
+		bond->params.reciprocal_packets_per_slave =
+			(struct reciprocal_value) { 0 };
+	}
+
+	return 0;
+}
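+
+/* Rough sketch of how the cached reciprocal can be consumed on the
+ * round-robin tx path; reciprocal_divide() turns the per-packet division
+ * by packets_per_slave into a multiply and shift ("tx_counter" here
+ * stands in for the bond's round-robin packet counter):
+ *
+ *	slave_id = reciprocal_divide(tx_counter,
+ *				     bond->params.reciprocal_packets_per_slave);
+ */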
+
+int bond_option_lacp_rate_set(struct bonding *bond,
+			      struct bond_opt_value *newval)
+{
+	pr_info("%s: Setting LACP rate to %s (%llu).\n",
+		bond->dev->name, newval->string, newval->value);
+	bond->params.lacp_fast = newval->value;
+	bond_3ad_update_lacp_rate(bond);
+
+	return 0;
+}
+
+int bond_option_ad_select_set(struct bonding *bond,
+			      struct bond_opt_value *newval)
+{
+	pr_info("%s: Setting ad_select to %s (%llu).\n",
+		bond->dev->name, newval->string, newval->value);
+	bond->params.ad_select = newval->value;
+
+	return 0;
+}
+
+int bond_option_queue_id_set(struct bonding *bond,
+			     struct bond_opt_value *newval)
+{
+	struct slave *slave, *update_slave;
+	struct net_device *sdev;
+	struct list_head *iter;
+	char *delim;
+	int ret = 0;
+	u16 qid;
+
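+	/* Expected input is "<ifname>:<queue id>", e.g. "eth0:2" (the
+	 * interface name here is only illustrative).
+	 */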
+	/* delim will point to queue id if successful */
+	delim = strchr(newval->string, ':');
+	if (!delim)
+		goto err_no_cmd;
+
+	/* Terminate string that points to device name and bump it
+	 * up one, so we can read the queue id there.
+	 */
+	*delim = '\0';
+	if (sscanf(++delim, "%hu\n", &qid) != 1)
+		goto err_no_cmd;
+
+	/* Check buffer length, valid ifname and queue id */
+	if (strlen(newval->string) > IFNAMSIZ ||
+	    !dev_valid_name(newval->string) ||
+	    qid > bond->dev->real_num_tx_queues)
+		goto err_no_cmd;
+
+	/* Get the pointer to that interface if it exists */
+	sdev = __dev_get_by_name(dev_net(bond->dev), newval->string);
+	if (!sdev)
+		goto err_no_cmd;
+
+	/* Search for the slave and check for duplicate qids */
+	update_slave = NULL;
+	bond_for_each_slave(bond, slave, iter) {
+		if (sdev == slave->dev)
+			/* We don't need to check the matching
+			 * slave for dups, since we're overwriting it
+			 */
+			update_slave = slave;
+		else if (qid && qid == slave->queue_id) {
+			goto err_no_cmd;
+		}
+	}
+
+	if (!update_slave)
+		goto err_no_cmd;
+
+	/* Actually set the qids for the slave */
+	update_slave->queue_id = qid;
+
+out:
 	return ret;
+
+err_no_cmd:
+	pr_info("invalid input for queue_id set for %s.\n",
+		bond->dev->name);
+	ret = -EPERM;
+	goto out;
+}
+
+int bond_option_slaves_set(struct bonding *bond, struct bond_opt_value *newval)
+{
+	char command[IFNAMSIZ + 1] = { 0, };
+	struct net_device *dev;
+	char *ifname;
+	int ret;
+
+	sscanf(newval->string, "%16s", command); /* IFNAMSIZ */
+	ifname = command + 1;
+	if ((strlen(command) <= 1) ||
+	    !dev_valid_name(ifname))
+		goto err_no_cmd;
+
+	dev = __dev_get_by_name(dev_net(bond->dev), ifname);
+	if (!dev) {
+		pr_info("%s: Interface %s does not exist!\n",
+			bond->dev->name, ifname);
+		ret = -ENODEV;
+		goto out;
+	}
+
+	switch (command[0]) {
+	case '+':
+		pr_info("%s: Adding slave %s.\n", bond->dev->name, dev->name);
+		ret = bond_enslave(bond->dev, dev);
+		break;
+
+	case '-':
+		pr_info("%s: Removing slave %s.\n", bond->dev->name, dev->name);
+		ret = bond_release(bond->dev, dev);
+		break;
+
+	default:
+		goto err_no_cmd;
+	}
+
+out:
+	return ret;
+
+err_no_cmd:
+	pr_err("no command found in slaves file for bond %s. Use +ifname or -ifname.\n",
+	       bond->dev->name);
+	ret = -EPERM;
+	goto out;
 }
diff --git a/drivers/net/bonding/bond_options.h b/drivers/net/bonding/bond_options.h
new file mode 100644
index 000000000000..433d37f6940b
--- /dev/null
+++ b/drivers/net/bonding/bond_options.h
@@ -0,0 +1,170 @@
+/*
+ * drivers/net/bonding/bond_options.h - bonding options
+ * Copyright (c) 2013 Nikolay Aleksandrov <nikolay@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _BOND_OPTIONS_H
+#define _BOND_OPTIONS_H
+
+#define BOND_OPT_MAX_NAMELEN 32
+#define BOND_OPT_VALID(opt) ((opt) < BOND_OPT_LAST)
+#define BOND_MODE_ALL_EX(x) (~(x))
+
+/* Option flags:
+ * BOND_OPTFLAG_NOSLAVES - check if the bond device is empty before setting
+ * BOND_OPTFLAG_IFDOWN - check if the bond device is down before setting
+ * BOND_OPTFLAG_RAWVAL - the option parses the value itself
+ */
+enum {
+	BOND_OPTFLAG_NOSLAVES	= BIT(0),
+	BOND_OPTFLAG_IFDOWN	= BIT(1),
+	BOND_OPTFLAG_RAWVAL	= BIT(2)
+};
+
+/* Value type flags:
+ * BOND_VALFLAG_DEFAULT - mark the value as default
+ * BOND_VALFLAG_(MIN|MAX) - mark the value as min/max
+ */
+enum {
+	BOND_VALFLAG_DEFAULT	= BIT(0),
+	BOND_VALFLAG_MIN	= BIT(1),
+	BOND_VALFLAG_MAX	= BIT(2)
+};
+
+/* Option IDs, their bit positions correspond to their IDs */
+enum {
+	BOND_OPT_MODE,
+	BOND_OPT_PACKETS_PER_SLAVE,
+	BOND_OPT_XMIT_HASH,
+	BOND_OPT_ARP_VALIDATE,
+	BOND_OPT_ARP_ALL_TARGETS,
+	BOND_OPT_FAIL_OVER_MAC,
+	BOND_OPT_ARP_INTERVAL,
+	BOND_OPT_ARP_TARGETS,
+	BOND_OPT_DOWNDELAY,
+	BOND_OPT_UPDELAY,
+	BOND_OPT_LACP_RATE,
+	BOND_OPT_MINLINKS,
+	BOND_OPT_AD_SELECT,
+	BOND_OPT_NUM_PEER_NOTIF,
+	BOND_OPT_MIIMON,
+	BOND_OPT_PRIMARY,
+	BOND_OPT_PRIMARY_RESELECT,
+	BOND_OPT_USE_CARRIER,
+	BOND_OPT_ACTIVE_SLAVE,
+	BOND_OPT_QUEUE_ID,
+	BOND_OPT_ALL_SLAVES_ACTIVE,
+	BOND_OPT_RESEND_IGMP,
+	BOND_OPT_LP_INTERVAL,
+	BOND_OPT_SLAVES,
+	BOND_OPT_LAST
+};
+
+/* This structure is used for storing option values and for passing option
+ * values when changing an option. The logic when used as an arg is as follows:
+ * - if string != NULL -> parse it, if the opt is RAW type then return it, else
+ *   return the parse result
+ * - if string == NULL -> parse value
+ */
+struct bond_opt_value {
+	char *string;
+	u64 value;
+	u32 flags;
+};
+
+struct bonding;
+
+struct bond_option {
+	int id;
+	char *name;
+	char *desc;
+	u32 flags;
+
+	/* unsuppmodes is used to denote modes in which the option isn't
+	 * supported.
+	 */
+	unsigned long unsuppmodes;
+	/* supported values which this option can take; may be a subset of
+	 * BOND_OPTVAL_RANGE's value range
+	 */
+	struct bond_opt_value *values;
+
+	int (*set)(struct bonding *bond, struct bond_opt_value *val);
+};
+
+int __bond_opt_set(struct bonding *bond, unsigned int option,
+		   struct bond_opt_value *val);
+int bond_opt_tryset_rtnl(struct bonding *bond, unsigned int option, char *buf);
+struct bond_opt_value *bond_opt_parse(const struct bond_option *opt,
+				      struct bond_opt_value *val);
+struct bond_option *bond_opt_get(unsigned int option);
+struct bond_opt_value *bond_opt_get_val(unsigned int option, u64 val);
+
+/* This helper is used to initialize a bond_opt_value structure for parameter
+ * passing. There should be either a valid string or value, but not both.
+ * When value is ULLONG_MAX then string will be used.
+ */
+static inline void __bond_opt_init(struct bond_opt_value *optval,
+				   char *string, u64 value)
+{
+	memset(optval, 0, sizeof(*optval));
+	optval->value = ULLONG_MAX;
+	if (value == ULLONG_MAX)
+		optval->string = string;
+	else
+		optval->value = value;
+}
+#define bond_opt_initval(optval, value) __bond_opt_init(optval, NULL, value)
+#define bond_opt_initstr(optval, str) __bond_opt_init(optval, str, ULLONG_MAX)
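+
+/* Usage sketch (illustrative only): a caller builds a bond_opt_value on the
+ * stack and passes it to __bond_opt_set(), e.g.
+ *
+ *	struct bond_opt_value newval;
+ *
+ *	bond_opt_initval(&newval, 100);
+ *	__bond_opt_set(bond, BOND_OPT_MIIMON, &newval);
+ *
+ * or, for a string-based value:
+ *
+ *	bond_opt_initstr(&newval, "100");
+ *	__bond_opt_set(bond, BOND_OPT_MIIMON, &newval);
+ */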
+
+int bond_option_mode_set(struct bonding *bond, struct bond_opt_value *newval);
+int bond_option_pps_set(struct bonding *bond, struct bond_opt_value *newval);
+int bond_option_xmit_hash_policy_set(struct bonding *bond,
+				     struct bond_opt_value *newval);
+int bond_option_arp_validate_set(struct bonding *bond,
+				 struct bond_opt_value *newval);
+int bond_option_arp_all_targets_set(struct bonding *bond,
+				    struct bond_opt_value *newval);
+int bond_option_fail_over_mac_set(struct bonding *bond,
+				  struct bond_opt_value *newval);
+int bond_option_arp_interval_set(struct bonding *bond,
+				 struct bond_opt_value *newval);
+int bond_option_arp_ip_targets_set(struct bonding *bond,
+				   struct bond_opt_value *newval);
+void bond_option_arp_ip_targets_clear(struct bonding *bond);
+int bond_option_downdelay_set(struct bonding *bond,
+			      struct bond_opt_value *newval);
+int bond_option_updelay_set(struct bonding *bond,
+			    struct bond_opt_value *newval);
+int bond_option_lacp_rate_set(struct bonding *bond,
+			      struct bond_opt_value *newval);
+int bond_option_min_links_set(struct bonding *bond,
+			      struct bond_opt_value *newval);
+int bond_option_ad_select_set(struct bonding *bond,
+			      struct bond_opt_value *newval);
+int bond_option_num_peer_notif_set(struct bonding *bond,
+				   struct bond_opt_value *newval);
+int bond_option_miimon_set(struct bonding *bond, struct bond_opt_value *newval);
+int bond_option_primary_set(struct bonding *bond,
+			    struct bond_opt_value *newval);
+int bond_option_primary_reselect_set(struct bonding *bond,
+				     struct bond_opt_value *newval);
+int bond_option_use_carrier_set(struct bonding *bond,
+				struct bond_opt_value *newval);
+int bond_option_active_slave_set(struct bonding *bond,
+				 struct bond_opt_value *newval);
+int bond_option_queue_id_set(struct bonding *bond,
+			     struct bond_opt_value *newval);
+int bond_option_all_slaves_active_set(struct bonding *bond,
+				      struct bond_opt_value *newval);
+int bond_option_resend_igmp_set(struct bonding *bond,
+				struct bond_opt_value *newval);
+int bond_option_lp_interval_set(struct bonding *bond,
+				struct bond_opt_value *newval);
+int bond_option_slaves_set(struct bonding *bond, struct bond_opt_value *newval);
+#endif /* _BOND_OPTIONS_H */
diff --git a/drivers/net/bonding/bond_procfs.c b/drivers/net/bonding/bond_procfs.c
index fb868d6c22da..3ac20e78eafc 100644
--- a/drivers/net/bonding/bond_procfs.c
+++ b/drivers/net/bonding/bond_procfs.c
@@ -65,6 +65,7 @@ static void bond_info_seq_stop(struct seq_file *seq, void *v)
 static void bond_info_show_master(struct seq_file *seq)
 {
 	struct bonding *bond = seq->private;
+	struct bond_opt_value *optval;
 	struct slave *curr;
 	int i;
 
@@ -76,26 +77,32 @@ static void bond_info_show_master(struct seq_file *seq)
 		   bond_mode_name(bond->params.mode));
 
 	if (bond->params.mode == BOND_MODE_ACTIVEBACKUP &&
-	    bond->params.fail_over_mac)
-		seq_printf(seq, " (fail_over_mac %s)",
-		   fail_over_mac_tbl[bond->params.fail_over_mac].modename);
+	    bond->params.fail_over_mac) {
+		optval = bond_opt_get_val(BOND_OPT_FAIL_OVER_MAC,
+					  bond->params.fail_over_mac);
+		seq_printf(seq, " (fail_over_mac %s)", optval->string);
+	}
 
 	seq_printf(seq, "\n");
 
 	if (bond->params.mode == BOND_MODE_XOR ||
 		bond->params.mode == BOND_MODE_8023AD) {
+		optval = bond_opt_get_val(BOND_OPT_XMIT_HASH,
+					  bond->params.xmit_policy);
 		seq_printf(seq, "Transmit Hash Policy: %s (%d)\n",
-			xmit_hashtype_tbl[bond->params.xmit_policy].modename,
-			bond->params.xmit_policy);
+			   optval->string, bond->params.xmit_policy);
 	}
 
 	if (USES_PRIMARY(bond->params.mode)) {
 		seq_printf(seq, "Primary Slave: %s",
 			   (bond->primary_slave) ?
 			   bond->primary_slave->dev->name : "None");
-		if (bond->primary_slave)
+		if (bond->primary_slave) {
+			optval = bond_opt_get_val(BOND_OPT_PRIMARY_RESELECT,
+						  bond->params.primary_reselect);
 			seq_printf(seq, " (primary_reselect %s)",
-		   pri_reselect_tbl[bond->params.primary_reselect].modename);
+				   optval->string);
+		}
 
 		seq_printf(seq, "\nCurrently Active Slave: %s\n",
 			   (curr) ? curr->dev->name : "None");
@@ -136,8 +143,10 @@ static void bond_info_show_master(struct seq_file *seq)
 		seq_printf(seq, "LACP rate: %s\n",
 			   (bond->params.lacp_fast) ? "fast" : "slow");
 		seq_printf(seq, "Min links: %d\n", bond->params.min_links);
+		optval = bond_opt_get_val(BOND_OPT_AD_SELECT,
+					  bond->params.ad_select);
 		seq_printf(seq, "Aggregator selection policy (ad_select): %s\n",
-			   ad_select_tbl[bond->params.ad_select].modename);
+			   optval->string);
 
 		if (__bond_3ad_get_active_agg_info(bond, &ad_info)) {
 			seq_printf(seq, "bond %s has no active aggregator\n",
@@ -159,18 +168,6 @@ static void bond_info_show_master(struct seq_file *seq)
 	}
 }
 
-static const char *bond_slave_link_status(s8 link)
-{
-	static const char * const status[] = {
-		[BOND_LINK_UP] = "up",
-		[BOND_LINK_FAIL] = "going down",
-		[BOND_LINK_DOWN] = "down",
-		[BOND_LINK_BACK] = "going back",
-	};
-
-	return status[link];
-}
-
 static void bond_info_show_slave(struct seq_file *seq,
 				 const struct slave *slave)
 {
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 0ae580bbc5db..643fcc110299 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -12,8 +12,7 @@
  * for more details.
  *
  * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
  *
  * The full GNU General Public License is included in this distribution in the
  * file called LICENSE.
@@ -40,7 +39,6 @@
 #include <net/net_namespace.h>
 #include <net/netns/generic.h>
 #include <linux/nsproxy.h>
-#include <linux/reciprocal_div.h>
 
 #include "bonding.h"
 
@@ -202,58 +200,15 @@ static ssize_t bonding_store_slaves(struct device *d,
 				    struct device_attribute *attr,
 				    const char *buffer, size_t count)
 {
-	char command[IFNAMSIZ + 1] = { 0, };
-	char *ifname;
-	int res, ret = count;
-	struct net_device *dev;
 	struct bonding *bond = to_bond(d);
+	int ret;
 
-	if (!rtnl_trylock())
-		return restart_syscall();
-
-	sscanf(buffer, "%16s", command); /* IFNAMSIZ*/
-	ifname = command + 1;
-	if ((strlen(command) <= 1) ||
-	    !dev_valid_name(ifname))
-		goto err_no_cmd;
-
-	dev = __dev_get_by_name(dev_net(bond->dev), ifname);
-	if (!dev) {
-		pr_info("%s: Interface %s does not exist!\n",
-			bond->dev->name, ifname);
-		ret = -ENODEV;
-		goto out;
-	}
-
-	switch (command[0]) {
-	case '+':
-		pr_info("%s: Adding slave %s.\n", bond->dev->name, dev->name);
-		res = bond_enslave(bond->dev, dev);
-		break;
-
-	case '-':
-		pr_info("%s: Removing slave %s.\n", bond->dev->name, dev->name);
-		res = bond_release(bond->dev, dev);
-		break;
-
-	default:
-		goto err_no_cmd;
-	}
-
-	if (res)
-		ret = res;
-	goto out;
-
-err_no_cmd:
-	pr_err("no command found in slaves file for bond %s. Use +ifname or -ifname.\n",
-	       bond->dev->name);
-	ret = -EPERM;
+	ret = bond_opt_tryset_rtnl(bond, BOND_OPT_SLAVES, (char *)buffer);
+	if (!ret)
+		ret = count;
 
-out:
-	rtnl_unlock();
 	return ret;
 }
-
 static DEVICE_ATTR(slaves, S_IRUGO | S_IWUSR, bonding_show_slaves,
 		   bonding_store_slaves);
 
@@ -265,37 +220,24 @@ static ssize_t bonding_show_mode(struct device *d,
 				 struct device_attribute *attr, char *buf)
 {
 	struct bonding *bond = to_bond(d);
+	struct bond_opt_value *val;
 
-	return sprintf(buf, "%s %d\n",
-			bond_mode_tbl[bond->params.mode].modename,
-			bond->params.mode);
+	val = bond_opt_get_val(BOND_OPT_MODE, bond->params.mode);
+
+	return sprintf(buf, "%s %d\n", val->string, bond->params.mode);
 }
 
 static ssize_t bonding_store_mode(struct device *d,
 				  struct device_attribute *attr,
 				  const char *buf, size_t count)
 {
-	int new_value, ret;
 	struct bonding *bond = to_bond(d);
+	int ret;
 
-	new_value = bond_parse_parm(buf, bond_mode_tbl);
-	if (new_value < 0)  {
-		pr_err("%s: Ignoring invalid mode value %.*s.\n",
-		       bond->dev->name, (int)strlen(buf) - 1, buf);
-		return -EINVAL;
-	}
-	if (!rtnl_trylock())
-		return restart_syscall();
-
-	ret = bond_option_mode_set(bond, new_value);
-	if (!ret) {
-		pr_info("%s: setting mode to %s (%d).\n",
-			bond->dev->name, bond_mode_tbl[new_value].modename,
-			new_value);
+	ret = bond_opt_tryset_rtnl(bond, BOND_OPT_MODE, (char *)buf);
+	if (!ret)
 		ret = count;
-	}
 
-	rtnl_unlock();
 	return ret;
 }
 static DEVICE_ATTR(mode, S_IRUGO | S_IWUSR,
@@ -309,31 +251,23 @@ static ssize_t bonding_show_xmit_hash(struct device *d,
 				      char *buf)
 {
 	struct bonding *bond = to_bond(d);
+	struct bond_opt_value *val;
 
-	return sprintf(buf, "%s %d\n",
-		       xmit_hashtype_tbl[bond->params.xmit_policy].modename,
-		       bond->params.xmit_policy);
+	val = bond_opt_get_val(BOND_OPT_XMIT_HASH, bond->params.xmit_policy);
+
+	return sprintf(buf, "%s %d\n", val->string, bond->params.xmit_policy);
 }
 
 static ssize_t bonding_store_xmit_hash(struct device *d,
 				       struct device_attribute *attr,
 				       const char *buf, size_t count)
 {
-	int new_value, ret = count;
 	struct bonding *bond = to_bond(d);
+	int ret;
 
-	new_value = bond_parse_parm(buf, xmit_hashtype_tbl);
-	if (new_value < 0)  {
-		pr_err("%s: Ignoring invalid xmit hash policy value %.*s.\n",
-		       bond->dev->name,
-		       (int)strlen(buf) - 1, buf);
-		ret = -EINVAL;
-	} else {
-		bond->params.xmit_policy = new_value;
-		pr_info("%s: setting xmit hash policy to %s (%d).\n",
-			bond->dev->name,
-			xmit_hashtype_tbl[new_value].modename, new_value);
-	}
+	ret = bond_opt_tryset_rtnl(bond, BOND_OPT_XMIT_HASH, (char *)buf);
+	if (!ret)
+		ret = count;
 
 	return ret;
 }
@@ -348,10 +282,12 @@ static ssize_t bonding_show_arp_validate(struct device *d,
 					 char *buf)
 {
 	struct bonding *bond = to_bond(d);
+	struct bond_opt_value *val;
 
-	return sprintf(buf, "%s %d\n",
-		       arp_validate_tbl[bond->params.arp_validate].modename,
-		       bond->params.arp_validate);
+	val = bond_opt_get_val(BOND_OPT_ARP_VALIDATE,
+			       bond->params.arp_validate);
+
+	return sprintf(buf, "%s %d\n", val->string, bond->params.arp_validate);
 }
 
 static ssize_t bonding_store_arp_validate(struct device *d,
@@ -359,36 +295,11 @@ static ssize_t bonding_store_arp_validate(struct device *d,
 					  const char *buf, size_t count)
 {
 	struct bonding *bond = to_bond(d);
-	int new_value, ret = count;
+	int ret;
 
-	if (!rtnl_trylock())
-		return restart_syscall();
-	new_value = bond_parse_parm(buf, arp_validate_tbl);
-	if (new_value < 0) {
-		pr_err("%s: Ignoring invalid arp_validate value %s\n",
-		       bond->dev->name, buf);
-		ret = -EINVAL;
-		goto out;
-	}
-	if (bond->params.mode != BOND_MODE_ACTIVEBACKUP) {
-		pr_err("%s: arp_validate only supported in active-backup mode.\n",
-		       bond->dev->name);
-		ret = -EINVAL;
-		goto out;
-	}
-	pr_info("%s: setting arp_validate to %s (%d).\n",
-		bond->dev->name, arp_validate_tbl[new_value].modename,
-		new_value);
-
-	if (bond->dev->flags & IFF_UP) {
-		if (!new_value)
-			bond->recv_probe = NULL;
-		else if (bond->params.arp_interval)
-			bond->recv_probe = bond_arp_rcv;
-	}
-	bond->params.arp_validate = new_value;
-out:
-	rtnl_unlock();
+	ret = bond_opt_tryset_rtnl(bond, BOND_OPT_ARP_VALIDATE, (char *)buf);
+	if (!ret)
+		ret = count;
 
 	return ret;
 }
@@ -403,10 +314,12 @@ static ssize_t bonding_show_arp_all_targets(struct device *d,
 					 char *buf)
 {
 	struct bonding *bond = to_bond(d);
-	int value = bond->params.arp_all_targets;
+	struct bond_opt_value *val;
 
-	return sprintf(buf, "%s %d\n", arp_all_targets_tbl[value].modename,
-		       value);
+	val = bond_opt_get_val(BOND_OPT_ARP_ALL_TARGETS,
+			       bond->params.arp_all_targets);
+	return sprintf(buf, "%s %d\n",
+		       val->string, bond->params.arp_all_targets);
 }
 
 static ssize_t bonding_store_arp_all_targets(struct device *d,
@@ -414,21 +327,13 @@ static ssize_t bonding_store_arp_all_targets(struct device *d,
 					  const char *buf, size_t count)
 {
 	struct bonding *bond = to_bond(d);
-	int new_value;
-
-	new_value = bond_parse_parm(buf, arp_all_targets_tbl);
-	if (new_value < 0) {
-		pr_err("%s: Ignoring invalid arp_all_targets value %s\n",
-		       bond->dev->name, buf);
-		return -EINVAL;
-	}
-	pr_info("%s: setting arp_all_targets to %s (%d).\n",
-		bond->dev->name, arp_all_targets_tbl[new_value].modename,
-		new_value);
+	int ret;
 
-	bond->params.arp_all_targets = new_value;
+	ret = bond_opt_tryset_rtnl(bond, BOND_OPT_ARP_ALL_TARGETS, (char *)buf);
+	if (!ret)
+		ret = count;
 
-	return count;
+	return ret;
 }
 
 static DEVICE_ATTR(arp_all_targets, S_IRUGO | S_IWUSR,
@@ -443,44 +348,25 @@ static ssize_t bonding_show_fail_over_mac(struct device *d,
 					  char *buf)
 {
 	struct bonding *bond = to_bond(d);
+	struct bond_opt_value *val;
 
-	return sprintf(buf, "%s %d\n",
-		       fail_over_mac_tbl[bond->params.fail_over_mac].modename,
-		       bond->params.fail_over_mac);
+	val = bond_opt_get_val(BOND_OPT_FAIL_OVER_MAC,
+			       bond->params.fail_over_mac);
+
+	return sprintf(buf, "%s %d\n", val->string, bond->params.fail_over_mac);
 }
 
 static ssize_t bonding_store_fail_over_mac(struct device *d,
 					   struct device_attribute *attr,
 					   const char *buf, size_t count)
 {
-	int new_value, ret = count;
 	struct bonding *bond = to_bond(d);
+	int ret;
 
-	if (!rtnl_trylock())
-		return restart_syscall();
-
-	if (bond_has_slaves(bond)) {
-		pr_err("%s: Can't alter fail_over_mac with slaves in bond.\n",
-		       bond->dev->name);
-		ret = -EPERM;
-		goto out;
-	}
-
-	new_value = bond_parse_parm(buf, fail_over_mac_tbl);
-	if (new_value < 0) {
-		pr_err("%s: Ignoring invalid fail_over_mac value %s.\n",
-		       bond->dev->name, buf);
-		ret = -EINVAL;
-		goto out;
-	}
-
-	bond->params.fail_over_mac = new_value;
-	pr_info("%s: Setting fail_over_mac to %s (%d).\n",
-		bond->dev->name, fail_over_mac_tbl[new_value].modename,
-		new_value);
+	ret = bond_opt_tryset_rtnl(bond, BOND_OPT_FAIL_OVER_MAC, (char *)buf);
+	if (!ret)
+		ret = count;
 
-out:
-	rtnl_unlock();
 	return ret;
 }
 
@@ -507,61 +393,12 @@ static ssize_t bonding_store_arp_interval(struct device *d,
 					  const char *buf, size_t count)
 {
 	struct bonding *bond = to_bond(d);
-	int new_value, ret = count;
+	int ret;
+
+	ret = bond_opt_tryset_rtnl(bond, BOND_OPT_ARP_INTERVAL, (char *)buf);
+	if (!ret)
+		ret = count;
 
-	if (!rtnl_trylock())
-		return restart_syscall();
-	if (sscanf(buf, "%d", &new_value) != 1) {
-		pr_err("%s: no arp_interval value specified.\n",
-		       bond->dev->name);
-		ret = -EINVAL;
-		goto out;
-	}
-	if (new_value < 0) {
-		pr_err("%s: Invalid arp_interval value %d not in range 0-%d; rejected.\n",
-		       bond->dev->name, new_value, INT_MAX);
-		ret = -EINVAL;
-		goto out;
-	}
-	if (BOND_NO_USES_ARP(bond->params.mode)) {
-		pr_info("%s: ARP monitoring cannot be used with ALB/TLB/802.3ad. Only MII monitoring is supported on %s.\n",
-			bond->dev->name, bond->dev->name);
-		ret = -EINVAL;
-		goto out;
-	}
-	pr_info("%s: Setting ARP monitoring interval to %d.\n",
-		bond->dev->name, new_value);
-	bond->params.arp_interval = new_value;
-	if (new_value) {
-		if (bond->params.miimon) {
-			pr_info("%s: ARP monitoring cannot be used with MII monitoring. %s Disabling MII monitoring.\n",
-				bond->dev->name, bond->dev->name);
-			bond->params.miimon = 0;
-		}
-		if (!bond->params.arp_targets[0])
-			pr_info("%s: ARP monitoring has been set up, but no ARP targets have been specified.\n",
-				bond->dev->name);
-	}
-	if (bond->dev->flags & IFF_UP) {
-		/* If the interface is up, we may need to fire off
-		 * the ARP timer.  If the interface is down, the
-		 * timer will get fired off when the open function
-		 * is called.
-		 */
-		if (!new_value) {
-			if (bond->params.arp_validate)
-				bond->recv_probe = NULL;
-			cancel_delayed_work_sync(&bond->arp_work);
-		} else {
-			/* arp_validate can be set only in active-backup mode */
-			if (bond->params.arp_validate)
-				bond->recv_probe = bond_arp_rcv;
-			cancel_delayed_work_sync(&bond->mii_work);
-			queue_delayed_work(bond->wq, &bond->arp_work, 0);
-		}
-	}
-out:
-	rtnl_unlock();
 	return ret;
 }
 static DEVICE_ATTR(arp_interval, S_IRUGO | S_IWUSR,
@@ -574,8 +411,8 @@ static ssize_t bonding_show_arp_targets(struct device *d,
 					struct device_attribute *attr,
 					char *buf)
 {
-	int i, res = 0;
 	struct bonding *bond = to_bond(d);
+	int i, res = 0;
 
 	for (i = 0; i < BOND_MAX_ARP_TARGETS; i++) {
 		if (bond->params.arp_targets[i])
@@ -584,6 +421,7 @@ static ssize_t bonding_show_arp_targets(struct device *d,
 	}
 	if (res)
 		buf[res-1] = '\n'; /* eat the leftover space */
+
 	return res;
 }
 
@@ -592,82 +430,12 @@ static ssize_t bonding_store_arp_targets(struct device *d,
 					 const char *buf, size_t count)
 {
 	struct bonding *bond = to_bond(d);
-	struct list_head *iter;
-	struct slave *slave;
-	__be32 newtarget, *targets;
-	unsigned long *targets_rx;
-	int ind, i, j, ret = -EINVAL;
-
-	if (!rtnl_trylock())
-		return restart_syscall();
-
-	targets = bond->params.arp_targets;
-	if (!in4_pton(buf + 1, -1, (u8 *)&newtarget, -1, NULL) ||
-	    IS_IP_TARGET_UNUSABLE_ADDRESS(newtarget)) {
-		pr_err("%s: invalid ARP target %pI4 specified for addition\n",
-		       bond->dev->name, &newtarget);
-		goto out;
-	}
-	/* look for adds */
-	if (buf[0] == '+') {
-		if (bond_get_targets_ip(targets, newtarget) != -1) { /* dup */
-			pr_err("%s: ARP target %pI4 is already present\n",
-			       bond->dev->name, &newtarget);
-			goto out;
-		}
-
-		ind = bond_get_targets_ip(targets, 0); /* first free slot */
-		if (ind == -1) {
-			pr_err("%s: ARP target table is full!\n",
-			       bond->dev->name);
-			goto out;
-		}
-
-		pr_info("%s: adding ARP target %pI4.\n", bond->dev->name,
-			 &newtarget);
-		/* not to race with bond_arp_rcv */
-		write_lock_bh(&bond->lock);
-		bond_for_each_slave(bond, slave, iter)
-			slave->target_last_arp_rx[ind] = jiffies;
-		targets[ind] = newtarget;
-		write_unlock_bh(&bond->lock);
-	} else if (buf[0] == '-')	{
-		ind = bond_get_targets_ip(targets, newtarget);
-		if (ind == -1) {
-			pr_err("%s: unable to remove nonexistent ARP target %pI4.\n",
-				bond->dev->name, &newtarget);
-			goto out;
-		}
-
-		if (ind == 0 && !targets[1] && bond->params.arp_interval)
-			pr_warn("%s: removing last arp target with arp_interval on\n",
-				bond->dev->name);
-
-		pr_info("%s: removing ARP target %pI4.\n", bond->dev->name,
-			&newtarget);
+	int ret;
 
-		write_lock_bh(&bond->lock);
-		bond_for_each_slave(bond, slave, iter) {
-			targets_rx = slave->target_last_arp_rx;
-			j = ind;
-			for (; (j < BOND_MAX_ARP_TARGETS-1) && targets[j+1]; j++)
-				targets_rx[j] = targets_rx[j+1];
-			targets_rx[j] = 0;
-		}
-		for (i = ind; (i < BOND_MAX_ARP_TARGETS-1) && targets[i+1]; i++)
-			targets[i] = targets[i+1];
-		targets[i] = 0;
-		write_unlock_bh(&bond->lock);
-	} else {
-		pr_err("no command found in arp_ip_targets file for bond %s. Use +<addr> or -<addr>.\n",
-		       bond->dev->name);
-		ret = -EPERM;
-		goto out;
-	}
+	ret = bond_opt_tryset_rtnl(bond, BOND_OPT_ARP_TARGETS, (char *)buf);
+	if (!ret)
+		ret = count;
 
-	ret = count;
-out:
-	rtnl_unlock();
 	return ret;
 }
 static DEVICE_ATTR(arp_ip_target, S_IRUGO | S_IWUSR , bonding_show_arp_targets, bonding_store_arp_targets);
@@ -690,45 +458,13 @@ static ssize_t bonding_store_downdelay(struct device *d,
 				       struct device_attribute *attr,
 				       const char *buf, size_t count)
 {
-	int new_value, ret = count;
 	struct bonding *bond = to_bond(d);
+	int ret;
 
-	if (!rtnl_trylock())
-		return restart_syscall();
-	if (!(bond->params.miimon)) {
-		pr_err("%s: Unable to set down delay as MII monitoring is disabled\n",
-		       bond->dev->name);
-		ret = -EPERM;
-		goto out;
-	}
-
-	if (sscanf(buf, "%d", &new_value) != 1) {
-		pr_err("%s: no down delay value specified.\n", bond->dev->name);
-		ret = -EINVAL;
-		goto out;
-	}
-	if (new_value < 0) {
-		pr_err("%s: Invalid down delay value %d not in range %d-%d; rejected.\n",
-		       bond->dev->name, new_value, 0, INT_MAX);
-		ret = -EINVAL;
-		goto out;
-	} else {
-		if ((new_value % bond->params.miimon) != 0) {
-			pr_warning("%s: Warning: down delay (%d) is not a multiple of miimon (%d), delay rounded to %d ms\n",
-				   bond->dev->name, new_value,
-				   bond->params.miimon,
-				   (new_value / bond->params.miimon) *
-				   bond->params.miimon);
-		}
-		bond->params.downdelay = new_value / bond->params.miimon;
-		pr_info("%s: Setting down delay to %d.\n",
-			bond->dev->name,
-			bond->params.downdelay * bond->params.miimon);
-
-	}
+	ret = bond_opt_tryset_rtnl(bond, BOND_OPT_DOWNDELAY, (char *)buf);
+	if (!ret)
+		ret = count;
 
-out:
-	rtnl_unlock();
 	return ret;
 }
 static DEVICE_ATTR(downdelay, S_IRUGO | S_IWUSR,
@@ -748,45 +484,13 @@ static ssize_t bonding_store_updelay(struct device *d,
 				     struct device_attribute *attr,
 				     const char *buf, size_t count)
 {
-	int new_value, ret = count;
 	struct bonding *bond = to_bond(d);
+	int ret;
 
-	if (!rtnl_trylock())
-		return restart_syscall();
-	if (!(bond->params.miimon)) {
-		pr_err("%s: Unable to set up delay as MII monitoring is disabled\n",
-		       bond->dev->name);
-		ret = -EPERM;
-		goto out;
-	}
-
-	if (sscanf(buf, "%d", &new_value) != 1) {
-		pr_err("%s: no up delay value specified.\n",
-		       bond->dev->name);
-		ret = -EINVAL;
-		goto out;
-	}
-	if (new_value < 0) {
-		pr_err("%s: Invalid up delay value %d not in range %d-%d; rejected.\n",
-		       bond->dev->name, new_value, 0, INT_MAX);
-		ret = -EINVAL;
-		goto out;
-	} else {
-		if ((new_value % bond->params.miimon) != 0) {
-			pr_warning("%s: Warning: up delay (%d) is not a multiple of miimon (%d), updelay rounded to %d ms\n",
-				   bond->dev->name, new_value,
-				   bond->params.miimon,
-				   (new_value / bond->params.miimon) *
-				   bond->params.miimon);
-		}
-		bond->params.updelay = new_value / bond->params.miimon;
-		pr_info("%s: Setting up delay to %d.\n",
-			bond->dev->name,
-			bond->params.updelay * bond->params.miimon);
-	}
+	ret = bond_opt_tryset_rtnl(bond, BOND_OPT_UPDELAY, (char *)buf);
+	if (!ret)
+		ret = count;
 
-out:
-	rtnl_unlock();
 	return ret;
 }
 static DEVICE_ATTR(updelay, S_IRUGO | S_IWUSR,
@@ -801,10 +505,11 @@ static ssize_t bonding_show_lacp(struct device *d,
 				 char *buf)
 {
 	struct bonding *bond = to_bond(d);
+	struct bond_opt_value *val;
 
-	return sprintf(buf, "%s %d\n",
-		bond_lacp_tbl[bond->params.lacp_fast].modename,
-		bond->params.lacp_fast);
+	val = bond_opt_get_val(BOND_OPT_LACP_RATE, bond->params.lacp_fast);
+
+	return sprintf(buf, "%s %d\n", val->string, bond->params.lacp_fast);
 }
 
 static ssize_t bonding_store_lacp(struct device *d,
@@ -812,40 +517,11 @@ static ssize_t bonding_store_lacp(struct device *d,
 				  const char *buf, size_t count)
 {
 	struct bonding *bond = to_bond(d);
-	int new_value, ret = count;
-
-	if (!rtnl_trylock())
-		return restart_syscall();
-
-	if (bond->dev->flags & IFF_UP) {
-		pr_err("%s: Unable to update LACP rate because interface is up.\n",
-		       bond->dev->name);
-		ret = -EPERM;
-		goto out;
-	}
-
-	if (bond->params.mode != BOND_MODE_8023AD) {
-		pr_err("%s: Unable to update LACP rate because bond is not in 802.3ad mode.\n",
-		       bond->dev->name);
-		ret = -EPERM;
-		goto out;
-	}
+	int ret;
 
-	new_value = bond_parse_parm(buf, bond_lacp_tbl);
-
-	if ((new_value == 1) || (new_value == 0)) {
-		bond->params.lacp_fast = new_value;
-		bond_3ad_update_lacp_rate(bond);
-		pr_info("%s: Setting LACP rate to %s (%d).\n",
-			bond->dev->name, bond_lacp_tbl[new_value].modename,
-			new_value);
-	} else {
-		pr_err("%s: Ignoring invalid LACP rate value %.*s.\n",
-		       bond->dev->name, (int)strlen(buf) - 1, buf);
-		ret = -EINVAL;
-	}
-out:
-	rtnl_unlock();
+	ret = bond_opt_tryset_rtnl(bond, BOND_OPT_LACP_RATE, (char *)buf);
+	if (!ret)
+		ret = count;
 
 	return ret;
 }
@@ -867,19 +543,12 @@ static ssize_t bonding_store_min_links(struct device *d,
 {
 	struct bonding *bond = to_bond(d);
 	int ret;
-	unsigned int new_value;
 
-	ret = kstrtouint(buf, 0, &new_value);
-	if (ret < 0) {
-		pr_err("%s: Ignoring invalid min links value %s.\n",
-		       bond->dev->name, buf);
-		return ret;
-	}
+	ret = bond_opt_tryset_rtnl(bond, BOND_OPT_MINLINKS, (char *)buf);
+	if (!ret)
+		ret = count;
 
-	pr_info("%s: Setting min links value to %u\n",
-		bond->dev->name, new_value);
-	bond->params.min_links = new_value;
-	return count;
+	return ret;
 }
 static DEVICE_ATTR(min_links, S_IRUGO | S_IWUSR,
 		   bonding_show_min_links, bonding_store_min_links);
@@ -889,10 +558,11 @@ static ssize_t bonding_show_ad_select(struct device *d,
 				      char *buf)
 {
 	struct bonding *bond = to_bond(d);
+	struct bond_opt_value *val;
 
-	return sprintf(buf, "%s %d\n",
-		ad_select_tbl[bond->params.ad_select].modename,
-		bond->params.ad_select);
+	val = bond_opt_get_val(BOND_OPT_AD_SELECT, bond->params.ad_select);
+
+	return sprintf(buf, "%s %d\n", val->string, bond->params.ad_select);
 }
 
 
@@ -900,29 +570,13 @@ static ssize_t bonding_store_ad_select(struct device *d,
 				       struct device_attribute *attr,
 				       const char *buf, size_t count)
 {
-	int new_value, ret = count;
 	struct bonding *bond = to_bond(d);
+	int ret;
 
-	if (bond->dev->flags & IFF_UP) {
-		pr_err("%s: Unable to update ad_select because interface is up.\n",
-		       bond->dev->name);
-		ret = -EPERM;
-		goto out;
-	}
+	ret = bond_opt_tryset_rtnl(bond, BOND_OPT_AD_SELECT, (char *)buf);
+	if (!ret)
+		ret = count;
 
-	new_value = bond_parse_parm(buf, ad_select_tbl);
-
-	if (new_value != -1) {
-		bond->params.ad_select = new_value;
-		pr_info("%s: Setting ad_select to %s (%d).\n",
-			bond->dev->name, ad_select_tbl[new_value].modename,
-			new_value);
-	} else {
-		pr_err("%s: Ignoring invalid ad_select value %.*s.\n",
-		       bond->dev->name, (int)strlen(buf) - 1, buf);
-		ret = -EINVAL;
-	}
-out:
 	return ret;
 }
 static DEVICE_ATTR(ad_select, S_IRUGO | S_IWUSR,
@@ -944,8 +598,13 @@ static ssize_t bonding_store_num_peer_notif(struct device *d,
 					    const char *buf, size_t count)
 {
 	struct bonding *bond = to_bond(d);
-	int err = kstrtou8(buf, 10, &bond->params.num_peer_notif);
-	return err ? err : count;
+	int ret;
+
+	ret = bond_opt_tryset_rtnl(bond, BOND_OPT_NUM_PEER_NOTIF, (char *)buf);
+	if (!ret)
+		ret = count;
+
+	return ret;
 }
 static DEVICE_ATTR(num_grat_arp, S_IRUGO | S_IWUSR,
 		   bonding_show_num_peer_notif, bonding_store_num_peer_notif);
@@ -971,56 +630,13 @@ static ssize_t bonding_store_miimon(struct device *d,
 				    struct device_attribute *attr,
 				    const char *buf, size_t count)
 {
-	int new_value, ret = count;
 	struct bonding *bond = to_bond(d);
+	int ret;
+
+	ret = bond_opt_tryset_rtnl(bond, BOND_OPT_MIIMON, (char *)buf);
+	if (!ret)
+		ret = count;
 
-	if (!rtnl_trylock())
-		return restart_syscall();
-	if (sscanf(buf, "%d", &new_value) != 1) {
-		pr_err("%s: no miimon value specified.\n",
-		       bond->dev->name);
-		ret = -EINVAL;
-		goto out;
-	}
-	if (new_value < 0) {
-		pr_err("%s: Invalid miimon value %d not in range %d-%d; rejected.\n",
-		       bond->dev->name, new_value, 0, INT_MAX);
-		ret = -EINVAL;
-		goto out;
-	}
-	pr_info("%s: Setting MII monitoring interval to %d.\n",
-		bond->dev->name, new_value);
-	bond->params.miimon = new_value;
-	if (bond->params.updelay)
-		pr_info("%s: Note: Updating updelay (to %d) since it is a multiple of the miimon value.\n",
-			bond->dev->name,
-			bond->params.updelay * bond->params.miimon);
-	if (bond->params.downdelay)
-		pr_info("%s: Note: Updating downdelay (to %d) since it is a multiple of the miimon value.\n",
-			bond->dev->name,
-			bond->params.downdelay * bond->params.miimon);
-	if (new_value && bond->params.arp_interval) {
-		pr_info("%s: MII monitoring cannot be used with ARP monitoring. Disabling ARP monitoring...\n",
-			bond->dev->name);
-		bond->params.arp_interval = 0;
-		if (bond->params.arp_validate)
-			bond->params.arp_validate = BOND_ARP_VALIDATE_NONE;
-	}
-	if (bond->dev->flags & IFF_UP) {
-		/* If the interface is up, we may need to fire off
-		 * the MII timer. If the interface is down, the
-		 * timer will get fired off when the open function
-		 * is called.
-		 */
-		if (!new_value) {
-			cancel_delayed_work_sync(&bond->mii_work);
-		} else {
-			cancel_delayed_work_sync(&bond->arp_work);
-			queue_delayed_work(bond->wq, &bond->mii_work, 0);
-		}
-	}
-out:
-	rtnl_unlock();
 	return ret;
 }
 static DEVICE_ATTR(miimon, S_IRUGO | S_IWUSR,
@@ -1051,58 +667,13 @@ static ssize_t bonding_store_primary(struct device *d,
 				     const char *buf, size_t count)
 {
 	struct bonding *bond = to_bond(d);
-	struct list_head *iter;
-	char ifname[IFNAMSIZ];
-	struct slave *slave;
-
-	if (!rtnl_trylock())
-		return restart_syscall();
-	block_netpoll_tx();
-	read_lock(&bond->lock);
-	write_lock_bh(&bond->curr_slave_lock);
-
-	if (!USES_PRIMARY(bond->params.mode)) {
-		pr_info("%s: Unable to set primary slave; %s is in mode %d\n",
-			bond->dev->name, bond->dev->name, bond->params.mode);
-		goto out;
-	}
-
-	sscanf(buf, "%15s", ifname); /* IFNAMSIZ */
-
-	/* check to see if we are clearing primary */
-	if (!strlen(ifname) || buf[0] == '\n') {
-		pr_info("%s: Setting primary slave to None.\n",
-			bond->dev->name);
-		bond->primary_slave = NULL;
-		memset(bond->params.primary, 0, sizeof(bond->params.primary));
-		bond_select_active_slave(bond);
-		goto out;
-	}
-
-	bond_for_each_slave(bond, slave, iter) {
-		if (strncmp(slave->dev->name, ifname, IFNAMSIZ) == 0) {
-			pr_info("%s: Setting %s as primary slave.\n",
-				bond->dev->name, slave->dev->name);
-			bond->primary_slave = slave;
-			strcpy(bond->params.primary, slave->dev->name);
-			bond_select_active_slave(bond);
-			goto out;
-		}
-	}
-
-	strncpy(bond->params.primary, ifname, IFNAMSIZ);
-	bond->params.primary[IFNAMSIZ - 1] = 0;
+	int ret;
 
-	pr_info("%s: Recording %s as primary, "
-		"but it has not been enslaved to %s yet.\n",
-		bond->dev->name, ifname, bond->dev->name);
-out:
-	write_unlock_bh(&bond->curr_slave_lock);
-	read_unlock(&bond->lock);
-	unblock_netpoll_tx();
-	rtnl_unlock();
+	ret = bond_opt_tryset_rtnl(bond, BOND_OPT_PRIMARY, (char *)buf);
+	if (!ret)
+		ret = count;
 
-	return count;
+	return ret;
 }
 static DEVICE_ATTR(primary, S_IRUGO | S_IWUSR,
 		   bonding_show_primary, bonding_store_primary);
@@ -1115,45 +686,27 @@ static ssize_t bonding_show_primary_reselect(struct device *d,
 					     char *buf)
 {
 	struct bonding *bond = to_bond(d);
+	struct bond_opt_value *val;
+
+	val = bond_opt_get_val(BOND_OPT_PRIMARY_RESELECT,
+			       bond->params.primary_reselect);
 
 	return sprintf(buf, "%s %d\n",
-		       pri_reselect_tbl[bond->params.primary_reselect].modename,
-		       bond->params.primary_reselect);
+		       val->string, bond->params.primary_reselect);
 }
 
 static ssize_t bonding_store_primary_reselect(struct device *d,
 					      struct device_attribute *attr,
 					      const char *buf, size_t count)
 {
-	int new_value, ret = count;
 	struct bonding *bond = to_bond(d);
+	int ret;
 
-	if (!rtnl_trylock())
-		return restart_syscall();
-
-	new_value = bond_parse_parm(buf, pri_reselect_tbl);
-	if (new_value < 0)  {
-		pr_err("%s: Ignoring invalid primary_reselect value %.*s.\n",
-		       bond->dev->name,
-		       (int) strlen(buf) - 1, buf);
-		ret = -EINVAL;
-		goto out;
-	}
+	ret = bond_opt_tryset_rtnl(bond, BOND_OPT_PRIMARY_RESELECT,
+				   (char *)buf);
+	if (!ret)
+		ret = count;
 
-	bond->params.primary_reselect = new_value;
-	pr_info("%s: setting primary_reselect to %s (%d).\n",
-		bond->dev->name, pri_reselect_tbl[new_value].modename,
-		new_value);
-
-	block_netpoll_tx();
-	read_lock(&bond->lock);
-	write_lock_bh(&bond->curr_slave_lock);
-	bond_select_active_slave(bond);
-	write_unlock_bh(&bond->curr_slave_lock);
-	read_unlock(&bond->lock);
-	unblock_netpoll_tx();
-out:
-	rtnl_unlock();
 	return ret;
 }
 static DEVICE_ATTR(primary_reselect, S_IRUGO | S_IWUSR,
@@ -1176,25 +729,13 @@ static ssize_t bonding_store_carrier(struct device *d,
 				     struct device_attribute *attr,
 				     const char *buf, size_t count)
 {
-	int new_value, ret = count;
 	struct bonding *bond = to_bond(d);
+	int ret;
 
+	ret = bond_opt_tryset_rtnl(bond, BOND_OPT_USE_CARRIER, (char *)buf);
+	if (!ret)
+		ret = count;
 
-	if (sscanf(buf, "%d", &new_value) != 1) {
-		pr_err("%s: no use_carrier value specified.\n",
-		       bond->dev->name);
-		ret = -EINVAL;
-		goto out;
-	}
-	if ((new_value == 0) || (new_value == 1)) {
-		bond->params.use_carrier = new_value;
-		pr_info("%s: Setting use_carrier to %d.\n",
-			bond->dev->name, new_value);
-	} else {
-		pr_info("%s: Ignoring invalid use_carrier value %d.\n",
-			bond->dev->name, new_value);
-	}
-out:
 	return ret;
 }
 static DEVICE_ATTR(use_carrier, S_IRUGO | S_IWUSR,
@@ -1225,34 +766,14 @@ static ssize_t bonding_store_active_slave(struct device *d,
 					  struct device_attribute *attr,
 					  const char *buf, size_t count)
 {
-	int ret;
 	struct bonding *bond = to_bond(d);
-	char ifname[IFNAMSIZ];
-	struct net_device *dev;
-
-	if (!rtnl_trylock())
-		return restart_syscall();
-
-	sscanf(buf, "%15s", ifname); /* IFNAMSIZ */
-	if (!strlen(ifname) || buf[0] == '\n') {
-		dev = NULL;
-	} else {
-		dev = __dev_get_by_name(dev_net(bond->dev), ifname);
-		if (!dev) {
-			ret = -ENODEV;
-			goto out;
-		}
-	}
+	int ret;
 
-	ret = bond_option_active_slave_set(bond, dev);
+	ret = bond_opt_tryset_rtnl(bond, BOND_OPT_ACTIVE_SLAVE, (char *)buf);
 	if (!ret)
 		ret = count;
 
- out:
-	rtnl_unlock();
-
 	return ret;
-
 }
 static DEVICE_ATTR(active_slave, S_IRUGO | S_IWUSR,
 		   bonding_show_active_slave, bonding_store_active_slave);
@@ -1421,72 +942,15 @@ static ssize_t bonding_store_queue_id(struct device *d,
 				      struct device_attribute *attr,
 				      const char *buffer, size_t count)
 {
-	struct slave *slave, *update_slave;
 	struct bonding *bond = to_bond(d);
-	struct list_head *iter;
-	u16 qid;
-	int ret = count;
-	char *delim;
-	struct net_device *sdev = NULL;
-
-	if (!rtnl_trylock())
-		return restart_syscall();
-
-	/* delim will point to queue id if successful */
-	delim = strchr(buffer, ':');
-	if (!delim)
-		goto err_no_cmd;
-
-	/*
-	 * Terminate string that points to device name and bump it
-	 * up one, so we can read the queue id there.
-	 */
-	*delim = '\0';
-	if (sscanf(++delim, "%hd\n", &qid) != 1)
-		goto err_no_cmd;
-
-	/* Check buffer length, valid ifname and queue id */
-	if (strlen(buffer) > IFNAMSIZ ||
-	    !dev_valid_name(buffer) ||
-	    qid > bond->dev->real_num_tx_queues)
-		goto err_no_cmd;
-
-	/* Get the pointer to that interface if it exists */
-	sdev = __dev_get_by_name(dev_net(bond->dev), buffer);
-	if (!sdev)
-		goto err_no_cmd;
-
-	/* Search for thes slave and check for duplicate qids */
-	update_slave = NULL;
-	bond_for_each_slave(bond, slave, iter) {
-		if (sdev == slave->dev)
-			/*
-			 * We don't need to check the matching
-			 * slave for dups, since we're overwriting it
-			 */
-			update_slave = slave;
-		else if (qid && qid == slave->queue_id) {
-			goto err_no_cmd;
-		}
-	}
-
-	if (!update_slave)
-		goto err_no_cmd;
+	int ret;
 
-	/* Actually set the qids for the slave */
-	update_slave->queue_id = qid;
+	ret = bond_opt_tryset_rtnl(bond, BOND_OPT_QUEUE_ID, (char *)buffer);
+	if (!ret)
+		ret = count;
 
-out:
-	rtnl_unlock();
 	return ret;
-
-err_no_cmd:
-	pr_info("invalid input for queue_id set for %s.\n",
-		bond->dev->name);
-	ret = -EPERM;
-	goto out;
 }
-
 static DEVICE_ATTR(queue_id, S_IRUGO | S_IWUSR, bonding_show_queue_id,
 		   bonding_store_queue_id);
 
@@ -1508,42 +972,13 @@ static ssize_t bonding_store_slaves_active(struct device *d,
 					   const char *buf, size_t count)
 {
 	struct bonding *bond = to_bond(d);
-	int new_value, ret = count;
-	struct list_head *iter;
-	struct slave *slave;
-
-	if (!rtnl_trylock())
-		return restart_syscall();
-
-	if (sscanf(buf, "%d", &new_value) != 1) {
-		pr_err("%s: no all_slaves_active value specified.\n",
-		       bond->dev->name);
-		ret = -EINVAL;
-		goto out;
-	}
-
-	if (new_value == bond->params.all_slaves_active)
-		goto out;
+	int ret;
 
-	if ((new_value == 0) || (new_value == 1)) {
-		bond->params.all_slaves_active = new_value;
-	} else {
-		pr_info("%s: Ignoring invalid all_slaves_active value %d.\n",
-			bond->dev->name, new_value);
-		ret = -EINVAL;
-		goto out;
-	}
+	ret = bond_opt_tryset_rtnl(bond, BOND_OPT_ALL_SLAVES_ACTIVE,
+				   (char *)buf);
+	if (!ret)
+		ret = count;
 
-	bond_for_each_slave(bond, slave, iter) {
-		if (!bond_is_active_slave(slave)) {
-			if (new_value)
-				slave->inactive = 0;
-			else
-				slave->inactive = 1;
-		}
-	}
-out:
-	rtnl_unlock();
 	return ret;
 }
 static DEVICE_ATTR(all_slaves_active, S_IRUGO | S_IWUSR,
@@ -1565,27 +1000,13 @@ static ssize_t bonding_store_resend_igmp(struct device *d,
 					 struct device_attribute *attr,
 					 const char *buf, size_t count)
 {
-	int new_value, ret = count;
 	struct bonding *bond = to_bond(d);
+	int ret;
 
-	if (sscanf(buf, "%d", &new_value) != 1) {
-		pr_err("%s: no resend_igmp value specified.\n",
-		       bond->dev->name);
-		ret = -EINVAL;
-		goto out;
-	}
-
-	if (new_value < 0 || new_value > 255) {
-		pr_err("%s: Invalid resend_igmp value %d not in range 0-255; rejected.\n",
-		       bond->dev->name, new_value);
-		ret = -EINVAL;
-		goto out;
-	}
+	ret = bond_opt_tryset_rtnl(bond, BOND_OPT_RESEND_IGMP, (char *)buf);
+	if (!ret)
+		ret = count;
 
-	pr_info("%s: Setting resend_igmp to %d.\n",
-		bond->dev->name, new_value);
-	bond->params.resend_igmp = new_value;
-out:
 	return ret;
 }
 
@@ -1606,24 +1027,12 @@ static ssize_t bonding_store_lp_interval(struct device *d,
 					 const char *buf, size_t count)
 {
 	struct bonding *bond = to_bond(d);
-	int new_value, ret = count;
-
-	if (sscanf(buf, "%d", &new_value) != 1) {
-		pr_err("%s: no lp interval value specified.\n",
-			bond->dev->name);
-		ret = -EINVAL;
-		goto out;
-	}
+	int ret;
 
-	if (new_value <= 0) {
-		pr_err ("%s: lp_interval must be between 1 and %d\n",
-			bond->dev->name, INT_MAX);
-		ret = -EINVAL;
-		goto out;
-	}
+	ret = bond_opt_tryset_rtnl(bond, BOND_OPT_LP_INTERVAL, (char *)buf);
+	if (!ret)
+		ret = count;
 
-	bond->params.lp_interval = new_value;
-out:
 	return ret;
 }
 
@@ -1636,10 +1045,6 @@ static ssize_t bonding_show_packets_per_slave(struct device *d,
 {
 	struct bonding *bond = to_bond(d);
 	unsigned int packets_per_slave = bond->params.packets_per_slave;
-
-	if (packets_per_slave > 1)
-		packets_per_slave = reciprocal_value(packets_per_slave);
-
 	return sprintf(buf, "%u\n", packets_per_slave);
 }
 
@@ -1648,28 +1053,13 @@ static ssize_t bonding_store_packets_per_slave(struct device *d,
 					       const char *buf, size_t count)
 {
 	struct bonding *bond = to_bond(d);
-	int new_value, ret = count;
+	int ret;
+
+	ret = bond_opt_tryset_rtnl(bond, BOND_OPT_PACKETS_PER_SLAVE,
+				   (char *)buf);
+	if (!ret)
+		ret = count;
 
-	if (sscanf(buf, "%d", &new_value) != 1) {
-		pr_err("%s: no packets_per_slave value specified.\n",
-		       bond->dev->name);
-		ret = -EINVAL;
-		goto out;
-	}
-	if (new_value < 0 || new_value > USHRT_MAX) {
-		pr_err("%s: packets_per_slave must be between 0 and %u\n",
-		       bond->dev->name, USHRT_MAX);
-		ret = -EINVAL;
-		goto out;
-	}
-	if (bond->params.mode != BOND_MODE_ROUNDROBIN)
-		pr_warn("%s: Warning: packets_per_slave has effect only in balance-rr mode\n",
-			bond->dev->name);
-	if (new_value > 1)
-		bond->params.packets_per_slave = reciprocal_value(new_value);
-	else
-		bond->params.packets_per_slave = new_value;
-out:
 	return ret;
 }
 
diff --git a/drivers/net/bonding/bond_sysfs_slave.c b/drivers/net/bonding/bond_sysfs_slave.c
new file mode 100644
index 000000000000..2e4eec5450c8
--- /dev/null
+++ b/drivers/net/bonding/bond_sysfs_slave.c
@@ -0,0 +1,144 @@
+/*	Sysfs attributes of bond slaves
+ *
+ *      Copyright (c) 2014 Scott Feldman <sfeldma@cumulusnetworks.com>
+ *
+ *	This program is free software; you can redistribute it and/or
+ *	modify it under the terms of the GNU General Public License
+ *	as published by the Free Software Foundation; either version
+ *	2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/capability.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+
+#include "bonding.h"
+
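+/* Each attribute below is exposed under the "bonding_slave" kobject that
+ * bond_sysfs_slave_add() registers for a slave, i.e. (illustratively)
+ * /sys/class/net/<slave ifname>/bonding_slave/<attribute>.
+ */
+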
+struct slave_attribute {
+	struct attribute attr;
+	ssize_t (*show)(struct slave *, char *);
+};
+
+#define SLAVE_ATTR(_name, _mode, _show)				\
+const struct slave_attribute slave_attr_##_name = {		\
+	.attr = {.name = __stringify(_name),			\
+		 .mode = _mode },				\
+	.show	= _show,					\
+};
+#define SLAVE_ATTR_RO(_name) \
+	SLAVE_ATTR(_name, S_IRUGO, _name##_show)
+
+static ssize_t state_show(struct slave *slave, char *buf)
+{
+	switch (bond_slave_state(slave)) {
+	case BOND_STATE_ACTIVE:
+		return sprintf(buf, "active\n");
+	case BOND_STATE_BACKUP:
+		return sprintf(buf, "backup\n");
+	default:
+		return sprintf(buf, "UNKONWN\n");
+	}
+}
+static SLAVE_ATTR_RO(state);
+
+static ssize_t mii_status_show(struct slave *slave, char *buf)
+{
+	return sprintf(buf, "%s\n", bond_slave_link_status(slave->link));
+}
+static SLAVE_ATTR_RO(mii_status);
+
+static ssize_t link_failure_count_show(struct slave *slave, char *buf)
+{
+	return sprintf(buf, "%d\n", slave->link_failure_count);
+}
+static SLAVE_ATTR_RO(link_failure_count);
+
+static ssize_t perm_hwaddr_show(struct slave *slave, char *buf)
+{
+	return sprintf(buf, "%pM\n", slave->perm_hwaddr);
+}
+static SLAVE_ATTR_RO(perm_hwaddr);
+
+static ssize_t queue_id_show(struct slave *slave, char *buf)
+{
+	return sprintf(buf, "%d\n", slave->queue_id);
+}
+static SLAVE_ATTR_RO(queue_id);
+
+static ssize_t ad_aggregator_id_show(struct slave *slave, char *buf)
+{
+	const struct aggregator *agg;
+
+	if (slave->bond->params.mode == BOND_MODE_8023AD) {
+		agg = SLAVE_AD_INFO(slave).port.aggregator;
+		if (agg)
+			return sprintf(buf, "%d\n",
+				       agg->aggregator_identifier);
+	}
+
+	return sprintf(buf, "N/A\n");
+}
+static SLAVE_ATTR_RO(ad_aggregator_id);
+
+static const struct slave_attribute *slave_attrs[] = {
+	&slave_attr_state,
+	&slave_attr_mii_status,
+	&slave_attr_link_failure_count,
+	&slave_attr_perm_hwaddr,
+	&slave_attr_queue_id,
+	&slave_attr_ad_aggregator_id,
+	NULL
+};
+
+#define to_slave_attr(_at) container_of(_at, struct slave_attribute, attr)
+#define to_slave(obj)	container_of(obj, struct slave, kobj)
+
+static ssize_t slave_show(struct kobject *kobj,
+			  struct attribute *attr, char *buf)
+{
+	struct slave_attribute *slave_attr = to_slave_attr(attr);
+	struct slave *slave = to_slave(kobj);
+
+	return slave_attr->show(slave, buf);
+}
+
+static const struct sysfs_ops slave_sysfs_ops = {
+	.show = slave_show,
+};
+
+static struct kobj_type slave_ktype = {
+#ifdef CONFIG_SYSFS
+	.sysfs_ops = &slave_sysfs_ops,
+#endif
+};
+
+int bond_sysfs_slave_add(struct slave *slave)
+{
+	const struct slave_attribute **a;
+	int err;
+
+	err = kobject_init_and_add(&slave->kobj, &slave_ktype,
+				   &(slave->dev->dev.kobj), "bonding_slave");
+	if (err)
+		return err;
+
+	for (a = slave_attrs; *a; ++a) {
+		err = sysfs_create_file(&slave->kobj, &((*a)->attr));
+		if (err) {
+			kobject_del(&slave->kobj);
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+void bond_sysfs_slave_del(struct slave *slave)
+{
+	const struct slave_attribute **a;
+
+	for (a = slave_attrs; *a; ++a)
+		sysfs_remove_file(&slave->kobj, &((*a)->attr));
+
+	kobject_del(&slave->kobj);
+}
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index a9f4f9f4d8ce..1a9062f4e0d6 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -23,8 +23,11 @@
 #include <linux/netpoll.h>
 #include <linux/inetdevice.h>
 #include <linux/etherdevice.h>
+#include <linux/reciprocal_div.h>
+
 #include "bond_3ad.h"
 #include "bond_alb.h"
+#include "bond_options.h"
 
 #define DRV_VERSION	"3.7.1"
 #define DRV_RELDATE	"April 27, 2011"
@@ -101,6 +104,10 @@
 		netdev_adjacent_get_private(bond_slave_list(bond)->prev) : \
 		NULL)
 
+/* Caller must have rcu_read_lock */
+#define bond_first_slave_rcu(bond) \
+	netdev_lower_get_first_private_rcu(bond->dev)
+
 #define bond_is_first_slave(bond, pos) (pos == bond_first_slave(bond))
 #define bond_is_last_slave(bond, pos) (pos == bond_last_slave(bond))
 
@@ -167,6 +174,7 @@ struct bond_params {
 	int resend_igmp;
 	int lp_interval;
 	int packets_per_slave;
+	struct reciprocal_value reciprocal_packets_per_slave;
 };
 
 struct bond_parm_tbl {
@@ -199,6 +207,7 @@ struct slave {
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	struct netpoll *np;
 #endif
+	struct kobject kobj;
 };
 
 /*
@@ -280,12 +289,18 @@ static inline bool bond_is_lb(const struct bonding *bond)
 
 static inline void bond_set_active_slave(struct slave *slave)
 {
-	slave->backup = 0;
+	if (slave->backup) {
+		slave->backup = 0;
+		rtmsg_ifinfo(RTM_NEWLINK, slave->dev, 0, GFP_KERNEL);
+	}
 }
 
 static inline void bond_set_backup_slave(struct slave *slave)
 {
-	slave->backup = 1;
+	if (!slave->backup) {
+		slave->backup = 1;
+		rtmsg_ifinfo(RTM_NEWLINK, slave->dev, 0, GFP_KERNEL);
+	}
 }
 
 static inline int bond_slave_state(struct slave *slave)
@@ -394,8 +409,8 @@ static inline __be32 bond_confirm_addr(struct net_device *dev, __be32 dst, __be3
 	in_dev = __in_dev_get_rcu(dev);
 
 	if (in_dev)
-		addr = inet_confirm_addr(in_dev, dst, local, RT_SCOPE_HOST);
-
+		addr = inet_confirm_addr(dev_net(dev), in_dev, dst, local,
+					 RT_SCOPE_HOST);
 	rcu_read_unlock();
 	return addr;
 }
@@ -412,19 +427,18 @@ static inline bool slave_can_tx(struct slave *slave)
 struct bond_net;
 
 int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond, struct slave *slave);
-int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, struct net_device *slave_dev);
-void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int slave_id);
+void bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, struct net_device *slave_dev);
 int bond_create(struct net *net, const char *name);
 int bond_create_sysfs(struct bond_net *net);
 void bond_destroy_sysfs(struct bond_net *net);
 void bond_prepare_sysfs_group(struct bonding *bond);
+int bond_sysfs_slave_add(struct slave *slave);
+void bond_sysfs_slave_del(struct slave *slave);
 int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev);
 int bond_release(struct net_device *bond_dev, struct net_device *slave_dev);
-void bond_mii_monitor(struct work_struct *);
-void bond_loadbalance_arp_mon(struct work_struct *);
-void bond_activebackup_arp_mon(struct work_struct *);
 int bond_xmit_hash(struct bonding *bond, struct sk_buff *skb, int count);
 int bond_parse_parm(const char *mode_arg, const struct bond_parm_tbl *tbl);
+int bond_parm_tbl_lookup(int mode, const struct bond_parm_tbl *tbl);
 void bond_select_active_slave(struct bonding *bond);
 void bond_change_active_slave(struct bonding *bond, struct slave *new_active);
 void bond_create_debugfs(void);
@@ -437,10 +451,11 @@ void bond_setup(struct net_device *bond_dev);
 unsigned int bond_get_num_tx_queues(void);
 int bond_netlink_init(void);
 void bond_netlink_fini(void);
-int bond_option_mode_set(struct bonding *bond, int mode);
-int bond_option_active_slave_set(struct bonding *bond, struct net_device *slave_dev);
+int bond_option_arp_ip_target_add(struct bonding *bond, __be32 target);
+int bond_option_arp_ip_target_rem(struct bonding *bond, __be32 target);
 struct net_device *bond_option_active_slave_get_rcu(struct bonding *bond);
 struct net_device *bond_option_active_slave_get(struct bonding *bond);
+const char *bond_slave_link_status(s8 link);
 
 struct bond_net {
 	struct net *		net;	/* Associated network namespace */
@@ -520,7 +535,6 @@ static inline int bond_get_targets_ip(__be32 *targets, __be32 ip)
 /* exported from bond_main.c */
 extern int bond_net_id;
 extern const struct bond_parm_tbl bond_lacp_tbl[];
-extern const struct bond_parm_tbl bond_mode_tbl[];
 extern const struct bond_parm_tbl xmit_hashtype_tbl[];
 extern const struct bond_parm_tbl arp_validate_tbl[];
 extern const struct bond_parm_tbl arp_all_targets_tbl[];
diff --git a/drivers/net/caif/caif_spi_slave.c b/drivers/net/caif/caif_spi_slave.c
index ee92ad5a6cf8..39ba2f892ad6 100644
--- a/drivers/net/caif/caif_spi_slave.c
+++ b/drivers/net/caif/caif_spi_slave.c
@@ -3,7 +3,6 @@
  * Author:  Daniel Martensson
  * License terms: GNU General Public License (GPL) version 2.
  */
-#include <linux/init.h>
 #include <linux/module.h>
 #include <linux/device.h>
 #include <linux/platform_device.h>
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index 3c069472eb8b..d447b881bbde 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -71,7 +71,7 @@ config CAN_AT91
 	  and AT91SAM9X5 processors.
 
 config CAN_TI_HECC
-	depends on ARCH_OMAP3
+	depends on ARM
 	tristate "TI High End CAN Controller"
 	---help---
 	  Driver for TI HECC (High End CAN Controller) module found on many
@@ -104,7 +104,7 @@ config CAN_JANZ_ICAN3
 
 config CAN_FLEXCAN
 	tristate "Support for Freescale FLEXCAN based chips"
-	depends on ARM || PPC
+	depends on (ARM && CPU_LITTLE_ENDIAN) || PPC
 	---help---
 	  Say Y here if you want to support for Freescale FlexCAN.
 
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index cf0f63e14e53..6efe27458116 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -22,7 +22,6 @@
 #include <linux/clk.h>
 #include <linux/errno.h>
 #include <linux/if_arp.h>
-#include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
diff --git a/drivers/net/can/bfin_can.c b/drivers/net/can/bfin_can.c
index 8a0b515b33ea..8d2b89a12e09 100644
--- a/drivers/net/can/bfin_can.c
+++ b/drivers/net/can/bfin_can.c
@@ -9,7 +9,6 @@
  */
 
 #include <linux/module.h>
-#include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/bitops.h>
 #include <linux/interrupt.h>
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
index 77061eebb034..951bfede8f3d 100644
--- a/drivers/net/can/c_can/c_can.c
+++ b/drivers/net/can/c_can/c_can.c
@@ -808,17 +808,19 @@ static int c_can_do_rx_poll(struct net_device *dev, int quota)
 	u32 num_rx_pkts = 0;
 	unsigned int msg_obj, msg_ctrl_save;
 	struct c_can_priv *priv = netdev_priv(dev);
-	u32 val = c_can_read_reg32(priv, C_CAN_INTPND1_REG);
+	u16 val;
+
+	/*
+	 * It is faster to read only one 16bit register. This is only possible
+	 * for a maximum number of 16 objects.
+	 */
+	BUILD_BUG_ON_MSG(C_CAN_MSG_OBJ_RX_LAST > 16,
+			"Implementation does not support more message objects than 16");
+
+	while (quota > 0 && (val = priv->read_reg(priv, C_CAN_INTPND1_REG))) {
+		while ((msg_obj = ffs(val)) && quota > 0) {
+			val &= ~BIT(msg_obj - 1);
 
-	for (msg_obj = C_CAN_MSG_OBJ_RX_FIRST;
-			msg_obj <= C_CAN_MSG_OBJ_RX_LAST && quota > 0;
-			val = c_can_read_reg32(priv, C_CAN_INTPND1_REG),
-			msg_obj++) {
-		/*
-		 * as interrupt pending register's bit n-1 corresponds to
-		 * message object n, we need to handle the same properly.
-		 */
-		if (val & (1 << (msg_obj - 1))) {
 			c_can_object_get(dev, 0, msg_obj, IF_COMM_ALL &
 					~IF_COMM_TXRQST);
 			msg_ctrl_save = priv->read_reg(priv,
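
The reworked poll loop above reads the 16-bit INTPND1 register once per pass and walks its set bits with ffs() rather than testing every message object individually. A minimal sketch of that bit-walking pattern, with hypothetical names and no hardware access, looks like this:

/* Illustrative sketch only -- not part of the patch. */
#include <linux/bitops.h>
#include <linux/types.h>

static void service_pending_objects(u16 pending, int *quota)
{
	unsigned int msg_obj;

	while (*quota > 0 && (msg_obj = ffs(pending))) {
		/* bit n-1 of the pending register maps to message object n */
		pending &= ~BIT(msg_obj - 1);

		/* fetch and deliver message object 'msg_obj' here */
		(*quota)--;
	}
}
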
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index bda1888cae9a..13a909822e25 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -13,8 +13,7 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 #include <linux/module.h>
diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c
index ab5909a7bae9..e24e6690d672 100644
--- a/drivers/net/can/janz-ican3.c
+++ b/drivers/net/can/janz-ican3.c
@@ -11,7 +11,6 @@
 
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/delay.h>
 #include <linux/platform_device.h>
diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c
index 08ac401e0214..cdb9808d12db 100644
--- a/drivers/net/can/mcp251x.c
+++ b/drivers/net/can/mcp251x.c
@@ -28,8 +28,7 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
  *
  *
  *
@@ -59,6 +58,7 @@
 #include <linux/can/dev.h>
 #include <linux/can/led.h>
 #include <linux/can/platform/mcp251x.h>
+#include <linux/clk.h>
 #include <linux/completion.h>
 #include <linux/delay.h>
 #include <linux/device.h>
@@ -69,6 +69,8 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/netdevice.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 #include <linux/spi/spi.h>
@@ -264,6 +266,7 @@ struct mcp251x_priv {
 	int restart_tx;
 	struct regulator *power;
 	struct regulator *transceiver;
+	struct clk *clk;
 };
 
 #define MCP251X_IS(_model) \
@@ -995,22 +998,65 @@ static const struct net_device_ops mcp251x_netdev_ops = {
 	.ndo_start_xmit = mcp251x_hard_start_xmit,
 };
 
+static const struct of_device_id mcp251x_of_match[] = {
+	{
+		.compatible	= "microchip,mcp2510",
+		.data		= (void *)CAN_MCP251X_MCP2510,
+	},
+	{
+		.compatible	= "microchip,mcp2515",
+		.data		= (void *)CAN_MCP251X_MCP2515,
+	},
+	{ }
+};
+MODULE_DEVICE_TABLE(of, mcp251x_of_match);
+
+static const struct spi_device_id mcp251x_id_table[] = {
+	{
+		.name		= "mcp2510",
+		.driver_data	= (kernel_ulong_t)CAN_MCP251X_MCP2510,
+	},
+	{
+		.name		= "mcp2515",
+		.driver_data	= (kernel_ulong_t)CAN_MCP251X_MCP2515,
+	},
+	{ }
+};
+MODULE_DEVICE_TABLE(spi, mcp251x_id_table);
+
 static int mcp251x_can_probe(struct spi_device *spi)
 {
+	const struct of_device_id *of_id = of_match_device(mcp251x_of_match,
+							   &spi->dev);
+	struct mcp251x_platform_data *pdata = dev_get_platdata(&spi->dev);
 	struct net_device *net;
 	struct mcp251x_priv *priv;
-	struct mcp251x_platform_data *pdata = dev_get_platdata(&spi->dev);
-	int ret = -ENODEV;
+	int freq, ret = -ENODEV;
+	struct clk *clk;
+
+	clk = devm_clk_get(&spi->dev, NULL);
+	if (IS_ERR(clk)) {
+		if (pdata)
+			freq = pdata->oscillator_frequency;
+		else
+			return PTR_ERR(clk);
+	} else {
+		freq = clk_get_rate(clk);
+	}
 
-	if (!pdata)
-		/* Platform data is required for osc freq */
-		goto error_out;
+	/* Sanity check */
+	if (freq < 1000000 || freq > 25000000)
+		return -ERANGE;
 
 	/* Allocate can/net device */
 	net = alloc_candev(sizeof(struct mcp251x_priv), TX_ECHO_SKB_MAX);
-	if (!net) {
-		ret = -ENOMEM;
-		goto error_alloc;
+	if (!net)
+		return -ENOMEM;
+
+	if (!IS_ERR(clk)) {
+		ret = clk_prepare_enable(clk);
+		if (ret)
+			goto out_free;
 	}
 
 	net->netdev_ops = &mcp251x_netdev_ops;
@@ -1019,23 +1065,27 @@ static int mcp251x_can_probe(struct spi_device *spi)
 	priv = netdev_priv(net);
 	priv->can.bittiming_const = &mcp251x_bittiming_const;
 	priv->can.do_set_mode = mcp251x_do_set_mode;
-	priv->can.clock.freq = pdata->oscillator_frequency / 2;
+	priv->can.clock.freq = freq / 2;
 	priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES |
 		CAN_CTRLMODE_LOOPBACK | CAN_CTRLMODE_LISTENONLY;
-	priv->model = spi_get_device_id(spi)->driver_data;
+	if (of_id)
+		priv->model = (enum mcp251x_model)of_id->data;
+	else
+		priv->model = spi_get_device_id(spi)->driver_data;
 	priv->net = net;
+	priv->clk = clk;
 
 	priv->power = devm_regulator_get(&spi->dev, "vdd");
 	priv->transceiver = devm_regulator_get(&spi->dev, "xceiver");
 	if ((PTR_ERR(priv->power) == -EPROBE_DEFER) ||
 	    (PTR_ERR(priv->transceiver) == -EPROBE_DEFER)) {
 		ret = -EPROBE_DEFER;
-		goto error_power;
+		goto out_clk;
 	}
 
 	ret = mcp251x_power_enable(priv->power, 1);
 	if (ret)
-		goto error_power;
+		goto out_clk;
 
 	spi_set_drvdata(spi, priv);
 
@@ -1067,15 +1117,17 @@ static int mcp251x_can_probe(struct spi_device *spi)
 
 	/* Allocate non-DMA buffers */
 	if (!mcp251x_enable_dma) {
-		priv->spi_tx_buf = kmalloc(SPI_TRANSFER_BUF_LEN, GFP_KERNEL);
+		priv->spi_tx_buf = devm_kzalloc(&spi->dev, SPI_TRANSFER_BUF_LEN,
+						GFP_KERNEL);
 		if (!priv->spi_tx_buf) {
 			ret = -ENOMEM;
-			goto error_tx_buf;
+			goto error_probe;
 		}
-		priv->spi_rx_buf = kmalloc(SPI_TRANSFER_BUF_LEN, GFP_KERNEL);
+		priv->spi_rx_buf = devm_kzalloc(&spi->dev, SPI_TRANSFER_BUF_LEN,
+						GFP_KERNEL);
 		if (!priv->spi_rx_buf) {
 			ret = -ENOMEM;
-			goto error_rx_buf;
+			goto error_probe;
 		}
 	}
 
@@ -1108,21 +1160,18 @@ static int mcp251x_can_probe(struct spi_device *spi)
 	return ret;
 
 error_probe:
-	if (!mcp251x_enable_dma)
-		kfree(priv->spi_rx_buf);
-error_rx_buf:
-	if (!mcp251x_enable_dma)
-		kfree(priv->spi_tx_buf);
-error_tx_buf:
 	if (mcp251x_enable_dma)
 		dma_free_coherent(&spi->dev, PAGE_SIZE,
 				  priv->spi_tx_buf, priv->spi_tx_dma);
 	mcp251x_power_enable(priv->power, 0);
-error_power:
+
+out_clk:
+	if (!IS_ERR(clk))
+		clk_disable_unprepare(clk);
+
+out_free:
 	free_candev(net);
-error_alloc:
-	dev_err(&spi->dev, "probe failed\n");
-error_out:
+
 	return ret;
 }
 
@@ -1136,13 +1185,13 @@ static int mcp251x_can_remove(struct spi_device *spi)
 	if (mcp251x_enable_dma) {
 		dma_free_coherent(&spi->dev, PAGE_SIZE,
 				  priv->spi_tx_buf, priv->spi_tx_dma);
-	} else {
-		kfree(priv->spi_tx_buf);
-		kfree(priv->spi_rx_buf);
 	}
 
 	mcp251x_power_enable(priv->power, 0);
 
+	if (!IS_ERR(priv->clk))
+		clk_disable_unprepare(priv->clk);
+
 	free_candev(net);
 
 	return 0;
@@ -1205,21 +1254,13 @@ static int mcp251x_can_resume(struct device *dev)
 static SIMPLE_DEV_PM_OPS(mcp251x_can_pm_ops, mcp251x_can_suspend,
 	mcp251x_can_resume);
 
-static const struct spi_device_id mcp251x_id_table[] = {
-	{ "mcp2510",	CAN_MCP251X_MCP2510 },
-	{ "mcp2515",	CAN_MCP251X_MCP2515 },
-	{ },
-};
-
-MODULE_DEVICE_TABLE(spi, mcp251x_id_table);
-
 static struct spi_driver mcp251x_can_driver = {
 	.driver = {
 		.name = DEVICE_NAME,
 		.owner = THIS_MODULE,
+		.of_match_table = mcp251x_of_match,
 		.pm = &mcp251x_can_pm_ops,
 	},
-
 	.id_table = mcp251x_id_table,
 	.probe = mcp251x_can_probe,
 	.remove = mcp251x_can_remove,
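
The probe rework above takes the oscillator frequency from the clock framework when a clock is available and only falls back to platform data otherwise, rejecting rates outside 1-25 MHz. A condensed sketch of that decision, using a hypothetical helper name:

/* Illustrative sketch only -- not part of the patch. */
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/can/platform/mcp251x.h>

static int mcp251x_get_freq(struct device *dev,
			    const struct mcp251x_platform_data *pdata,
			    struct clk **clk_out, int *freq)
{
	struct clk *clk = devm_clk_get(dev, NULL);

	if (IS_ERR(clk)) {
		if (!pdata)
			return PTR_ERR(clk);	/* neither clock nor platform data */
		*freq = pdata->oscillator_frequency;
	} else {
		*freq = clk_get_rate(clk);
	}

	*clk_out = clk;
	return (*freq < 1000000 || *freq > 25000000) ? -ERANGE : 0;
}
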
diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c
index e59b3a392af6..035e235e3118 100644
--- a/drivers/net/can/mscan/mpc5xxx_can.c
+++ b/drivers/net/can/mscan/mpc5xxx_can.c
@@ -16,8 +16,7 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 #include <linux/kernel.h>
diff --git a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c
index a955ec8c4b97..b9f3faabb0f3 100644
--- a/drivers/net/can/mscan/mscan.c
+++ b/drivers/net/can/mscan/mscan.c
@@ -16,8 +16,7 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 #include <linux/kernel.h>
diff --git a/drivers/net/can/mscan/mscan.h b/drivers/net/can/mscan/mscan.h
index e98abb97a050..ad8e08f9c496 100644
--- a/drivers/net/can/mscan/mscan.h
+++ b/drivers/net/can/mscan/mscan.h
@@ -14,8 +14,7 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 #ifndef __MSCAN_H__
diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c
index 5f0e9b3bfa7b..6c077eb87b5e 100644
--- a/drivers/net/can/pch_can.c
+++ b/drivers/net/can/pch_can.c
@@ -12,8 +12,7 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 #include <linux/interrupt.h>
@@ -22,7 +21,6 @@
 #include <linux/module.h>
 #include <linux/sched.h>
 #include <linux/pci.h>
-#include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/types.h>
 #include <linux/errno.h>
diff --git a/drivers/net/can/sja1000/ems_pci.c b/drivers/net/can/sja1000/ems_pci.c
index 835921388e7b..d790b874ca79 100644
--- a/drivers/net/can/sja1000/ems_pci.c
+++ b/drivers/net/can/sja1000/ems_pci.c
@@ -13,8 +13,7 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 #include <linux/kernel.h>
diff --git a/drivers/net/can/sja1000/kvaser_pci.c b/drivers/net/can/sja1000/kvaser_pci.c
index 087b13bd300e..c96eb14699d5 100644
--- a/drivers/net/can/sja1000/kvaser_pci.c
+++ b/drivers/net/can/sja1000/kvaser_pci.c
@@ -26,8 +26,7 @@
  * General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 #include <linux/kernel.h>
diff --git a/drivers/net/can/sja1000/plx_pci.c b/drivers/net/can/sja1000/plx_pci.c
index f9b4f81cd86a..fbb61a0d901f 100644
--- a/drivers/net/can/sja1000/plx_pci.c
+++ b/drivers/net/can/sja1000/plx_pci.c
@@ -16,8 +16,7 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 #include <linux/kernel.h>
@@ -45,7 +44,8 @@ MODULE_SUPPORTED_DEVICE("Adlink PCI-7841/cPCI-7841, "
 			"esd CAN-PCI/PMC/266, "
 			"esd CAN-PCIe/2000, "
 			"Connect Tech Inc. CANpro/104-Plus Opto (CRG001), "
-			"IXXAT PC-I 04/PCI")
+			"IXXAT PC-I 04/PCI, "
+			"ELCUS CAN-200-PCI")
 MODULE_LICENSE("GPL v2");
 
 #define PLX_PCI_MAX_CHAN 2
@@ -123,6 +123,11 @@ struct plx_pci_card {
 #define ESD_PCI_SUB_SYS_ID_PCIE2000	0x0200
 #define ESD_PCI_SUB_SYS_ID_PCI104200	0x0501
 
+#define CAN200PCI_DEVICE_ID		0x9030
+#define CAN200PCI_VENDOR_ID		0x10b5
+#define CAN200PCI_SUB_DEVICE_ID		0x0301
+#define CAN200PCI_SUB_VENDOR_ID		0xe1c5
+
 #define IXXAT_PCI_VENDOR_ID		0x10b5
 #define IXXAT_PCI_DEVICE_ID		0x9050
 #define IXXAT_PCI_SUB_SYS_ID		0x2540
@@ -234,6 +239,14 @@ static struct plx_pci_card_info plx_pci_card_info_cti = {
 	/* based on PLX9030 */
 };
 
+static struct plx_pci_card_info plx_pci_card_info_elcus = {
+	"Eclus CAN-200-PCI", 2,
+	PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
+	{1, 0x00, 0x00}, { {2, 0x00, 0x80}, {3, 0x00, 0x80} },
+	&plx_pci_reset_common
+	/* based on PLX9030 */
+};
+
 static DEFINE_PCI_DEVICE_TABLE(plx_pci_tbl) = {
 	{
 		/* Adlink PCI-7841/cPCI-7841 */
@@ -319,6 +332,13 @@ static DEFINE_PCI_DEVICE_TABLE(plx_pci_tbl) = {
 		0, 0,
 		(kernel_ulong_t)&plx_pci_card_info_cti
 	},
+	{
+		/* Elcus CAN-200-PCI */
+		CAN200PCI_VENDOR_ID, CAN200PCI_DEVICE_ID,
+		CAN200PCI_SUB_VENDOR_ID, CAN200PCI_SUB_DEVICE_ID,
+		0, 0,
+		(kernel_ulong_t)&plx_pci_card_info_elcus
+	},
 	{ 0,}
 };
 MODULE_DEVICE_TABLE(pci, plx_pci_tbl);
diff --git a/drivers/net/can/sja1000/sja1000_isa.c b/drivers/net/can/sja1000/sja1000_isa.c
index 06a282397fff..df136a2516c4 100644
--- a/drivers/net/can/sja1000/sja1000_isa.c
+++ b/drivers/net/can/sja1000/sja1000_isa.c
@@ -11,8 +11,7 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 #include <linux/kernel.h>
diff --git a/drivers/net/can/sja1000/sja1000_of_platform.c b/drivers/net/can/sja1000/sja1000_of_platform.c
index 047accd4ede5..2f6e24534231 100644
--- a/drivers/net/can/sja1000/sja1000_of_platform.c
+++ b/drivers/net/can/sja1000/sja1000_of_platform.c
@@ -13,8 +13,7 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 /* This is a generic driver for SJA1000 chips on the OpenFirmware platform
diff --git a/drivers/net/can/sja1000/sja1000_platform.c b/drivers/net/can/sja1000/sja1000_platform.c
index 29f9b6321187..943df645b459 100644
--- a/drivers/net/can/sja1000/sja1000_platform.c
+++ b/drivers/net/can/sja1000/sja1000_platform.c
@@ -12,8 +12,7 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 #include <linux/kernel.h>
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
index 25377e547f9b..3fcdae266377 100644
--- a/drivers/net/can/slcan.c
+++ b/drivers/net/can/slcan.c
@@ -18,9 +18,7 @@
  * General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307. You can also get it
- * at http://www.gnu.org/licenses/gpl.html
+ * with this program; if not, see http://www.gnu.org/licenses/gpl.html
  *
  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
diff --git a/drivers/net/can/softing/softing_cs.c b/drivers/net/can/softing/softing_cs.c
index 498605f833dd..cdc0c7433a4b 100644
--- a/drivers/net/can/softing/softing_cs.c
+++ b/drivers/net/can/softing/softing_cs.c
@@ -13,8 +13,7 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 #include <linux/module.h>
diff --git a/drivers/net/can/softing/softing_fw.c b/drivers/net/can/softing/softing_fw.c
index b595d3422b9f..52fe50725d74 100644
--- a/drivers/net/can/softing/softing_fw.c
+++ b/drivers/net/can/softing/softing_fw.c
@@ -13,8 +13,7 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 #include <linux/firmware.h>
diff --git a/drivers/net/can/softing/softing_main.c b/drivers/net/can/softing/softing_main.c
index 6cd5c01b624d..9ea0dcde94ce 100644
--- a/drivers/net/can/softing/softing_main.c
+++ b/drivers/net/can/softing/softing_main.c
@@ -13,12 +13,10 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 #include <linux/module.h>
-#include <linux/init.h>
 #include <linux/interrupt.h>
 #include <asm/io.h>
 
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index 60d95b44d0f7..2c62fe6c8fa9 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -37,7 +37,6 @@
  */
 
 #include <linux/module.h>
-#include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/types.h>
 #include <linux/interrupt.h>
@@ -518,10 +517,10 @@ static netdev_tx_t ti_hecc_xmit(struct sk_buff *skb, struct net_device *ndev)
 		data = (cf->can_id & CAN_SFF_MASK) << 18;
 	hecc_write_mbx(priv, mbxno, HECC_CANMID, data);
 	hecc_write_mbx(priv, mbxno, HECC_CANMDL,
-		be32_to_cpu(*(u32 *)(cf->data)));
+		be32_to_cpu(*(__be32 *)(cf->data)));
 	if (cf->can_dlc > 4)
 		hecc_write_mbx(priv, mbxno, HECC_CANMDH,
-			be32_to_cpu(*(u32 *)(cf->data + 4)));
+			be32_to_cpu(*(__be32 *)(cf->data + 4)));
 	else
 		*(u32 *)(cf->data + 4) = 0;
 	can_put_echo_skb(skb, ndev, mbxno);
@@ -569,12 +568,10 @@ static int ti_hecc_rx_pkt(struct ti_hecc_priv *priv, int mbxno)
 		cf->can_id |= CAN_RTR_FLAG;
 	cf->can_dlc = get_can_dlc(data & 0xF);
 	data = hecc_read_mbx(priv, mbxno, HECC_CANMDL);
-	*(u32 *)(cf->data) = cpu_to_be32(data);
+	*(__be32 *)(cf->data) = cpu_to_be32(data);
 	if (cf->can_dlc > 4) {
 		data = hecc_read_mbx(priv, mbxno, HECC_CANMDH);
-		*(u32 *)(cf->data + 4) = cpu_to_be32(data);
-	} else {
-		*(u32 *)(cf->data + 4) = 0;
+		*(__be32 *)(cf->data + 4) = cpu_to_be32(data);
 	}
 	spin_lock_irqsave(&priv->mbx_lock, flags);
 	hecc_clear_bit(priv, HECC_CANME, mbx_mask);
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index 8aeec0b4601a..52c42fd49510 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -16,7 +16,6 @@
  * with this program; if not, write to the Free Software Foundation, Inc.,
  * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
  */
-#include <linux/init.h>
 #include <linux/signal.h>
 #include <linux/slab.h>
 #include <linux/module.h>
diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c
index ac6177d3befc..7fbe85935f1d 100644
--- a/drivers/net/can/usb/esd_usb2.c
+++ b/drivers/net/can/usb/esd_usb2.c
@@ -16,7 +16,6 @@
  * with this program; if not, write to the Free Software Foundation, Inc.,
  * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
  */
-#include <linux/init.h>
 #include <linux/signal.h>
 #include <linux/slab.h>
 #include <linux/module.h>
diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
index 4b2d5ed62b11..6c859bba8b65 100644
--- a/drivers/net/can/usb/kvaser_usb.c
+++ b/drivers/net/can/usb/kvaser_usb.c
@@ -12,7 +12,6 @@
  * Copyright (C) 2012 Olivier Sobrie <olivier@sobrie.be>
  */
 
-#include <linux/init.h>
 #include <linux/completion.h>
 #include <linux/module.h>
 #include <linux/netdevice.h>
diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c
index 8becd3d838b5..a0fa1fd5092b 100644
--- a/drivers/net/can/usb/usb_8dev.c
+++ b/drivers/net/can/usb/usb_8dev.c
@@ -23,7 +23,6 @@
  * who were very cooperative and answered my questions.
  */
 
-#include <linux/init.h>
 #include <linux/signal.h>
 #include <linux/slab.h>
 #include <linux/module.h>
diff --git a/drivers/net/eql.c b/drivers/net/eql.c
index f219d38acf58..7a79b6046879 100644
--- a/drivers/net/eql.c
+++ b/drivers/net/eql.c
@@ -395,6 +395,7 @@ static int __eql_insert_slave(slave_queue_t *queue, slave_t *slave)
 		if (duplicate_slave)
 			eql_kill_one_slave(queue, duplicate_slave);
 
+		dev_hold(slave->dev);
 		list_add(&slave->list, &queue->all_slaves);
 		queue->num_slaves++;
 		slave->dev->flags |= IFF_SLAVE;
@@ -413,39 +414,35 @@ static int eql_enslave(struct net_device *master_dev, slaving_request_t __user *
 	if (copy_from_user(&srq, srqp, sizeof (slaving_request_t)))
 		return -EFAULT;
 
-	slave_dev  = dev_get_by_name(&init_net, srq.slave_name);
-	if (slave_dev) {
-		if ((master_dev->flags & IFF_UP) == IFF_UP) {
-			/* slave is not a master & not already a slave: */
-			if (!eql_is_master(slave_dev) &&
-			    !eql_is_slave(slave_dev)) {
-				slave_t *s = kmalloc(sizeof(*s), GFP_KERNEL);
-				equalizer_t *eql = netdev_priv(master_dev);
-				int ret;
-
-				if (!s) {
-					dev_put(slave_dev);
-					return -ENOMEM;
-				}
-
-				memset(s, 0, sizeof(*s));
-				s->dev = slave_dev;
-				s->priority = srq.priority;
-				s->priority_bps = srq.priority;
-				s->priority_Bps = srq.priority / 8;
-
-				spin_lock_bh(&eql->queue.lock);
-				ret = __eql_insert_slave(&eql->queue, s);
-				if (ret) {
-					dev_put(slave_dev);
-					kfree(s);
-				}
-				spin_unlock_bh(&eql->queue.lock);
-
-				return ret;
-			}
+	slave_dev = __dev_get_by_name(&init_net, srq.slave_name);
+	if (!slave_dev)
+		return -ENODEV;
+
+	if ((master_dev->flags & IFF_UP) == IFF_UP) {
+		/* slave is not a master & not already a slave: */
+		if (!eql_is_master(slave_dev) && !eql_is_slave(slave_dev)) {
+			slave_t *s = kmalloc(sizeof(*s), GFP_KERNEL);
+			equalizer_t *eql = netdev_priv(master_dev);
+			int ret;
+
+			if (!s)
+				return -ENOMEM;
+
+			memset(s, 0, sizeof(*s));
+			s->dev = slave_dev;
+			s->priority = srq.priority;
+			s->priority_bps = srq.priority;
+			s->priority_Bps = srq.priority / 8;
+
+			spin_lock_bh(&eql->queue.lock);
+			ret = __eql_insert_slave(&eql->queue, s);
+			if (ret)
+				kfree(s);
+
+			spin_unlock_bh(&eql->queue.lock);
+
+			return ret;
 		}
-		dev_put(slave_dev);
 	}
 
 	return -EINVAL;
@@ -461,24 +458,20 @@ static int eql_emancipate(struct net_device *master_dev, slaving_request_t __use
 	if (copy_from_user(&srq, srqp, sizeof (slaving_request_t)))
 		return -EFAULT;
 
-	slave_dev = dev_get_by_name(&init_net, srq.slave_name);
-	ret = -EINVAL;
-	if (slave_dev) {
-		spin_lock_bh(&eql->queue.lock);
-
-		if (eql_is_slave(slave_dev)) {
-			slave_t *slave = __eql_find_slave_dev(&eql->queue,
-							      slave_dev);
+	slave_dev = __dev_get_by_name(&init_net, srq.slave_name);
+	if (!slave_dev)
+		return -ENODEV;
 
-			if (slave) {
-				eql_kill_one_slave(&eql->queue, slave);
-				ret = 0;
-			}
+	ret = -EINVAL;
+	spin_lock_bh(&eql->queue.lock);
+	if (eql_is_slave(slave_dev)) {
+		slave_t *slave = __eql_find_slave_dev(&eql->queue, slave_dev);
+		if (slave) {
+			eql_kill_one_slave(&eql->queue, slave);
+			ret = 0;
 		}
-		dev_put(slave_dev);
-
-		spin_unlock_bh(&eql->queue.lock);
 	}
+	spin_unlock_bh(&eql->queue.lock);
 
 	return ret;
 }
@@ -494,7 +487,7 @@ static int eql_g_slave_cfg(struct net_device *dev, slave_config_t __user *scp)
 	if (copy_from_user(&sc, scp, sizeof (slave_config_t)))
 		return -EFAULT;
 
-	slave_dev = dev_get_by_name(&init_net, sc.slave_name);
+	slave_dev = __dev_get_by_name(&init_net, sc.slave_name);
 	if (!slave_dev)
 		return -ENODEV;
 
@@ -510,8 +503,6 @@ static int eql_g_slave_cfg(struct net_device *dev, slave_config_t __user *scp)
 	}
 	spin_unlock_bh(&eql->queue.lock);
 
-	dev_put(slave_dev);
-
 	if (!ret && copy_to_user(scp, &sc, sizeof (slave_config_t)))
 		ret = -EFAULT;
 
@@ -529,7 +520,7 @@ static int eql_s_slave_cfg(struct net_device *dev, slave_config_t __user *scp)
 	if (copy_from_user(&sc, scp, sizeof (slave_config_t)))
 		return -EFAULT;
 
-	slave_dev = dev_get_by_name(&init_net, sc.slave_name);
+	slave_dev = __dev_get_by_name(&init_net, sc.slave_name);
 	if (!slave_dev)
 		return -ENODEV;
 
@@ -548,8 +539,6 @@ static int eql_s_slave_cfg(struct net_device *dev, slave_config_t __user *scp)
 	}
 	spin_unlock_bh(&eql->queue.lock);
 
-	dev_put(slave_dev);
-
 	return ret;
 }
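
The eql conversion above switches the ioctl handlers from dev_get_by_name() to __dev_get_by_name(), which is safe because ndo_do_ioctl runs under the RTNL lock and the lookup takes no reference; the slave's lifetime is instead pinned with dev_hold() when it is linked into the queue. A small sketch of that pattern, with an illustrative helper name:

/* Illustrative sketch only -- not part of the patch. */
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

static struct net_device *demo_grab_slave(struct net *net, const char *name)
{
	struct net_device *dev;

	ASSERT_RTNL();				/* lookup is only valid under RTNL */

	dev = __dev_get_by_name(net, name);	/* takes no reference */
	if (dev)
		dev_hold(dev);			/* pin it while we keep a pointer */
	return dev;
}

The matching dev_put() then belongs wherever the stored pointer is finally dropped.
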
 
diff --git a/drivers/net/ethernet/3com/3c509.c b/drivers/net/ethernet/3com/3c509.c
index ede8daa68275..c53384d41c96 100644
--- a/drivers/net/ethernet/3com/3c509.c
+++ b/drivers/net/ethernet/3com/3c509.c
@@ -252,8 +252,7 @@ static int el3_isa_id_sequence(__be16 *phys_addr)
 		for (i = 0; i < el3_cards; i++) {
 			struct el3_private *lp = netdev_priv(el3_devs[i]);
 			if (lp->type == EL3_PNP &&
-			    !memcmp(phys_addr, el3_devs[i]->dev_addr,
-				    ETH_ALEN)) {
+			    ether_addr_equal((u8 *)phys_addr, el3_devs[i]->dev_addr)) {
 				if (el3_debug > 3)
 					pr_debug("3c509 with address %02x %02x %02x %02x %02x %02x was found by ISAPnP\n",
 						phys_addr[0] & 0xff, phys_addr[0] >> 8,
diff --git a/drivers/net/ethernet/3com/3c574_cs.c b/drivers/net/ethernet/3com/3c574_cs.c
index 6fc994fa4abe..b9948f00c5e9 100644
--- a/drivers/net/ethernet/3com/3c574_cs.c
+++ b/drivers/net/ethernet/3com/3c574_cs.c
@@ -73,7 +73,6 @@ earlier 3Com products.
 
 #include <linux/module.h>
 #include <linux/kernel.h>
-#include <linux/init.h>
 #include <linux/slab.h>
 #include <linux/string.h>
 #include <linux/timer.h>
diff --git a/drivers/net/ethernet/3com/3c589_cs.c b/drivers/net/ethernet/3com/3c589_cs.c
index 078480aaa168..5992860a39c9 100644
--- a/drivers/net/ethernet/3com/3c589_cs.c
+++ b/drivers/net/ethernet/3com/3c589_cs.c
@@ -25,7 +25,6 @@
 #define DRV_VERSION	"1.162-ac"
 
 #include <linux/module.h>
-#include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/ptrace.h>
 #include <linux/slab.h>
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index ad5272b348f0..0f4241c6e97e 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -693,7 +693,7 @@ DEFINE_WINDOW_IO(16)
 DEFINE_WINDOW_IO(32)
 
 #ifdef CONFIG_PCI
-#define DEVICE_PCI(dev) (((dev)->bus == &pci_bus_type) ? to_pci_dev((dev)) : NULL)
+#define DEVICE_PCI(dev) ((dev_is_pci(dev)) ? to_pci_dev((dev)) : NULL)
 #else
 #define DEVICE_PCI(dev) NULL
 #endif
@@ -2079,10 +2079,12 @@ vortex_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		iowrite16(len, ioaddr + Wn7_MasterLen);
 		spin_unlock_irq(&vp->window_lock);
 		vp->tx_skb = skb;
+		skb_tx_timestamp(skb);
 		iowrite16(StartDMADown, ioaddr + EL3_CMD);
 		/* netif_wake_queue() will be called at the DMADone interrupt. */
 	} else {
 		/* ... and the packet rounded to a doubleword. */
+		skb_tx_timestamp(skb);
 		iowrite32_rep(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2);
 		dev_kfree_skb (skb);
 		if (ioread16(ioaddr + TxFree) > 1536) {
@@ -2212,6 +2214,7 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		prev_entry->status &= cpu_to_le32(~TxIntrUploaded);
 #endif
 	}
+	skb_tx_timestamp(skb);
 	iowrite16(DownUnstall, ioaddr + EL3_CMD);
 	spin_unlock_irqrestore(&vp->lock, flags);
 	return NETDEV_TX_OK;
@@ -2986,6 +2989,7 @@ static const struct ethtool_ops vortex_ethtool_ops = {
 	.nway_reset             = vortex_nway_reset,
 	.get_wol                = vortex_get_wol,
 	.set_wol                = vortex_set_wol,
+	.get_ts_info		= ethtool_op_get_ts_info,
 };
 
 #ifdef CONFIG_PCI
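
The 3c59x changes above call skb_tx_timestamp() on each transmit path right before the frame is handed to the NIC and advertise the capability through ethtool_op_get_ts_info. A generic sketch of where the call sits in an ndo_start_xmit handler (names are illustrative):

/* Illustrative sketch only -- not part of the patch. */
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static netdev_tx_t demo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* ... map the buffer and fill the descriptor ... */

	skb_tx_timestamp(skb);	/* last point before the hardware owns the frame */

	/* ... ring the doorbell / start DMA, then free or echo the skb ... */
	return NETDEV_TX_OK;
}
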
diff --git a/drivers/net/ethernet/8390/8390.h b/drivers/net/ethernet/8390/8390.h
index 2923c51bb351..3e2f2c2e7b58 100644
--- a/drivers/net/ethernet/8390/8390.h
+++ b/drivers/net/ethernet/8390/8390.h
@@ -21,12 +21,6 @@ struct e8390_pkt_hdr {
   unsigned short count; /* header + packet length in bytes */
 };
 
-#ifdef notdef
-extern int ei_debug;
-#else
-#define ei_debug 1
-#endif
-
 #ifdef CONFIG_NET_POLL_CONTROLLER
 void ei_poll(struct net_device *dev);
 void eip_poll(struct net_device *dev);
@@ -99,6 +93,7 @@ struct ei_device {
 	u32 *reg_offset;		/* Register mapping table */
 	spinlock_t page_lock;		/* Page register locks */
 	unsigned long priv;		/* Private field to store bus IDs etc. */
+	u32 msg_enable;			/* debug message level */
 #ifdef AX88796_PLATFORM
 	unsigned char rxcr_base;	/* default value for RXCR */
 #endif
diff --git a/drivers/net/ethernet/8390/apne.c b/drivers/net/ethernet/8390/apne.c
index 912ed7a5f33a..811fa5d5c697 100644
--- a/drivers/net/ethernet/8390/apne.c
+++ b/drivers/net/ethernet/8390/apne.c
@@ -116,9 +116,15 @@ static const char version[] =
 
 static int apne_owned;	/* signal if card already owned */
 
+static u32 apne_msg_enable;
+module_param_named(msg_enable, apne_msg_enable, uint, (S_IRUSR|S_IRGRP|S_IROTH));
+MODULE_PARM_DESC(msg_enable, "Debug message level (see linux/netdevice.h for bitmap)");
+
 struct net_device * __init apne_probe(int unit)
 {
 	struct net_device *dev;
+	struct ei_device *ei_local;
+
 #ifndef MANUAL_CONFIG
 	char tuple[8];
 #endif
@@ -133,11 +139,11 @@ struct net_device * __init apne_probe(int unit)
 	if ( !(AMIGAHW_PRESENT(PCMCIA)) )
 		return ERR_PTR(-ENODEV);
 
-	printk("Looking for PCMCIA ethernet card : ");
+	pr_info("Looking for PCMCIA ethernet card : ");
 
 	/* check if a card is inserted */
 	if (!(PCMCIA_INSERTED)) {
-		printk("NO PCMCIA card inserted\n");
+		pr_cont("NO PCMCIA card inserted\n");
 		return ERR_PTR(-ENODEV);
 	}
 
@@ -148,6 +154,8 @@ struct net_device * __init apne_probe(int unit)
 		sprintf(dev->name, "eth%d", unit);
 		netdev_boot_setup_check(dev);
 	}
+	ei_local = netdev_priv(dev);
+	ei_local->msg_enable = apne_msg_enable;
 
 	/* disable pcmcia irq for readtuple */
 	pcmcia_disable_irq();
@@ -155,14 +163,14 @@ struct net_device * __init apne_probe(int unit)
 #ifndef MANUAL_CONFIG
 	if ((pcmcia_copy_tuple(CISTPL_FUNCID, tuple, 8) < 3) ||
 		(tuple[2] != CISTPL_FUNCID_NETWORK)) {
-		printk("not an ethernet card\n");
+		pr_cont("not an ethernet card\n");
 		/* XXX: shouldn't we re-enable irq here? */
 		free_netdev(dev);
 		return ERR_PTR(-ENODEV);
 	}
 #endif
 
-	printk("ethernet PCMCIA card inserted\n");
+	pr_cont("ethernet PCMCIA card inserted\n");
 
 	if (!init_pcmcia()) {
 		/* XXX: shouldn't we re-enable irq here? */
@@ -204,11 +212,12 @@ static int __init apne_probe1(struct net_device *dev, int ioaddr)
     int neX000, ctron;
 #endif
     static unsigned version_printed;
+    struct ei_device *ei_local = netdev_priv(dev);
 
-    if (ei_debug  &&  version_printed++ == 0)
-	printk(version);
+    if ((apne_msg_enable & NETIF_MSG_DRV) && (version_printed++ == 0))
+		netdev_info(dev, version);
 
-    printk("PCMCIA NE*000 ethercard probe");
+    netdev_info(dev, "PCMCIA NE*000 ethercard probe");
 
     /* Reset card. Who knows what dain-bramaged state it was left in. */
     {	unsigned long reset_start_time = jiffies;
@@ -217,7 +226,7 @@ static int __init apne_probe1(struct net_device *dev, int ioaddr)
 
 	while ((inb(ioaddr + NE_EN0_ISR) & ENISR_RESET) == 0)
 		if (time_after(jiffies, reset_start_time + 2*HZ/100)) {
-			printk(" not found (no reset ack).\n");
+			pr_cont(" not found (no reset ack).\n");
 			return -ENODEV;
 		}
 
@@ -288,7 +297,7 @@ static int __init apne_probe1(struct net_device *dev, int ioaddr)
 	start_page = 0x01;
 	stop_page = (wordlength == 2) ? 0x40 : 0x20;
     } else {
-	printk(" not found.\n");
+	pr_cont(" not found.\n");
 	return -ENXIO;
 
     }
@@ -320,9 +329,9 @@ static int __init apne_probe1(struct net_device *dev, int ioaddr)
     for (i = 0; i < ETH_ALEN; i++)
 	dev->dev_addr[i] = SA_prom[i];
 
-    printk(" %pM\n", dev->dev_addr);
+    pr_cont(" %pM\n", dev->dev_addr);
 
-    printk("%s: %s found.\n", dev->name, name);
+    netdev_info(dev, "%s found.\n", name);
 
     ei_status.name = name;
     ei_status.tx_start_page = start_page;
@@ -352,10 +361,11 @@ static void
 apne_reset_8390(struct net_device *dev)
 {
     unsigned long reset_start_time = jiffies;
+    struct ei_device *ei_local = netdev_priv(dev);
 
     init_pcmcia();
 
-    if (ei_debug > 1) printk("resetting the 8390 t=%ld...", jiffies);
+    netif_dbg(ei_local, hw, dev, "resetting the 8390 t=%ld...\n", jiffies);
 
     outb(inb(NE_BASE + NE_RESET), NE_BASE + NE_RESET);
 
@@ -365,8 +375,8 @@ apne_reset_8390(struct net_device *dev)
     /* This check _should_not_ be necessary, omit eventually. */
     while ((inb(NE_BASE+NE_EN0_ISR) & ENISR_RESET) == 0)
 	if (time_after(jiffies, reset_start_time + 2*HZ/100)) {
-	    printk("%s: ne_reset_8390() did not complete.\n", dev->name);
-	    break;
+		netdev_err(dev, "ne_reset_8390() did not complete.\n");
+		break;
 	}
     outb(ENISR_RESET, NE_BASE + NE_EN0_ISR);	/* Ack intr. */
 }
@@ -386,9 +396,9 @@ apne_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_pa
 
     /* This *shouldn't* happen. If it does, it's the last thing you'll see */
     if (ei_status.dmaing) {
-	printk("%s: DMAing conflict in ne_get_8390_hdr "
-	   "[DMAstat:%d][irqlock:%d][intr:%d].\n",
-	   dev->name, ei_status.dmaing, ei_status.irqlock, dev->irq);
+	netdev_err(dev, "DMAing conflict in ne_get_8390_hdr "
+		   "[DMAstat:%d][irqlock:%d][intr:%d].\n",
+		   ei_status.dmaing, ei_status.irqlock, dev->irq);
 	return;
     }
 
@@ -433,9 +443,9 @@ apne_block_input(struct net_device *dev, int count, struct sk_buff *skb, int rin
 
     /* This *shouldn't* happen. If it does, it's the last thing you'll see */
     if (ei_status.dmaing) {
-	printk("%s: DMAing conflict in ne_block_input "
-	   "[DMAstat:%d][irqlock:%d][intr:%d].\n",
-	   dev->name, ei_status.dmaing, ei_status.irqlock, dev->irq);
+		netdev_err(dev, "DMAing conflict in ne_block_input "
+			   "[DMAstat:%d][irqlock:%d][intr:%d].\n",
+			   ei_status.dmaing, ei_status.irqlock, dev->irq);
 	return;
     }
     ei_status.dmaing |= 0x01;
@@ -481,9 +491,9 @@ apne_block_output(struct net_device *dev, int count,
 
     /* This *shouldn't* happen. If it does, it's the last thing you'll see */
     if (ei_status.dmaing) {
-	printk("%s: DMAing conflict in ne_block_output."
-	   "[DMAstat:%d][irqlock:%d][intr:%d]\n",
-	   dev->name, ei_status.dmaing, ei_status.irqlock, dev->irq);
+		netdev_err(dev, "DMAing conflict in ne_block_output."
+			   "[DMAstat:%d][irqlock:%d][intr:%d]\n",
+			   ei_status.dmaing, ei_status.irqlock, dev->irq);
 	return;
     }
     ei_status.dmaing |= 0x01;
@@ -513,7 +523,7 @@ apne_block_output(struct net_device *dev, int count,
 
     while ((inb(NE_BASE + NE_EN0_ISR) & ENISR_RDC) == 0)
 	if (time_after(jiffies, dma_start + 2*HZ/100)) {	/* 20ms */
-		printk("%s: timeout waiting for Tx RDC.\n", dev->name);
+		netdev_warn(dev, "timeout waiting for Tx RDC.\n");
 		apne_reset_8390(dev);
 		NS8390_init(dev,1);
 		break;
@@ -536,8 +546,8 @@ static irqreturn_t apne_interrupt(int irq, void *dev_id)
         pcmcia_ack_int(pcmcia_intreq);
         return IRQ_NONE;
     }
-    if (ei_debug > 3)
-        printk("pcmcia intreq = %x\n", pcmcia_intreq);
+    if (apne_msg_enable & NETIF_MSG_INTR)
+	pr_debug("pcmcia intreq = %x\n", pcmcia_intreq);
     pcmcia_disable_irq();			/* to get rid of the sti() within ei_interrupt */
     ei_interrupt(irq, dev_id);
     pcmcia_ack_int(pcmcia_get_intreq());
diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
index 36fa577970bb..455d4c399b52 100644
--- a/drivers/net/ethernet/8390/ax88796.c
+++ b/drivers/net/ethernet/8390/ax88796.c
@@ -15,7 +15,6 @@
 #include <linux/kernel.h>
 #include <linux/errno.h>
 #include <linux/isapnp.h>
-#include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
 #include <linux/platform_device.h>
@@ -78,6 +77,8 @@ static unsigned char version[] = "ax88796.c: Copyright 2005,2007 Simtec Electron
 
 #define AX_GPOC_PPDSET	BIT(6)
 
+static u32 ax_msg_enable;
+
 /* device private data */
 
 struct ax_device {
@@ -147,8 +148,7 @@ static void ax_reset_8390(struct net_device *dev)
 	unsigned long reset_start_time = jiffies;
 	void __iomem *addr = (void __iomem *)dev->base_addr;
 
-	if (ei_debug > 1)
-		netdev_dbg(dev, "resetting the 8390 t=%ld\n", jiffies);
+	netif_dbg(ei_local, hw, dev, "resetting the 8390 t=%ld...\n", jiffies);
 
 	ei_outb(ei_inb(addr + NE_RESET), addr + NE_RESET);
 
@@ -496,12 +496,28 @@ static int ax_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 	return phy_ethtool_sset(phy_dev, cmd);
 }
 
+static u32 ax_get_msglevel(struct net_device *dev)
+{
+	struct ei_device *ei_local = netdev_priv(dev);
+
+	return ei_local->msg_enable;
+}
+
+static void ax_set_msglevel(struct net_device *dev, u32 v)
+{
+	struct ei_device *ei_local = netdev_priv(dev);
+
+	ei_local->msg_enable = v;
+}
+
 static const struct ethtool_ops ax_ethtool_ops = {
 	.get_drvinfo		= ax_get_drvinfo,
 	.get_settings		= ax_get_settings,
 	.set_settings		= ax_set_settings,
 	.get_link		= ethtool_op_get_link,
 	.get_ts_info		= ethtool_op_get_ts_info,
+	.get_msglevel		= ax_get_msglevel,
+	.set_msglevel		= ax_set_msglevel,
 };
 
 #ifdef CONFIG_AX88796_93CX6
@@ -763,6 +779,7 @@ static int ax_init_dev(struct net_device *dev)
 	ei_local->block_output = &ax_block_output;
 	ei_local->get_8390_hdr = &ax_get_8390_hdr;
 	ei_local->priv = 0;
+	ei_local->msg_enable = ax_msg_enable;
 
 	dev->netdev_ops = &ax_netdev_ops;
 	dev->ethtool_ops = &ax_ethtool_ops;
diff --git a/drivers/net/ethernet/8390/axnet_cs.c b/drivers/net/ethernet/8390/axnet_cs.c
index d801c1410fb0..73c57a4a7b9e 100644
--- a/drivers/net/ethernet/8390/axnet_cs.c
+++ b/drivers/net/ethernet/8390/axnet_cs.c
@@ -28,7 +28,6 @@
 
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/init.h>
 #include <linux/ptrace.h>
 #include <linux/string.h>
 #include <linux/timer.h>
@@ -105,6 +104,7 @@ static void AX88190_init(struct net_device *dev, int startp);
 static int ax_open(struct net_device *dev);
 static int ax_close(struct net_device *dev);
 static irqreturn_t ax_interrupt(int irq, void *dev_id);
+static u32 axnet_msg_enable;
 
 /*====================================================================*/
 
@@ -152,6 +152,7 @@ static int axnet_probe(struct pcmcia_device *link)
 	return -ENOMEM;
 
     ei_local = netdev_priv(dev);
+    ei_local->msg_enable = axnet_msg_enable;
     spin_lock_init(&ei_local->page_lock);
 
     info = PRIV(dev);
@@ -650,11 +651,12 @@ static void block_input(struct net_device *dev, int count,
 			struct sk_buff *skb, int ring_offset)
 {
     unsigned int nic_base = dev->base_addr;
+    struct ei_device *ei_local = netdev_priv(dev);
     int xfer_count = count;
     char *buf = skb->data;
 
-    if ((ei_debug > 4) && (count != 4))
-	    pr_debug("%s: [bi=%d]\n", dev->name, count+4);
+    if ((netif_msg_rx_status(ei_local)) && (count != 4))
+	netdev_dbg(dev, "[bi=%d]\n", count+4);
     outb_p(ring_offset & 0xff, nic_base + EN0_RSARLO);
     outb_p(ring_offset >> 8, nic_base + EN0_RSARHI);
     outb_p(E8390_RREAD+E8390_START, nic_base + AXNET_CMD);
@@ -810,11 +812,6 @@ module_pcmcia_driver(axnet_cs_driver);
 #define ei_block_input (ei_local->block_input)
 #define ei_get_8390_hdr (ei_local->get_8390_hdr)
 
-/* use 0 for production, 1 for verification, >2 for debug */
-#ifndef ei_debug
-int ei_debug = 1;
-#endif
-
 /* Index to functions. */
 static void ei_tx_intr(struct net_device *dev);
 static void ei_tx_err(struct net_device *dev);
@@ -925,11 +922,10 @@ static void axnet_tx_timeout(struct net_device *dev)
 	isr = inb(e8390_base+EN0_ISR);
 	spin_unlock_irqrestore(&ei_local->page_lock, flags);
 
-	netdev_printk(KERN_DEBUG, dev,
-		      "Tx timed out, %s TSR=%#2x, ISR=%#2x, t=%d.\n",
-		      (txsr & ENTSR_ABT) ? "excess collisions." :
-		      (isr) ? "lost interrupt?" : "cable problem?",
-		      txsr, isr, tickssofar);
+	netdev_dbg(dev, "Tx timed out, %s TSR=%#2x, ISR=%#2x, t=%d.\n",
+		   (txsr & ENTSR_ABT) ? "excess collisions." :
+		   (isr) ? "lost interrupt?" : "cable problem?",
+		   txsr, isr, tickssofar);
 
 	if (!isr && !dev->stats.tx_packets) 
 	{
@@ -998,29 +994,30 @@ static netdev_tx_t axnet_start_xmit(struct sk_buff *skb,
 	{
 		output_page = ei_local->tx_start_page;
 		ei_local->tx1 = send_length;
-		if (ei_debug  &&  ei_local->tx2 > 0)
-			netdev_printk(KERN_DEBUG, dev,
-				      "idle transmitter tx2=%d, lasttx=%d, txing=%d\n",
-				      ei_local->tx2, ei_local->lasttx,
-				      ei_local->txing);
+		if ((netif_msg_tx_queued(ei_local)) &&
+		    ei_local->tx2 > 0)
+			netdev_dbg(dev,
+				   "idle transmitter tx2=%d, lasttx=%d, txing=%d\n",
+				   ei_local->tx2, ei_local->lasttx,
+				   ei_local->txing);
 	}
 	else if (ei_local->tx2 == 0) 
 	{
 		output_page = ei_local->tx_start_page + TX_PAGES/2;
 		ei_local->tx2 = send_length;
-		if (ei_debug  &&  ei_local->tx1 > 0)
-			netdev_printk(KERN_DEBUG, dev,
-				      "idle transmitter, tx1=%d, lasttx=%d, txing=%d\n",
-				      ei_local->tx1, ei_local->lasttx,
-				      ei_local->txing);
+		if ((netif_msg_tx_queued(ei_local)) &&
+		    ei_local->tx1 > 0)
+			netdev_dbg(dev,
+				   "idle transmitter, tx1=%d, lasttx=%d, txing=%d\n",
+				   ei_local->tx1, ei_local->lasttx,
+				   ei_local->txing);
 	}
 	else
 	{	/* We should never get here. */
-		if (ei_debug)
-			netdev_printk(KERN_DEBUG, dev,
-				      "No Tx buffers free! tx1=%d tx2=%d last=%d\n",
-				      ei_local->tx1, ei_local->tx2,
-				      ei_local->lasttx);
+		netif_dbg(ei_local, tx_err, dev,
+			  "No Tx buffers free! tx1=%d tx2=%d last=%d\n",
+			  ei_local->tx1, ei_local->tx2,
+			  ei_local->lasttx);
 		ei_local->irqlock = 0;
 		netif_stop_queue(dev);
 		outb_p(ENISR_ALL, e8390_base + EN0_IMR);
@@ -1124,10 +1121,9 @@ static irqreturn_t ax_interrupt(int irq, void *dev_id)
 		spin_unlock_irqrestore(&ei_local->page_lock, flags);
 		return IRQ_NONE;
 	}
-    
-	if (ei_debug > 3)
-		netdev_printk(KERN_DEBUG, dev, "interrupt(isr=%#2.2x)\n",
-			      inb_p(e8390_base + EN0_ISR));
+
+	netif_dbg(ei_local, intr, dev, "interrupt(isr=%#2.2x)\n",
+		  inb_p(e8390_base + EN0_ISR));
 
 	outb_p(0x00, e8390_base + EN0_ISR);
 	ei_local->irqlock = 1;
@@ -1137,9 +1133,8 @@ static irqreturn_t ax_interrupt(int irq, void *dev_id)
 	       ++nr_serviced < MAX_SERVICE)
 	{
 		if (!netif_running(dev) || (interrupts == 0xff)) {
-			if (ei_debug > 1)
-				netdev_warn(dev,
-					    "interrupt from stopped card\n");
+			netif_warn(ei_local, intr, dev,
+				   "interrupt from stopped card\n");
 			outb_p(interrupts, e8390_base + EN0_ISR);
 			interrupts = 0;
 			break;
@@ -1175,14 +1170,15 @@ static irqreturn_t ax_interrupt(int irq, void *dev_id)
 		}
 	}
     
-	if (interrupts && ei_debug > 3) 
+	if (interrupts && (netif_msg_intr(ei_local)))
 	{
 		handled = 1;
 		if (nr_serviced >= MAX_SERVICE) 
 		{
 			/* 0xFF is valid for a card removal */
-			if(interrupts!=0xFF)
-				netdev_warn(dev, "Too much work at interrupt, status %#2.2x\n",
+			if (interrupts != 0xFF)
+				netdev_warn(dev,
+					    "Too much work at interrupt, status %#2.2x\n",
 					    interrupts);
 			outb_p(ENISR_ALL, e8390_base + EN0_ISR); /* Ack. most intrs. */
 		} else {
@@ -1221,8 +1217,7 @@ static void ei_tx_err(struct net_device *dev)
 	unsigned char tx_was_aborted = txsr & (ENTSR_ABT+ENTSR_FU);
 
 #ifdef VERBOSE_ERROR_DUMP
-	netdev_printk(KERN_DEBUG, dev,
-		      "transmitter error (%#2x):", txsr);
+	netdev_dbg(dev, "transmitter error (%#2x):", txsr);
 	if (txsr & ENTSR_ABT)
 		pr_cont(" excess-collisions");
 	if (txsr & ENTSR_ND)
@@ -1287,9 +1282,9 @@ static void ei_tx_intr(struct net_device *dev)
 	else if (ei_local->tx2 < 0) 
 	{
 		if (ei_local->lasttx != 2  &&  ei_local->lasttx != -2)
-			netdev_info(dev, "%s: bogus last_tx_buffer %d, tx2=%d\n",
-				    ei_local->name, ei_local->lasttx,
-				    ei_local->tx2);
+			netdev_err(dev, "%s: bogus last_tx_buffer %d, tx2=%d\n",
+				   ei_local->name, ei_local->lasttx,
+				   ei_local->tx2);
 		ei_local->tx2 = 0;
 		if (ei_local->tx1 > 0) 
 		{
@@ -1366,9 +1361,11 @@ static void ei_receive(struct net_device *dev)
 		   Keep quiet if it looks like a card removal. One problem here
 		   is that some clones crash in roughly the same way.
 		 */
-		if (ei_debug > 0  &&  this_frame != ei_local->current_page && (this_frame!=0x0 || rxing_page!=0xFF))
-		    netdev_err(dev, "mismatched read page pointers %2x vs %2x\n",
-			       this_frame, ei_local->current_page);
+		if ((netif_msg_rx_err(ei_local)) &&
+		    this_frame != ei_local->current_page &&
+		    (this_frame != 0x0 || rxing_page != 0xFF))
+			netdev_err(dev, "mismatched read page pointers %2x vs %2x\n",
+				   this_frame, ei_local->current_page);
 		
 		if (this_frame == rxing_page)	/* Read all the frames? */
 			break;				/* Done for now */
@@ -1383,11 +1380,10 @@ static void ei_receive(struct net_device *dev)
 		
 		if (pkt_len < 60  ||  pkt_len > 1518) 
 		{
-			if (ei_debug)
-				netdev_printk(KERN_DEBUG, dev,
-					      "bogus packet size: %d, status=%#2x nxpg=%#2x\n",
-					      rx_frame.count, rx_frame.status,
-					      rx_frame.next);
+			netif_err(ei_local, rx_err, dev,
+				  "bogus packet size: %d, status=%#2x nxpg=%#2x\n",
+				  rx_frame.count, rx_frame.status,
+				  rx_frame.next);
 			dev->stats.rx_errors++;
 			dev->stats.rx_length_errors++;
 		}
@@ -1398,10 +1394,9 @@ static void ei_receive(struct net_device *dev)
 			skb = netdev_alloc_skb(dev, pkt_len + 2);
 			if (skb == NULL) 
 			{
-				if (ei_debug > 1)
-					netdev_printk(KERN_DEBUG, dev,
-						      "Couldn't allocate a sk_buff of size %d\n",
-						      pkt_len);
+				netif_err(ei_local, rx_err, dev,
+					  "Couldn't allocate a sk_buff of size %d\n",
+					  pkt_len);
 				dev->stats.rx_dropped++;
 				break;
 			}
@@ -1420,11 +1415,10 @@ static void ei_receive(struct net_device *dev)
 		} 
 		else 
 		{
-			if (ei_debug)
-				netdev_printk(KERN_DEBUG, dev,
-					      "bogus packet: status=%#2x nxpg=%#2x size=%d\n",
-					      rx_frame.status, rx_frame.next,
-					      rx_frame.count);
+			netif_err(ei_local, rx_err, dev,
+				  "bogus packet: status=%#2x nxpg=%#2x size=%d\n",
+				  rx_frame.status, rx_frame.next,
+				  rx_frame.count);
 			dev->stats.rx_errors++;
 			/* NB: The NIC counts CRC, frame and missed errors. */
 			if (pkt_stat & ENRSR_FO)
@@ -1461,6 +1455,7 @@ static void ei_rx_overrun(struct net_device *dev)
 	axnet_dev_t *info = PRIV(dev);
 	long e8390_base = dev->base_addr;
 	unsigned char was_txing, must_resend = 0;
+	struct ei_device *ei_local = netdev_priv(dev);
     
 	/*
 	 * Record whether a Tx was in progress and then issue the
@@ -1468,9 +1463,8 @@ static void ei_rx_overrun(struct net_device *dev)
 	 */
 	was_txing = inb_p(e8390_base+E8390_CMD) & E8390_TRANS;
 	outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD);
-    
-	if (ei_debug > 1)
-		netdev_printk(KERN_DEBUG, dev, "Receiver overrun\n");
+
+	netif_dbg(ei_local, rx_err, dev, "Receiver overrun\n");
 	dev->stats.rx_over_errors++;
     
 	/* 
diff --git a/drivers/net/ethernet/8390/etherh.c b/drivers/net/ethernet/8390/etherh.c
index 78c6fb4b1143..b36ee9e0d220 100644
--- a/drivers/net/ethernet/8390/etherh.c
+++ b/drivers/net/ethernet/8390/etherh.c
@@ -56,18 +56,15 @@
 #define ei_inb_p(_p)	 readb((void __iomem *)_p)
 #define ei_outb_p(_v,_p) writeb(_v,(void __iomem *)_p)
 
-#define NET_DEBUG  0
-#define DEBUG_INIT 2
-
 #define DRV_NAME	"etherh"
 #define DRV_VERSION	"1.11"
 
-static char version[] __initdata =
+static char version[] =
 	"EtherH/EtherM Driver (c) 2002-2004 Russell King " DRV_VERSION "\n";
 
 #include "lib8390.c"
 
-static unsigned int net_debug = NET_DEBUG;
+static u32 etherh_msg_enable;
 
 struct etherh_priv {
 	void __iomem	*ioc_fast;
@@ -317,9 +314,9 @@ etherh_block_output (struct net_device *dev, int count, const unsigned char *buf
 	void __iomem *dma_base, *addr;
 
 	if (ei_local->dmaing) {
-		printk(KERN_ERR "%s: DMAing conflict in etherh_block_input: "
-			" DMAstat %d irqlock %d\n", dev->name,
-			ei_local->dmaing, ei_local->irqlock);
+		netdev_err(dev, "DMAing conflict in etherh_block_input: "
+			   " DMAstat %d irqlock %d\n",
+			   ei_local->dmaing, ei_local->irqlock);
 		return;
 	}
 
@@ -361,8 +358,7 @@ etherh_block_output (struct net_device *dev, int count, const unsigned char *buf
 
 	while ((readb (addr + EN0_ISR) & ENISR_RDC) == 0)
 		if (time_after(jiffies, dma_start + 2*HZ/100)) { /* 20ms */
-			printk(KERN_ERR "%s: timeout waiting for TX RDC\n",
-				dev->name);
+			netdev_warn(dev, "timeout waiting for TX RDC\n");
 			etherh_reset (dev);
 			__NS8390_init (dev, 1);
 			break;
@@ -383,9 +379,9 @@ etherh_block_input (struct net_device *dev, int count, struct sk_buff *skb, int
 	void __iomem *dma_base, *addr;
 
 	if (ei_local->dmaing) {
-		printk(KERN_ERR "%s: DMAing conflict in etherh_block_input: "
-			" DMAstat %d irqlock %d\n", dev->name,
-			ei_local->dmaing, ei_local->irqlock);
+		netdev_err(dev, "DMAing conflict in etherh_block_input: "
+			   " DMAstat %d irqlock %d\n",
+			   ei_local->dmaing, ei_local->irqlock);
 		return;
 	}
 
@@ -423,9 +419,9 @@ etherh_get_header (struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_p
 	void __iomem *dma_base, *addr;
 
 	if (ei_local->dmaing) {
-		printk(KERN_ERR "%s: DMAing conflict in etherh_get_header: "
-			" DMAstat %d irqlock %d\n", dev->name,
-			ei_local->dmaing, ei_local->irqlock);
+		netdev_err(dev, "DMAing conflict in etherh_get_header: "
+			   " DMAstat %d irqlock %d\n",
+			   ei_local->dmaing, ei_local->irqlock);
 		return;
 	}
 
@@ -513,8 +509,8 @@ static void __init etherh_banner(void)
 {
 	static int version_printed;
 
-	if (net_debug && version_printed++ == 0)
-		printk(KERN_INFO "%s", version);
+	if ((etherh_msg_enable & NETIF_MSG_DRV) && (version_printed++ == 0))
+		pr_info("%s", version);
 }
 
 /*
@@ -625,11 +621,27 @@ static int etherh_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 	return 0;
 }
 
+static u32 etherh_get_msglevel(struct net_device *dev)
+{
+	struct ei_device *ei_local = netdev_priv(dev);
+
+	return ei_local->msg_enable;
+}
+
+static void etherh_set_msglevel(struct net_device *dev, u32 v)
+{
+	struct ei_device *ei_local = netdev_priv(dev);
+
+	ei_local->msg_enable = v;
+}
+
 static const struct ethtool_ops etherh_ethtool_ops = {
 	.get_settings	= etherh_get_settings,
 	.set_settings	= etherh_set_settings,
 	.get_drvinfo	= etherh_get_drvinfo,
 	.get_ts_info	= ethtool_op_get_ts_info,
+	.get_msglevel	= etherh_get_msglevel,
+	.set_msglevel	= etherh_set_msglevel,
 };
 
 static const struct net_device_ops etherh_netdev_ops = {
@@ -746,6 +758,7 @@ etherh_probe(struct expansion_card *ec, const struct ecard_id *id)
 	ei_local->block_output  = etherh_block_output;
 	ei_local->get_8390_hdr  = etherh_get_header;
 	ei_local->interface_num = 0;
+	ei_local->msg_enable = etherh_msg_enable;
 
 	etherh_reset(dev);
 	__NS8390_init(dev, 0);
@@ -754,8 +767,8 @@ etherh_probe(struct expansion_card *ec, const struct ecard_id *id)
 	if (ret)
 		goto free;
 
-	printk(KERN_INFO "%s: %s in slot %d, %pM\n",
-		dev->name, data->name, ec->slot_no, dev->dev_addr);
+	netdev_info(dev, "%s in slot %d, %pM\n",
+		    data->name, ec->slot_no, dev->dev_addr);
 
 	ecard_set_drvdata(ec, dev);
 
diff --git a/drivers/net/ethernet/8390/hydra.c b/drivers/net/ethernet/8390/hydra.c
index f615fdec0f1b..0fe19d609c2e 100644
--- a/drivers/net/ethernet/8390/hydra.c
+++ b/drivers/net/ethernet/8390/hydra.c
@@ -66,6 +66,7 @@ static void hydra_block_input(struct net_device *dev, int count,
 static void hydra_block_output(struct net_device *dev, int count,
 			       const unsigned char *buf, int start_page);
 static void hydra_remove_one(struct zorro_dev *z);
+static u32 hydra_msg_enable;
 
 static struct zorro_device_id hydra_zorro_tbl[] = {
     { ZORRO_PROD_HYDRA_SYSTEMS_AMIGANET },
@@ -119,6 +120,7 @@ static int hydra_init(struct zorro_dev *z)
     int start_page, stop_page;
     int j;
     int err;
+    struct ei_device *ei_local;
 
     static u32 hydra_offsets[16] = {
 	0x00, 0x02, 0x04, 0x06, 0x08, 0x0a, 0x0c, 0x0e,
@@ -137,6 +139,8 @@ static int hydra_init(struct zorro_dev *z)
     start_page = NESM_START_PG;
     stop_page = NESM_STOP_PG;
 
+    ei_local = netdev_priv(dev);
+    ei_local->msg_enable = hydra_msg_enable;
     dev->base_addr = ioaddr;
     dev->irq = IRQ_AMIGA_PORTS;
 
@@ -187,15 +191,16 @@ static int hydra_open(struct net_device *dev)
 
 static int hydra_close(struct net_device *dev)
 {
-    if (ei_debug > 1)
-	printk(KERN_DEBUG "%s: Shutting down ethercard.\n", dev->name);
+    struct ei_device *ei_local = netdev_priv(dev);
+
+    netif_dbg(ei_local, ifdown, dev, "Shutting down ethercard.\n");
     __ei_close(dev);
     return 0;
 }
 
 static void hydra_reset_8390(struct net_device *dev)
 {
-    printk(KERN_INFO "Hydra hw reset not there\n");
+    netdev_info(dev, "Hydra hw reset not there\n");
 }
 
 static void hydra_get_8390_hdr(struct net_device *dev,
diff --git a/drivers/net/ethernet/8390/lib8390.c b/drivers/net/ethernet/8390/lib8390.c
index b329f5c0d62b..d2cd80444ade 100644
--- a/drivers/net/ethernet/8390/lib8390.c
+++ b/drivers/net/ethernet/8390/lib8390.c
@@ -99,11 +99,6 @@
 #define ei_block_input (ei_local->block_input)
 #define ei_get_8390_hdr (ei_local->get_8390_hdr)
 
-/* use 0 for production, 1 for verification, >2 for debug */
-#ifndef ei_debug
-int ei_debug = 1;
-#endif
-
 /* Index to functions. */
 static void ei_tx_intr(struct net_device *dev);
 static void ei_tx_err(struct net_device *dev);
@@ -116,6 +111,11 @@ static void NS8390_trigger_send(struct net_device *dev, unsigned int length,
 static void do_set_multicast_list(struct net_device *dev);
 static void __NS8390_init(struct net_device *dev, int startp);
 
+static unsigned version_printed;
+static u32 msg_enable;
+module_param(msg_enable, uint, (S_IRUSR|S_IRGRP|S_IROTH));
+MODULE_PARM_DESC(msg_enable, "Debug message level (see linux/netdevice.h for bitmap)");
+
 /*
  *	SMP and the 8390 setup.
  *
@@ -345,19 +345,23 @@ static netdev_tx_t __ei_start_xmit(struct sk_buff *skb,
 	if (ei_local->tx1 == 0) {
 		output_page = ei_local->tx_start_page;
 		ei_local->tx1 = send_length;
-		if (ei_debug  &&  ei_local->tx2 > 0)
-			netdev_dbg(dev, "idle transmitter tx2=%d, lasttx=%d, txing=%d\n",
+		if ((netif_msg_tx_queued(ei_local)) &&
+		    ei_local->tx2 > 0)
+			netdev_dbg(dev,
+				   "idle transmitter tx2=%d, lasttx=%d, txing=%d\n",
 				   ei_local->tx2, ei_local->lasttx, ei_local->txing);
 	} else if (ei_local->tx2 == 0) {
 		output_page = ei_local->tx_start_page + TX_PAGES/2;
 		ei_local->tx2 = send_length;
-		if (ei_debug  &&  ei_local->tx1 > 0)
-			netdev_dbg(dev, "idle transmitter, tx1=%d, lasttx=%d, txing=%d\n",
+		if ((netif_msg_tx_queued(ei_local)) &&
+		    ei_local->tx1 > 0)
+			netdev_dbg(dev,
+				   "idle transmitter, tx1=%d, lasttx=%d, txing=%d\n",
 				   ei_local->tx1, ei_local->lasttx, ei_local->txing);
 	} else {			/* We should never get here. */
-		if (ei_debug)
-			netdev_dbg(dev, "No Tx buffers free! tx1=%d tx2=%d last=%d\n",
-				   ei_local->tx1, ei_local->tx2, ei_local->lasttx);
+		netif_dbg(ei_local, tx_err, dev,
+			  "No Tx buffers free! tx1=%d tx2=%d last=%d\n",
+			  ei_local->tx1, ei_local->tx2, ei_local->lasttx);
 		ei_local->irqlock = 0;
 		netif_stop_queue(dev);
 		ei_outb_p(ENISR_ALL, e8390_base + EN0_IMR);
@@ -388,7 +392,7 @@ static netdev_tx_t __ei_start_xmit(struct sk_buff *skb,
 	} else
 		ei_local->txqueue++;
 
-	if (ei_local->tx1  &&  ei_local->tx2)
+	if (ei_local->tx1 && ei_local->tx2)
 		netif_stop_queue(dev);
 	else
 		netif_start_queue(dev);
@@ -445,9 +449,8 @@ static irqreturn_t __ei_interrupt(int irq, void *dev_id)
 
 	/* Change to page 0 and read the intr status reg. */
 	ei_outb_p(E8390_NODMA+E8390_PAGE0, e8390_base + E8390_CMD);
-	if (ei_debug > 3)
-		netdev_dbg(dev, "interrupt(isr=%#2.2x)\n",
-			   ei_inb_p(e8390_base + EN0_ISR));
+	netif_dbg(ei_local, intr, dev, "interrupt(isr=%#2.2x)\n",
+		  ei_inb_p(e8390_base + EN0_ISR));
 
 	/* !!Assumption!! -- we stay in page 0.	 Don't break this. */
 	while ((interrupts = ei_inb_p(e8390_base + EN0_ISR)) != 0 &&
@@ -485,7 +488,7 @@ static irqreturn_t __ei_interrupt(int irq, void *dev_id)
 		ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base + E8390_CMD);
 	}
 
-	if (interrupts && ei_debug) {
+	if (interrupts && netif_msg_intr(ei_local)) {
 		ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base + E8390_CMD);
 		if (nr_serviced >= MAX_SERVICE) {
 			/* 0xFF is valid for a card removal */
@@ -676,10 +679,11 @@ static void ei_receive(struct net_device *dev)
 		   Keep quiet if it looks like a card removal. One problem here
 		   is that some clones crash in roughly the same way.
 		 */
-		if (ei_debug > 0 &&
+		if (netif_msg_rx_status(ei_local) &&
 		    this_frame != ei_local->current_page &&
 		    (this_frame != 0x0 || rxing_page != 0xFF))
-			netdev_err(dev, "mismatched read page pointers %2x vs %2x\n",
+			netdev_err(dev,
+				   "mismatched read page pointers %2x vs %2x\n",
 				   this_frame, ei_local->current_page);
 
 		if (this_frame == rxing_page)	/* Read all the frames? */
@@ -707,10 +711,10 @@ static void ei_receive(struct net_device *dev)
 		}
 
 		if (pkt_len < 60  ||  pkt_len > 1518) {
-			if (ei_debug)
-				netdev_dbg(dev, "bogus packet size: %d, status=%#2x nxpg=%#2x\n",
-					   rx_frame.count, rx_frame.status,
-					   rx_frame.next);
+			netif_dbg(ei_local, rx_status, dev,
+				  "bogus packet size: %d, status=%#2x nxpg=%#2x\n",
+				  rx_frame.count, rx_frame.status,
+				  rx_frame.next);
 			dev->stats.rx_errors++;
 			dev->stats.rx_length_errors++;
 		} else if ((pkt_stat & 0x0F) == ENRSR_RXOK) {
@@ -718,9 +722,9 @@ static void ei_receive(struct net_device *dev)
 
 			skb = netdev_alloc_skb(dev, pkt_len + 2);
 			if (skb == NULL) {
-				if (ei_debug > 1)
-					netdev_dbg(dev, "Couldn't allocate a sk_buff of size %d\n",
-						   pkt_len);
+				netif_err(ei_local, rx_err, dev,
+					  "Couldn't allocate a sk_buff of size %d\n",
+					  pkt_len);
 				dev->stats.rx_dropped++;
 				break;
 			} else {
@@ -736,10 +740,10 @@ static void ei_receive(struct net_device *dev)
 					dev->stats.multicast++;
 			}
 		} else {
-			if (ei_debug)
-				netdev_dbg(dev, "bogus packet: status=%#2x nxpg=%#2x size=%d\n",
-					   rx_frame.status, rx_frame.next,
-					   rx_frame.count);
+			netif_err(ei_local, rx_err, dev,
+				  "bogus packet: status=%#2x nxpg=%#2x size=%d\n",
+				  rx_frame.status, rx_frame.next,
+				  rx_frame.count);
 			dev->stats.rx_errors++;
 			/* NB: The NIC counts CRC, frame and missed errors. */
 			if (pkt_stat & ENRSR_FO)
@@ -789,8 +793,7 @@ static void ei_rx_overrun(struct net_device *dev)
 	was_txing = ei_inb_p(e8390_base+E8390_CMD) & E8390_TRANS;
 	ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD);
 
-	if (ei_debug > 1)
-		netdev_dbg(dev, "Receiver overrun\n");
+	netif_dbg(ei_local, rx_err, dev, "Receiver overrun\n");
 	dev->stats.rx_over_errors++;
 
 	/*
@@ -965,8 +968,9 @@ static void __ei_set_multicast_list(struct net_device *dev)
 static void ethdev_setup(struct net_device *dev)
 {
 	struct ei_device *ei_local = netdev_priv(dev);
-	if (ei_debug > 1)
-		printk(version);
+
+	if ((msg_enable & NETIF_MSG_DRV) && (version_printed++ == 0))
+		pr_info("%s", version);
 
 	ether_setup(dev);
 
@@ -1035,9 +1039,10 @@ static void __NS8390_init(struct net_device *dev, int startp)
 	ei_outb_p(E8390_NODMA + E8390_PAGE1 + E8390_STOP, e8390_base+E8390_CMD); /* 0x61 */
 	for (i = 0; i < 6; i++) {
 		ei_outb_p(dev->dev_addr[i], e8390_base + EN1_PHYS_SHIFT(i));
-		if (ei_debug > 1 &&
+		if (netif_msg_probe(ei_local) &&
 		    ei_inb_p(e8390_base + EN1_PHYS_SHIFT(i)) != dev->dev_addr[i])
-			netdev_err(dev, "Hw. address read/write mismap %d\n", i);
+			netdev_err(dev,
+				   "Hw. address read/write mismap %d\n", i);
 	}
 
 	ei_outb_p(ei_local->rx_start_page, e8390_base + EN1_CURPAG);
diff --git a/drivers/net/ethernet/8390/mac8390.c b/drivers/net/ethernet/8390/mac8390.c
index 88ccc8b14f0a..90e825e8abfe 100644
--- a/drivers/net/ethernet/8390/mac8390.c
+++ b/drivers/net/ethernet/8390/mac8390.c
@@ -167,6 +167,7 @@ static void slow_sane_block_output(struct net_device *dev, int count,
 				   const unsigned char *buf, int start_page);
 static void word_memcpy_tocard(unsigned long tp, const void *fp, int count);
 static void word_memcpy_fromcard(void *tp, unsigned long fp, int count);
+static u32 mac8390_msg_enable;
 
 static enum mac8390_type __init mac8390_ident(struct nubus_dev *dev)
 {
@@ -402,6 +403,7 @@ struct net_device * __init mac8390_probe(int unit)
 	struct net_device *dev;
 	struct nubus_dev *ndev = NULL;
 	int err = -ENODEV;
+	struct ei_device *ei_local;
 
 	static unsigned int slots;
 
@@ -440,6 +442,10 @@ struct net_device * __init mac8390_probe(int unit)
 
 	if (!ndev)
 		goto out;
+
+	ei_local = netdev_priv(dev);
+	ei_local->msg_enable = mac8390_msg_enable;
+
 	err = register_netdev(dev);
 	if (err)
 		goto out;
@@ -660,19 +666,22 @@ static int mac8390_close(struct net_device *dev)
 
 static void mac8390_no_reset(struct net_device *dev)
 {
+	struct ei_device *ei_local = netdev_priv(dev);
+
 	ei_status.txing = 0;
-	if (ei_debug > 1)
-		pr_info("reset not supported\n");
+	netif_info(ei_local, hw, dev, "reset not supported\n");
 }
 
 static void interlan_reset(struct net_device *dev)
 {
 	unsigned char *target = nubus_slot_addr(IRQ2SLOT(dev->irq));
-	if (ei_debug > 1)
-		pr_info("Need to reset the NS8390 t=%lu...", jiffies);
+	struct ei_device *ei_local = netdev_priv(dev);
+
+	netif_info(ei_local, hw, dev, "Need to reset the NS8390 t=%lu...",
+		   jiffies);
 	ei_status.txing = 0;
 	target[0xC0000] = 0;
-	if (ei_debug > 1)
+	if (netif_msg_hw(ei_local))
 		pr_cont("reset complete\n");
 }
 
diff --git a/drivers/net/ethernet/8390/mcf8390.c b/drivers/net/ethernet/8390/mcf8390.c
index 230efd6fa5d5..38fcdcf7c4c7 100644
--- a/drivers/net/ethernet/8390/mcf8390.c
+++ b/drivers/net/ethernet/8390/mcf8390.c
@@ -13,7 +13,6 @@
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/errno.h>
-#include <linux/init.h>
 #include <linux/platform_device.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
@@ -39,6 +38,7 @@ static const char version[] =
 
 #define NESM_START_PG	0x40	/* First page of TX buffer */
 #define NESM_STOP_PG	0x80	/* Last page +1 of RX ring */
+static u32 mcf8390_msg_enable;
 
 #ifdef NE2000_ODDOFFSET
 /*
@@ -153,9 +153,9 @@ static void mcf8390_reset_8390(struct net_device *dev)
 {
 	unsigned long reset_start_time = jiffies;
 	u32 addr = dev->base_addr;
+	struct ei_device *ei_local = netdev_priv(dev);
 
-	if (ei_debug > 1)
-		netdev_dbg(dev, "resetting the 8390 t=%ld...\n", jiffies);
+	netif_dbg(ei_local, hw, dev, "resetting the 8390 t=%ld...\n", jiffies);
 
 	ei_outb(ei_inb(addr + NE_RESET), addr + NE_RESET);
 
@@ -288,7 +288,7 @@ static void mcf8390_block_output(struct net_device *dev, int count,
 	dma_start = jiffies;
 	while ((ei_inb(addr + NE_EN0_ISR) & ENISR_RDC) == 0) {
 		if (time_after(jiffies, dma_start + 2 * HZ / 100)) { /* 20ms */
-			netdev_err(dev, "timeout waiting for Tx RDC\n");
+			netdev_warn(dev, "timeout waiting for Tx RDC\n");
 			mcf8390_reset_8390(dev);
 			__NS8390_init(dev, 1);
 			break;
@@ -437,6 +437,7 @@ static int mcf8390_probe(struct platform_device *pdev)
 	SET_NETDEV_DEV(dev, &pdev->dev);
 	platform_set_drvdata(pdev, dev);
 	ei_local = netdev_priv(dev);
+	ei_local->msg_enable = mcf8390_msg_enable;
 
 	dev->irq = irq->start;
 	dev->base_addr = mem->start;
diff --git a/drivers/net/ethernet/8390/ne.c b/drivers/net/ethernet/8390/ne.c
index b2e840513735..58eaa8f34942 100644
--- a/drivers/net/ethernet/8390/ne.c
+++ b/drivers/net/ethernet/8390/ne.c
@@ -71,14 +71,17 @@ static struct platform_device *pdev_ne[MAX_NE_CARDS];
 static int io[MAX_NE_CARDS];
 static int irq[MAX_NE_CARDS];
 static int bad[MAX_NE_CARDS];
+static u32 ne_msg_enable;
 
 #ifdef MODULE
 module_param_array(io, int, NULL, 0);
 module_param_array(irq, int, NULL, 0);
 module_param_array(bad, int, NULL, 0);
+module_param_named(msg_enable, ne_msg_enable, uint, (S_IRUSR|S_IRGRP|S_IROTH));
 MODULE_PARM_DESC(io, "I/O base address(es),required");
 MODULE_PARM_DESC(irq, "IRQ number(s)");
 MODULE_PARM_DESC(bad, "Accept card(s) with bad signatures");
+MODULE_PARM_DESC(msg_enable, "Debug message level (see linux/netdevice.h for bitmap)");
 MODULE_DESCRIPTION("NE1000/NE2000 ISA/PnP Ethernet driver");
 MODULE_LICENSE("GPL");
 #endif /* MODULE */
@@ -214,8 +217,8 @@ static int __init do_ne_probe(struct net_device *dev)
 	if (base_addr > 0x1ff) {	/* Check a single specified location. */
 		int ret = ne_probe1(dev, base_addr);
 		if (ret)
-			printk(KERN_WARNING "ne.c: No NE*000 card found at "
-				"i/o = %#lx\n", base_addr);
+			netdev_warn(dev, "ne.c: No NE*000 card found at "
+				    "i/o = %#lx\n", base_addr);
 		return ret;
 	}
 	else if (base_addr != 0)	/* Don't probe at all. */
@@ -264,11 +267,14 @@ static int __init ne_probe_isapnp(struct net_device *dev)
 			/* found it */
 			dev->base_addr = pnp_port_start(idev, 0);
 			dev->irq = pnp_irq(idev, 0);
-			printk(KERN_INFO "ne.c: ISAPnP reports %s at i/o %#lx, irq %d.\n",
-				(char *) isapnp_clone_list[i].driver_data,
-				dev->base_addr, dev->irq);
+			netdev_info(dev,
+				    "ne.c: ISAPnP reports %s at i/o %#lx, irq %d.\n",
+				    (char *) isapnp_clone_list[i].driver_data,
+				    dev->base_addr, dev->irq);
 			if (ne_probe1(dev, dev->base_addr) != 0) {	/* Shouldn't happen. */
-				printk(KERN_ERR "ne.c: Probe of ISAPnP card at %#lx failed.\n", dev->base_addr);
+				netdev_err(dev,
+					   "ne.c: Probe of ISAPnP card at %#lx failed.\n",
+					   dev->base_addr);
 				pnp_device_detach(idev);
 				return -ENXIO;
 			}
@@ -293,6 +299,7 @@ static int __init ne_probe1(struct net_device *dev, unsigned long ioaddr)
 	int neX000, ctron, copam, bad_card;
 	int reg0, ret;
 	static unsigned version_printed;
+	struct ei_device *ei_local = netdev_priv(dev);
 
 	if (!request_region(ioaddr, NE_IO_EXTENT, DRV_NAME))
 		return -EBUSY;
@@ -319,10 +326,10 @@ static int __init ne_probe1(struct net_device *dev, unsigned long ioaddr)
 		}
 	}
 
-	if (ei_debug  &&  version_printed++ == 0)
-		printk(KERN_INFO "%s%s", version1, version2);
+	if ((ne_msg_enable & NETIF_MSG_DRV) && (version_printed++ == 0))
+		netdev_info(dev, "%s%s", version1, version2);
 
-	printk(KERN_INFO "NE*000 ethercard probe at %#3lx:", ioaddr);
+	netdev_info(dev, "NE*000 ethercard probe at %#3lx:", ioaddr);
 
 	/* A user with a poor card that fails to ack the reset, or that
 	   does not have a valid 0x57,0x57 signature can still use this
@@ -343,10 +350,10 @@ static int __init ne_probe1(struct net_device *dev, unsigned long ioaddr)
 		while ((inb_p(ioaddr + EN0_ISR) & ENISR_RESET) == 0)
 		if (time_after(jiffies, reset_start_time + 2*HZ/100)) {
 			if (bad_card) {
-				printk(" (warning: no reset ack)");
+				pr_cont(" (warning: no reset ack)");
 				break;
 			} else {
-				printk(" not found (no reset ack).\n");
+				pr_cont(" not found (no reset ack).\n");
 				ret = -ENODEV;
 				goto err_out;
 			}
@@ -454,13 +461,13 @@ static int __init ne_probe1(struct net_device *dev, unsigned long ioaddr)
 		}
 		if (bad_clone_list[i].name8 == NULL)
 		{
-			printk(" not found (invalid signature %2.2x %2.2x).\n",
+			pr_cont(" not found (invalid signature %2.2x %2.2x).\n",
 				SA_prom[14], SA_prom[15]);
 			ret = -ENXIO;
 			goto err_out;
 		}
 #else
-		printk(" not found.\n");
+		pr_cont(" not found.\n");
 		ret = -ENXIO;
 		goto err_out;
 #endif
@@ -476,15 +483,15 @@ static int __init ne_probe1(struct net_device *dev, unsigned long ioaddr)
 		mdelay(10);		/* wait 10ms for interrupt to propagate */
 		outb_p(0x00, ioaddr + EN0_IMR); 		/* Mask it again. */
 		dev->irq = probe_irq_off(cookie);
-		if (ei_debug > 2)
-			printk(" autoirq is %d\n", dev->irq);
+		if (netif_msg_probe(ei_local))
+			pr_cont(" autoirq is %d", dev->irq);
 	} else if (dev->irq == 2)
 		/* Fixup for users that don't know that IRQ 2 is really IRQ 9,
 		   or don't know which one to set. */
 		dev->irq = 9;
 
 	if (! dev->irq) {
-		printk(" failed to detect IRQ line.\n");
+		pr_cont(" failed to detect IRQ line.\n");
 		ret = -EAGAIN;
 		goto err_out;
 	}
@@ -493,7 +500,7 @@ static int __init ne_probe1(struct net_device *dev, unsigned long ioaddr)
 	   share and the board will usually be enabled. */
 	ret = request_irq(dev->irq, eip_interrupt, 0, name, dev);
 	if (ret) {
-		printk (" unable to get IRQ %d (errno=%d).\n", dev->irq, ret);
+		pr_cont(" unable to get IRQ %d (errno=%d).\n", dev->irq, ret);
 		goto err_out;
 	}
 
@@ -512,7 +519,7 @@ static int __init ne_probe1(struct net_device *dev, unsigned long ioaddr)
 	}
 #endif
 
-	printk("%pM\n", dev->dev_addr);
+	pr_cont("%pM\n", dev->dev_addr);
 
 	ei_status.name = name;
 	ei_status.tx_start_page = start_page;
@@ -536,11 +543,12 @@ static int __init ne_probe1(struct net_device *dev, unsigned long ioaddr)
 	dev->netdev_ops = &eip_netdev_ops;
 	NS8390p_init(dev, 0);
 
+	ei_local->msg_enable = ne_msg_enable;
 	ret = register_netdev(dev);
 	if (ret)
 		goto out_irq;
-	printk(KERN_INFO "%s: %s found at %#lx, using IRQ %d.\n",
-	       dev->name, name, ioaddr, dev->irq);
+	netdev_info(dev, "%s found at %#lx, using IRQ %d.\n",
+		    name, ioaddr, dev->irq);
 	return 0;
 
 out_irq:
@@ -556,9 +564,9 @@ err_out:
 static void ne_reset_8390(struct net_device *dev)
 {
 	unsigned long reset_start_time = jiffies;
+	struct ei_device *ei_local = netdev_priv(dev);
 
-	if (ei_debug > 1)
-		printk(KERN_DEBUG "resetting the 8390 t=%ld...", jiffies);
+	netif_dbg(ei_local, hw, dev, "resetting the 8390 t=%ld...\n", jiffies);
 
 	/* DON'T change these to inb_p/outb_p or reset will fail on clones. */
 	outb(inb(NE_BASE + NE_RESET), NE_BASE + NE_RESET);
@@ -569,7 +577,7 @@ static void ne_reset_8390(struct net_device *dev)
 	/* This check _should_not_ be necessary, omit eventually. */
 	while ((inb_p(NE_BASE+EN0_ISR) & ENISR_RESET) == 0)
 		if (time_after(jiffies, reset_start_time + 2*HZ/100)) {
-			printk(KERN_WARNING "%s: ne_reset_8390() did not complete.\n", dev->name);
+			netdev_err(dev, "ne_reset_8390() did not complete.\n");
 			break;
 		}
 	outb_p(ENISR_RESET, NE_BASE + EN0_ISR);	/* Ack intr. */
@@ -587,9 +595,9 @@ static void ne_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, i
 
 	if (ei_status.dmaing)
 	{
-		printk(KERN_EMERG "%s: DMAing conflict in ne_get_8390_hdr "
-			"[DMAstat:%d][irqlock:%d].\n",
-			dev->name, ei_status.dmaing, ei_status.irqlock);
+		netdev_err(dev, "DMAing conflict in ne_get_8390_hdr "
+			   "[DMAstat:%d][irqlock:%d].\n",
+			   ei_status.dmaing, ei_status.irqlock);
 		return;
 	}
 
@@ -621,6 +629,7 @@ static void ne_block_input(struct net_device *dev, int count, struct sk_buff *sk
 {
 #ifdef NE_SANITY_CHECK
 	int xfer_count = count;
+	struct ei_device *ei_local = netdev_priv(dev);
 #endif
 	int nic_base = dev->base_addr;
 	char *buf = skb->data;
@@ -628,9 +637,9 @@ static void ne_block_input(struct net_device *dev, int count, struct sk_buff *sk
 	/* This *shouldn't* happen. If it does, it's the last thing you'll see */
 	if (ei_status.dmaing)
 	{
-		printk(KERN_EMERG "%s: DMAing conflict in ne_block_input "
-			"[DMAstat:%d][irqlock:%d].\n",
-			dev->name, ei_status.dmaing, ei_status.irqlock);
+		netdev_err(dev, "DMAing conflict in ne_block_input "
+			   "[DMAstat:%d][irqlock:%d].\n",
+			   ei_status.dmaing, ei_status.irqlock);
 		return;
 	}
 	ei_status.dmaing |= 0x01;
@@ -660,7 +669,7 @@ static void ne_block_input(struct net_device *dev, int count, struct sk_buff *sk
 	   this message you either 1) have a slightly incompatible clone
 	   or 2) have noise/speed problems with your bus. */
 
-	if (ei_debug > 1)
+	if (netif_msg_rx_status(ei_local))
 	{
 		/* DMA termination address check... */
 		int addr, tries = 20;
@@ -674,9 +683,9 @@ static void ne_block_input(struct net_device *dev, int count, struct sk_buff *sk
 				break;
 		} while (--tries > 0);
 	 	if (tries <= 0)
-			printk(KERN_WARNING "%s: RX transfer address mismatch,"
-				"%#4.4x (expected) vs. %#4.4x (actual).\n",
-				dev->name, ring_offset + xfer_count, addr);
+			netdev_warn(dev, "RX transfer address mismatch,"
+				    "%#4.4x (expected) vs. %#4.4x (actual).\n",
+				    ring_offset + xfer_count, addr);
 	}
 #endif
 	outb_p(ENISR_RDC, nic_base + EN0_ISR);	/* Ack intr. */
@@ -690,6 +699,7 @@ static void ne_block_output(struct net_device *dev, int count,
 	unsigned long dma_start;
 #ifdef NE_SANITY_CHECK
 	int retries = 0;
+	struct ei_device *ei_local = netdev_priv(dev);
 #endif
 
 	/* Round the count up for word writes.  Do we need to do this?
@@ -702,9 +712,9 @@ static void ne_block_output(struct net_device *dev, int count,
 	/* This *shouldn't* happen. If it does, it's the last thing you'll see */
 	if (ei_status.dmaing)
 	{
-		printk(KERN_EMERG "%s: DMAing conflict in ne_block_output."
-			"[DMAstat:%d][irqlock:%d]\n",
-			dev->name, ei_status.dmaing, ei_status.irqlock);
+		netdev_err(dev, "DMAing conflict in ne_block_output."
+			   "[DMAstat:%d][irqlock:%d]\n",
+			   ei_status.dmaing, ei_status.irqlock);
 		return;
 	}
 	ei_status.dmaing |= 0x01;
@@ -751,7 +761,7 @@ retry:
 	/* This was for the ALPHA version only, but enough people have
 	   been encountering problems so it is still here. */
 
-	if (ei_debug > 1)
+	if (netif_msg_tx_queued(ei_local))
 	{
 		/* DMA termination address check... */
 		int addr, tries = 20;
@@ -765,9 +775,9 @@ retry:
 
 		if (tries <= 0)
 		{
-			printk(KERN_WARNING "%s: Tx packet transfer address mismatch,"
-				"%#4.4x (expected) vs. %#4.4x (actual).\n",
-				dev->name, (start_page << 8) + count, addr);
+			netdev_warn(dev, "Tx packet transfer address mismatch,"
+				    "%#4.4x (expected) vs. %#4.4x (actual).\n",
+				    (start_page << 8) + count, addr);
 			if (retries++ == 0)
 				goto retry;
 		}
@@ -776,7 +786,7 @@ retry:
 
 	while ((inb_p(nic_base + EN0_ISR) & ENISR_RDC) == 0)
 		if (time_after(jiffies, dma_start + 2*HZ/100)) {		/* 20ms */
-			printk(KERN_WARNING "%s: timeout waiting for Tx RDC.\n", dev->name);
+			netdev_warn(dev, "timeout waiting for Tx RDC.\n");
 			ne_reset_8390(dev);
 			NS8390p_init(dev, 1);
 			break;
@@ -936,8 +946,8 @@ int __init init_module(void)
 	retval = platform_driver_probe(&ne_driver, ne_drv_probe);
 	if (retval) {
 		if (io[0] == 0)
-			printk(KERN_NOTICE "ne.c: You must supply \"io=0xNNN\""
-				" value(s) for ISA cards.\n");
+			pr_notice("ne.c: You must supply \"io=0xNNN\""
+				  " value(s) for ISA cards.\n");
 		ne_loop_rm_unreg(1);
 		return retval;
 	}
diff --git a/drivers/net/ethernet/8390/ne2k-pci.c b/drivers/net/ethernet/8390/ne2k-pci.c
index fc14a85e4d5f..f395c967262e 100644
--- a/drivers/net/ethernet/8390/ne2k-pci.c
+++ b/drivers/net/ethernet/8390/ne2k-pci.c
@@ -33,8 +33,6 @@
 /* The user-configurable values.
    These may be modified when a driver module is loaded.*/
 
-static int debug = 1;			/* 1 normal messages, 0 quiet .. 7 verbose. */
-
 #define MAX_UNITS 8				/* More are supported, limit only on options */
 /* Used to pass the full-duplex flag, etc. */
 static int full_duplex[MAX_UNITS];
@@ -60,6 +58,8 @@ static int options[MAX_UNITS];
 
 #include "8390.h"
 
+static u32 ne2k_msg_enable;
+
 /* These identify the driver base version and may not be removed. */
 static const char version[] =
 	KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE
@@ -76,10 +76,10 @@ MODULE_AUTHOR("Donald Becker / Paul Gortmaker");
 MODULE_DESCRIPTION("PCI NE2000 clone driver");
 MODULE_LICENSE("GPL");
 
-module_param(debug, int, 0);
+module_param_named(msg_enable, ne2k_msg_enable, uint, (S_IRUSR|S_IRGRP|S_IROTH));
 module_param_array(options, int, NULL, 0);
 module_param_array(full_duplex, int, NULL, 0);
-MODULE_PARM_DESC(debug, "debug level (1-2)");
+MODULE_PARM_DESC(msg_enable, "Debug message level (see linux/netdevice.h for bitmap)");
 MODULE_PARM_DESC(options, "Bit 5: full duplex");
 MODULE_PARM_DESC(full_duplex, "full duplex setting(s) (1)");
 
@@ -226,6 +226,7 @@ static int ne2k_pci_init_one(struct pci_dev *pdev,
 	static unsigned int fnd_cnt;
 	long ioaddr;
 	int flags = pci_clone_list[chip_idx].flags;
+	struct ei_device *ei_local;
 
 /* when built into the kernel, we only print version if device is found */
 #ifndef MODULE
@@ -280,6 +281,8 @@ static int ne2k_pci_init_one(struct pci_dev *pdev,
 		goto err_out_free_res;
 	}
 	dev->netdev_ops = &ne2k_netdev_ops;
+	ei_local = netdev_priv(dev);
+	ei_local->msg_enable = ne2k_msg_enable;
 
 	SET_NETDEV_DEV(dev, &pdev->dev);
 
@@ -379,9 +382,9 @@ static int ne2k_pci_init_one(struct pci_dev *pdev,
 	if (i)
 		goto err_out_free_netdev;
 
-	printk("%s: %s found at %#lx, IRQ %d, %pM.\n",
-	       dev->name, pci_clone_list[chip_idx].name, ioaddr, dev->irq,
-	       dev->dev_addr);
+	netdev_info(dev, "%s found at %#lx, IRQ %d, %pM.\n",
+		    pci_clone_list[chip_idx].name, ioaddr, dev->irq,
+		    dev->dev_addr);
 
 	return 0;
 
@@ -450,9 +453,10 @@ static int ne2k_pci_close(struct net_device *dev)
 static void ne2k_pci_reset_8390(struct net_device *dev)
 {
 	unsigned long reset_start_time = jiffies;
+	struct ei_device *ei_local = netdev_priv(dev);
 
-	if (debug > 1) printk("%s: Resetting the 8390 t=%ld...",
-						  dev->name, jiffies);
+	netif_dbg(ei_local, hw, dev, "resetting the 8390 t=%ld...\n",
+		  jiffies);
 
 	outb(inb(NE_BASE + NE_RESET), NE_BASE + NE_RESET);
 
@@ -462,7 +466,7 @@ static void ne2k_pci_reset_8390(struct net_device *dev)
 	/* This check _should_not_ be necessary, omit eventually. */
 	while ((inb(NE_BASE+EN0_ISR) & ENISR_RESET) == 0)
 		if (jiffies - reset_start_time > 2) {
-			printk("%s: ne2k_pci_reset_8390() did not complete.\n", dev->name);
+			netdev_err(dev, "ne2k_pci_reset_8390() did not complete.\n");
 			break;
 		}
 	outb(ENISR_RESET, NE_BASE + EN0_ISR);	/* Ack intr. */
@@ -479,9 +483,9 @@ static void ne2k_pci_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *
 
 	/* This *shouldn't* happen. If it does, it's the last thing you'll see */
 	if (ei_status.dmaing) {
-		printk("%s: DMAing conflict in ne2k_pci_get_8390_hdr "
+		netdev_err(dev, "DMAing conflict in ne2k_pci_get_8390_hdr "
 			   "[DMAstat:%d][irqlock:%d].\n",
-			   dev->name, ei_status.dmaing, ei_status.irqlock);
+			   ei_status.dmaing, ei_status.irqlock);
 		return;
 	}
 
@@ -517,9 +521,9 @@ static void ne2k_pci_block_input(struct net_device *dev, int count,
 
 	/* This *shouldn't* happen. If it does, it's the last thing you'll see */
 	if (ei_status.dmaing) {
-		printk("%s: DMAing conflict in ne2k_pci_block_input "
+		netdev_err(dev, "DMAing conflict in ne2k_pci_block_input "
 			   "[DMAstat:%d][irqlock:%d].\n",
-			   dev->name, ei_status.dmaing, ei_status.irqlock);
+			   ei_status.dmaing, ei_status.irqlock);
 		return;
 	}
 	ei_status.dmaing |= 0x01;
@@ -572,9 +576,9 @@ static void ne2k_pci_block_output(struct net_device *dev, int count,
 
 	/* This *shouldn't* happen. If it does, it's the last thing you'll see */
 	if (ei_status.dmaing) {
-		printk("%s: DMAing conflict in ne2k_pci_block_output."
+		netdev_err(dev, "DMAing conflict in ne2k_pci_block_output."
 			   "[DMAstat:%d][irqlock:%d]\n",
-			   dev->name, ei_status.dmaing, ei_status.irqlock);
+			   ei_status.dmaing, ei_status.irqlock);
 		return;
 	}
 	ei_status.dmaing |= 0x01;
@@ -619,7 +623,7 @@ static void ne2k_pci_block_output(struct net_device *dev, int count,
 
 	while ((inb(nic_base + EN0_ISR) & ENISR_RDC) == 0)
 		if (jiffies - dma_start > 2) {			/* Avoid clock roll-over. */
-			printk(KERN_WARNING "%s: timeout waiting for Tx RDC.\n", dev->name);
+			netdev_warn(dev, "timeout waiting for Tx RDC.\n");
 			ne2k_pci_reset_8390(dev);
 			NS8390_init(dev,1);
 			break;
@@ -640,8 +644,24 @@ static void ne2k_pci_get_drvinfo(struct net_device *dev,
 	strlcpy(info->bus_info, pci_name(pci_dev), sizeof(info->bus_info));
 }
 
+static u32 ne2k_pci_get_msglevel(struct net_device *dev)
+{
+	struct ei_device *ei_local = netdev_priv(dev);
+
+	return ei_local->msg_enable;
+}
+
+static void ne2k_pci_set_msglevel(struct net_device *dev, u32 v)
+{
+	struct ei_device *ei_local = netdev_priv(dev);
+
+	ei_local->msg_enable = v;
+}
+
 static const struct ethtool_ops ne2k_pci_ethtool_ops = {
 	.get_drvinfo		= ne2k_pci_get_drvinfo,
+	.get_msglevel		= ne2k_pci_get_msglevel,
+	.set_msglevel		= ne2k_pci_set_msglevel,
 };
 
 static void ne2k_pci_remove_one(struct pci_dev *pdev)
diff --git a/drivers/net/ethernet/8390/pcnet_cs.c b/drivers/net/ethernet/8390/pcnet_cs.c
index 46c5aadaca8e..ca3c2b921cf6 100644
--- a/drivers/net/ethernet/8390/pcnet_cs.c
+++ b/drivers/net/ethernet/8390/pcnet_cs.c
@@ -32,7 +32,6 @@
 
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/init.h>
 #include <linux/ptrace.h>
 #include <linux/string.h>
 #include <linux/timer.h>
@@ -67,7 +66,7 @@
 #define PCNET_RDC_TIMEOUT (2*HZ/100)	/* Max wait in jiffies for Tx RDC */
 
 static const char *if_names[] = { "auto", "10baseT", "10base2"};
-
+static u32 pcnet_msg_enable;
 
 /*====================================================================*/
 
@@ -558,6 +557,7 @@ static int pcnet_config(struct pcmcia_device *link)
     int start_pg, stop_pg, cm_offset;
     int has_shmem = 0;
     hw_info_t *local_hw_info;
+    struct ei_device *ei_local;
 
     dev_dbg(&link->dev, "pcnet_config\n");
 
@@ -607,6 +607,8 @@ static int pcnet_config(struct pcmcia_device *link)
 	mii_phy_probe(dev);
 
     SET_NETDEV_DEV(dev, &link->dev);
+    ei_local = netdev_priv(dev);
+    ei_local->msg_enable = pcnet_msg_enable;
 
     if (register_netdev(dev) != 0) {
 	pr_notice("register_netdev() failed\n");
@@ -616,7 +618,7 @@ static int pcnet_config(struct pcmcia_device *link)
     if (info->flags & (IS_DL10019|IS_DL10022)) {
 	u_char id = inb(dev->base_addr + 0x1a);
 	netdev_info(dev, "NE2000 (DL100%d rev %02x): ",
-	       (info->flags & IS_DL10022) ? 22 : 19, id);
+		    (info->flags & IS_DL10022) ? 22 : 19, id);
 	if (info->pna_phy)
 	    pr_cont("PNA, ");
     } else {
@@ -1063,9 +1065,9 @@ static void ei_watchdog(u_long arg)
 	    if (info->phy_id == info->eth_phy) {
 		if (p)
 		    netdev_info(dev, "autonegotiation complete: "
-			   "%sbaseT-%cD selected\n",
-			   ((p & 0x0180) ? "100" : "10"),
-			   ((p & 0x0140) ? 'F' : 'H'));
+				"%sbaseT-%cD selected\n",
+				((p & 0x0180) ? "100" : "10"),
+				((p & 0x0140) ? 'F' : 'H'));
 		else
 		    netdev_info(dev, "link partner did not autonegotiate\n");
 	    }
@@ -1081,7 +1083,7 @@ static void ei_watchdog(u_long arg)
 	    mdio_write(mii_addr, info->phy_id, 0, 0x0400);
 	    info->phy_id ^= info->pna_phy ^ info->eth_phy;
 	    netdev_info(dev, "switched to %s transceiver\n",
-		   (info->phy_id == info->eth_phy) ? "ethernet" : "PNA");
+			(info->phy_id == info->eth_phy) ? "ethernet" : "PNA");
 	    mdio_write(mii_addr, info->phy_id, 0,
 		       (info->phy_id == info->eth_phy) ? 0x1000 : 0);
 	    info->link_status = 0;
@@ -1128,9 +1130,9 @@ static void dma_get_8390_hdr(struct net_device *dev,
     unsigned int nic_base = dev->base_addr;
 
     if (ei_status.dmaing) {
-	netdev_notice(dev, "DMAing conflict in dma_block_input."
-	       "[DMAstat:%1x][irqlock:%1x]\n",
-	       ei_status.dmaing, ei_status.irqlock);
+	netdev_err(dev, "DMAing conflict in dma_get_8390_hdr."
+		   "[DMAstat:%1x][irqlock:%1x]\n",
+		   ei_status.dmaing, ei_status.irqlock);
 	return;
     }
 
@@ -1159,13 +1161,14 @@ static void dma_block_input(struct net_device *dev, int count,
     unsigned int nic_base = dev->base_addr;
     int xfer_count = count;
     char *buf = skb->data;
+    struct ei_device *ei_local = netdev_priv(dev);
 
-    if ((ei_debug > 4) && (count != 4))
+    if (netif_msg_rx_status(ei_local) && (count != 4))
 	netdev_dbg(dev, "[bi=%d]\n", count+4);
     if (ei_status.dmaing) {
-	netdev_notice(dev, "DMAing conflict in dma_block_input."
-	       "[DMAstat:%1x][irqlock:%1x]\n",
-	       ei_status.dmaing, ei_status.irqlock);
+	netdev_err(dev, "DMAing conflict in dma_block_input."
+		   "[DMAstat:%1x][irqlock:%1x]\n",
+		   ei_status.dmaing, ei_status.irqlock);
 	return;
     }
     ei_status.dmaing |= 0x01;
@@ -1183,7 +1186,8 @@ static void dma_block_input(struct net_device *dev, int count,
     /* This was for the ALPHA version only, but enough people have been
        encountering problems that it is still here. */
 #ifdef PCMCIA_DEBUG
-    if (ei_debug > 4) {		/* DMA termination address check... */
+    /* DMA termination address check... */
+    if (netif_msg_rx_status(ei_local)) {
 	int addr, tries = 20;
 	do {
 	    /* DON'T check for 'inb_p(EN0_ISR) & ENISR_RDC' here
@@ -1196,8 +1200,8 @@ static void dma_block_input(struct net_device *dev, int count,
 	} while (--tries > 0);
 	if (tries <= 0)
 	    netdev_notice(dev, "RX transfer address mismatch,"
-		   "%#4.4x (expected) vs. %#4.4x (actual).\n",
-		   ring_offset + xfer_count, addr);
+			  "%#4.4x (expected) vs. %#4.4x (actual).\n",
+			  ring_offset + xfer_count, addr);
     }
 #endif
     outb_p(ENISR_RDC, nic_base + EN0_ISR);	/* Ack intr. */
@@ -1213,12 +1217,12 @@ static void dma_block_output(struct net_device *dev, int count,
     pcnet_dev_t *info = PRIV(dev);
 #ifdef PCMCIA_DEBUG
     int retries = 0;
+    struct ei_device *ei_local = netdev_priv(dev);
 #endif
     u_long dma_start;
 
 #ifdef PCMCIA_DEBUG
-    if (ei_debug > 4)
-	netdev_dbg(dev, "[bo=%d]\n", count);
+    netif_dbg(ei_local, tx_queued, dev, "[bo=%d]\n", count);
 #endif
 
     /* Round the count up for word writes.  Do we need to do this?
@@ -1227,9 +1231,9 @@ static void dma_block_output(struct net_device *dev, int count,
     if (count & 0x01)
 	count++;
     if (ei_status.dmaing) {
-	netdev_notice(dev, "DMAing conflict in dma_block_output."
-	       "[DMAstat:%1x][irqlock:%1x]\n",
-	       ei_status.dmaing, ei_status.irqlock);
+	netdev_err(dev, "DMAing conflict in dma_block_output."
+		   "[DMAstat:%1x][irqlock:%1x]\n",
+		   ei_status.dmaing, ei_status.irqlock);
 	return;
     }
     ei_status.dmaing |= 0x01;
@@ -1256,7 +1260,8 @@ static void dma_block_output(struct net_device *dev, int count,
 #ifdef PCMCIA_DEBUG
     /* This was for the ALPHA version only, but enough people have been
        encountering problems that it is still here. */
-    if (ei_debug > 4) {	/* DMA termination address check... */
+    /* DMA termination address check... */
+    if (netif_msg_tx_queued(ei_local)) {
 	int addr, tries = 20;
 	do {
 	    int high = inb_p(nic_base + EN0_RSARHI);
@@ -1267,8 +1272,8 @@ static void dma_block_output(struct net_device *dev, int count,
 	} while (--tries > 0);
 	if (tries <= 0) {
 	    netdev_notice(dev, "Tx packet transfer address mismatch,"
-		   "%#4.4x (expected) vs. %#4.4x (actual).\n",
-		   (start_page << 8) + count, addr);
+			  "%#4.4x (expected) vs. %#4.4x (actual).\n",
+			  (start_page << 8) + count, addr);
 	    if (retries++ == 0)
 		goto retry;
 	}
@@ -1277,10 +1282,10 @@ static void dma_block_output(struct net_device *dev, int count,
 
     while ((inb_p(nic_base + EN0_ISR) & ENISR_RDC) == 0)
 	if (time_after(jiffies, dma_start + PCNET_RDC_TIMEOUT)) {
-	    netdev_notice(dev, "timeout waiting for Tx RDC.\n");
-	    pcnet_reset_8390(dev);
-	    NS8390_init(dev, 1);
-	    break;
+		netdev_warn(dev, "timeout waiting for Tx RDC.\n");
+		pcnet_reset_8390(dev);
+		NS8390_init(dev, 1);
+		break;
 	}
 
     outb_p(ENISR_RDC, nic_base + EN0_ISR);	/* Ack intr. */
diff --git a/drivers/net/ethernet/8390/smc-ultra.c b/drivers/net/ethernet/8390/smc-ultra.c
index b0fbce39661a..139385dcdaa7 100644
--- a/drivers/net/ethernet/8390/smc-ultra.c
+++ b/drivers/net/ethernet/8390/smc-ultra.c
@@ -111,6 +111,7 @@ static struct isapnp_device_id ultra_device_ids[] __initdata = {
 MODULE_DEVICE_TABLE(isapnp, ultra_device_ids);
 #endif
 
+static u32 ultra_msg_enable;
 
 #define START_PG		0x00	/* First page of TX buffer */
 
@@ -211,6 +212,7 @@ static int __init ultra_probe1(struct net_device *dev, int ioaddr)
 	unsigned char num_pages, irqreg, addr, piomode;
 	unsigned char idreg = inb(ioaddr + 7);
 	unsigned char reg4 = inb(ioaddr + 4) & 0x7f;
+	struct ei_device *ei_local = netdev_priv(dev);
 
 	if (!request_region(ioaddr, ULTRA_IO_EXTENT, DRV_NAME))
 		return -EBUSY;
@@ -232,16 +234,16 @@ static int __init ultra_probe1(struct net_device *dev, int ioaddr)
 		goto out;
 	}
 
-	if (ei_debug  &&  version_printed++ == 0)
-		printk(version);
+	if ((ultra_msg_enable & NETIF_MSG_DRV) && (version_printed++ == 0))
+		netdev_info(dev, "%s", version);
 
 	model_name = (idreg & 0xF0) == 0x20 ? "SMC Ultra" : "SMC EtherEZ";
 
 	for (i = 0; i < 6; i++)
 		dev->dev_addr[i] = inb(ioaddr + 8 + i);
 
-	printk("%s: %s at %#3x, %pM", dev->name, model_name,
-	       ioaddr, dev->dev_addr);
+	netdev_info(dev, "%s at %#3x, %pM", model_name,
+		    ioaddr, dev->dev_addr);
 
 	/* Switch from the station address to the alternate register set and
 	   read the useful registers there. */
@@ -265,7 +267,7 @@ static int __init ultra_probe1(struct net_device *dev, int ioaddr)
 		irq = irqmap[((irqreg & 0x40) >> 4) + ((irqreg & 0x0c) >> 2)];
 
 		if (irq == 0) {
-			printk(", failed to detect IRQ line.\n");
+			pr_cont(", failed to detect IRQ line.\n");
 			retval =  -EAGAIN;
 			goto out;
 		}
@@ -296,7 +298,7 @@ static int __init ultra_probe1(struct net_device *dev, int ioaddr)
 
 	ei_status.mem = ioremap(dev->mem_start, (ei_status.stop_page - START_PG)*256);
 	if (!ei_status.mem) {
-		printk(", failed to ioremap.\n");
+		pr_cont(", failed to ioremap.\n");
 		retval =  -ENOMEM;
 		goto out;
 	}
@@ -304,14 +306,15 @@ static int __init ultra_probe1(struct net_device *dev, int ioaddr)
 	dev->mem_end = dev->mem_start + (ei_status.stop_page - START_PG)*256;
 
 	if (piomode) {
-		printk(",%s IRQ %d programmed-I/O mode.\n",
-			   eeprom_irq ? "EEPROM" : "assigned ", dev->irq);
+		pr_cont(", %s IRQ %d programmed-I/O mode.\n",
+			eeprom_irq ? "EEPROM" : "assigned ", dev->irq);
 		ei_status.block_input = &ultra_pio_input;
 		ei_status.block_output = &ultra_pio_output;
 		ei_status.get_8390_hdr = &ultra_pio_get_hdr;
 	} else {
-		printk(",%s IRQ %d memory %#lx-%#lx.\n", eeprom_irq ? "" : "assigned ",
-			   dev->irq, dev->mem_start, dev->mem_end-1);
+		pr_cont(", %s IRQ %d memory %#lx-%#lx.\n",
+			eeprom_irq ? "" : "assigned ", dev->irq, dev->mem_start,
+			dev->mem_end-1);
 		ei_status.block_input = &ultra_block_input;
 		ei_status.block_output = &ultra_block_output;
 		ei_status.get_8390_hdr = &ultra_get_8390_hdr;
@@ -320,6 +323,7 @@ static int __init ultra_probe1(struct net_device *dev, int ioaddr)
 
 	dev->netdev_ops = &ultra_netdev_ops;
 	NS8390_init(dev, 0);
+	ei_local->msg_enable = ultra_msg_enable;
 
 	retval = register_netdev(dev);
 	if (retval)
@@ -356,12 +360,15 @@ static int __init ultra_probe_isapnp(struct net_device *dev)
                         /* found it */
 			dev->base_addr = pnp_port_start(idev, 0);
 			dev->irq = pnp_irq(idev, 0);
-                        printk(KERN_INFO "smc-ultra.c: ISAPnP reports %s at i/o %#lx, irq %d.\n",
-                                (char *) ultra_device_ids[i].driver_data,
-                                dev->base_addr, dev->irq);
+			netdev_info(dev,
+				    "smc-ultra.c: ISAPnP reports %s at i/o %#lx, irq %d.\n",
+				    (char *) ultra_device_ids[i].driver_data,
+				    dev->base_addr, dev->irq);
                         if (ultra_probe1(dev, dev->base_addr) != 0) {      /* Shouldn't happen. */
-                                printk(KERN_ERR "smc-ultra.c: Probe of ISAPnP card at %#lx failed.\n", dev->base_addr);
-                                pnp_device_detach(idev);
+				netdev_err(dev,
+					   "smc-ultra.c: Probe of ISAPnP card at %#lx failed.\n",
+					   dev->base_addr);
+				pnp_device_detach(idev);
 				return -ENXIO;
                         }
                         ei_status.priv = (unsigned long)idev;
@@ -412,9 +419,10 @@ static void
 ultra_reset_8390(struct net_device *dev)
 {
 	int cmd_port = dev->base_addr - ULTRA_NIC_OFFSET; /* ASIC base addr */
+	struct ei_device *ei_local = netdev_priv(dev);
 
 	outb(ULTRA_RESET, cmd_port);
-	if (ei_debug > 1) printk("resetting Ultra, t=%ld...", jiffies);
+	netif_dbg(ei_local, hw, dev, "resetting Ultra, t=%ld...\n", jiffies);
 	ei_status.txing = 0;
 
 	outb(0x00, cmd_port);	/* Disable shared memory for safety. */
@@ -424,7 +432,7 @@ ultra_reset_8390(struct net_device *dev)
 	else
 		outb(0x01, cmd_port + 6);		/* Enable interrupts and memory. */
 
-	if (ei_debug > 1) printk("reset done\n");
+	netif_dbg(ei_local, hw, dev, "reset done\n");
 }
 
 /* Grab the 8390 specific header. Similar to the block_input routine, but
@@ -530,11 +538,11 @@ static int
 ultra_close_card(struct net_device *dev)
 {
 	int ioaddr = dev->base_addr - ULTRA_NIC_OFFSET; /* CMDREG */
+	struct ei_device *ei_local = netdev_priv(dev);
 
 	netif_stop_queue(dev);
 
-	if (ei_debug > 1)
-		printk("%s: Shutting down ethercard.\n", dev->name);
+	netif_dbg(ei_local, ifdown, dev, "Shutting down ethercard.\n");
 
 	outb(0x00, ioaddr + 6);		/* Disable interrupts. */
 	free_irq(dev->irq, dev);
@@ -556,8 +564,10 @@ static int irq[MAX_ULTRA_CARDS];
 
 module_param_array(io, int, NULL, 0);
 module_param_array(irq, int, NULL, 0);
+module_param_named(msg_enable, ultra_msg_enable, uint, (S_IRUSR|S_IRGRP|S_IROTH));
 MODULE_PARM_DESC(io, "I/O base address(es)");
 MODULE_PARM_DESC(irq, "IRQ number(s) (assigned)");
+MODULE_PARM_DESC(msg_enable, "Debug message level (see linux/netdevice.h for bitmap)");
 MODULE_DESCRIPTION("SMC Ultra/EtherEZ ISA/PnP Ethernet driver");
 MODULE_LICENSE("GPL");
 
diff --git a/drivers/net/ethernet/8390/stnic.c b/drivers/net/ethernet/8390/stnic.c
index 8df4c4157230..aca957d4e121 100644
--- a/drivers/net/ethernet/8390/stnic.c
+++ b/drivers/net/ethernet/8390/stnic.c
@@ -69,6 +69,11 @@ static void stnic_block_output (struct net_device *dev, int count,
 
 static void stnic_init (struct net_device *dev);
 
+static u32 stnic_msg_enable;
+
+module_param_named(msg_enable, stnic_msg_enable, uint, (S_IRUSR|S_IRGRP|S_IROTH));
+MODULE_PARM_DESC(msg_enable, "Debug message level (see linux/netdevice.h for bitmap)");
+
 /* SH7750 specific read/write io. */
 static inline void
 STNIC_DELAY (void)
@@ -100,6 +105,7 @@ static int __init stnic_probe(void)
 {
   struct net_device *dev;
   int i, err;
+  struct ei_device *ei_local;
 
   /* If we are not running on a SolutionEngine, give up now */
   if (! MACH_SE)
@@ -125,10 +131,10 @@ static int __init stnic_probe(void)
      share and the board will usually be enabled. */
   err = request_irq (dev->irq, ei_interrupt, 0, DRV_NAME, dev);
   if (err)  {
-      printk (KERN_EMERG " unable to get IRQ %d.\n", dev->irq);
-      free_netdev(dev);
-      return err;
-    }
+    netdev_emerg(dev, "unable to get IRQ %d.\n", dev->irq);
+    free_netdev(dev);
+    return err;
+  }
 
   ei_status.name = dev->name;
   ei_status.word16 = 1;
@@ -147,6 +153,8 @@ static int __init stnic_probe(void)
   ei_status.block_output = &stnic_block_output;
 
   stnic_init (dev);
+  ei_local = netdev_priv(dev);
+  ei_local->msg_enable = stnic_msg_enable;
 
   err = register_netdev(dev);
   if (err) {
@@ -156,7 +164,7 @@ static int __init stnic_probe(void)
   }
   stnic_dev = dev;
 
-  printk (KERN_INFO "NS ST-NIC 83902A\n");
+  netdev_info(dev, "NS ST-NIC 83902A\n");
 
   return 0;
 }
@@ -164,10 +172,11 @@ static int __init stnic_probe(void)
 static void
 stnic_reset (struct net_device *dev)
 {
+  struct ei_device *ei_local = netdev_priv(dev);
+
   *(vhalf *) PA_83902_RST = 0;
   udelay (5);
-  if (ei_debug > 1)
-    printk (KERN_WARNING "8390 reset done (%ld).\n", jiffies);
+  netif_warn(ei_local, hw, dev, "8390 reset done (%ld).\n", jiffies);
   *(vhalf *) PA_83902_RST = ~0;
   udelay (5);
 }
@@ -176,6 +185,8 @@ static void
 stnic_get_hdr (struct net_device *dev, struct e8390_pkt_hdr *hdr,
 	       int ring_page)
 {
+  struct ei_device *ei_local = netdev_priv(dev);
+
   half buf[2];
 
   STNIC_WRITE (PG0_RSAR0, 0);
@@ -196,8 +207,7 @@ stnic_get_hdr (struct net_device *dev, struct e8390_pkt_hdr *hdr,
   hdr->count = ((buf[1] >> 8) & 0xff) | (buf[1] << 8);
 #endif
 
-  if (ei_debug > 1)
-    printk (KERN_DEBUG "ring %x status %02x next %02x count %04x.\n",
+  netif_dbg(ei_local, probe, dev, "ring %x status %02x next %02x count %04x.\n",
 	    ring_page, hdr->status, hdr->next, hdr->count);
 
   STNIC_WRITE (STNIC_CR, CR_RDMA | CR_PG0 | CR_STA);
diff --git a/drivers/net/ethernet/8390/wd.c b/drivers/net/ethernet/8390/wd.c
index 03eb3eed49fa..dd7d816bde52 100644
--- a/drivers/net/ethernet/8390/wd.c
+++ b/drivers/net/ethernet/8390/wd.c
@@ -60,6 +60,7 @@ static void wd_block_output(struct net_device *dev, int count,
 							const unsigned char *buf, int start_page);
 static int wd_close(struct net_device *dev);
 
+static u32 wd_msg_enable;
 
 #define WD_START_PG		0x00	/* First page of TX buffer */
 #define WD03_STOP_PG	0x20	/* Last page +1 of RX ring */
@@ -170,6 +171,7 @@ static int __init wd_probe1(struct net_device *dev, int ioaddr)
 	int word16 = 0;				/* 0 = 8 bit, 1 = 16 bit */
 	const char *model_name;
 	static unsigned version_printed;
+	struct ei_device *ei_local = netdev_priv(dev);
 
 	for (i = 0; i < 8; i++)
 		checksum += inb(ioaddr + 8 + i);
@@ -180,19 +182,19 @@ static int __init wd_probe1(struct net_device *dev, int ioaddr)
 
 	/* Check for semi-valid mem_start/end values if supplied. */
 	if ((dev->mem_start % 0x2000) || (dev->mem_end % 0x2000)) {
-		printk(KERN_WARNING "wd.c: user supplied mem_start or mem_end not on 8kB boundary - ignored.\n");
+		netdev_warn(dev,
+			    "wd.c: user supplied mem_start or mem_end not on 8kB boundary - ignored.\n");
 		dev->mem_start = 0;
 		dev->mem_end = 0;
 	}
 
-	if (ei_debug  &&  version_printed++ == 0)
-		printk(version);
+	if ((wd_msg_enable & NETIF_MSG_DRV) && (version_printed++ == 0))
+		netdev_info(dev, "%s", version);
 
 	for (i = 0; i < 6; i++)
 		dev->dev_addr[i] = inb(ioaddr + 8 + i);
 
-	printk("%s: WD80x3 at %#3x, %pM",
-	       dev->name, ioaddr, dev->dev_addr);
+	netdev_info(dev, "WD80x3 at %#3x, %pM", ioaddr, dev->dev_addr);
 
 	/* The following PureData probe code was contributed by
 	   Mike Jagdis <jaggy@purplet.demon.co.uk>. Puredata does software
@@ -244,8 +246,9 @@ static int __init wd_probe1(struct net_device *dev, int ioaddr)
 		}
 #ifndef final_version
 		if ( !ancient && (inb(ioaddr+1) & 0x01) != (word16 & 0x01))
-			printk("\nWD80?3: Bus width conflict, %d (probe) != %d (reg report).",
-				   word16 ? 16 : 8, (inb(ioaddr+1) & 0x01) ? 16 : 8);
+			pr_cont("\nWD80?3: Bus width conflict, %d (probe) != %d (reg report).",
+				word16 ? 16 : 8,
+				(inb(ioaddr+1) & 0x01) ? 16 : 8);
 #endif
 	}
 
@@ -259,7 +262,7 @@ static int __init wd_probe1(struct net_device *dev, int ioaddr)
 		if (reg0 == 0xff || reg0 == 0) {
 			/* Future plan: this could check a few likely locations first. */
 			dev->mem_start = 0xd0000;
-			printk(" assigning address %#lx", dev->mem_start);
+			pr_cont(" assigning address %#lx", dev->mem_start);
 		} else {
 			int high_addr_bits = inb(ioaddr+WD_CMDREG5) & 0x1f;
 			/* Some boards don't have the register 5 -- it returns 0xff. */
@@ -297,8 +300,8 @@ static int __init wd_probe1(struct net_device *dev, int ioaddr)
 
 			outb_p(0x00, nic_addr+EN0_IMR);	/* Mask all intrs. again. */
 
-			if (ei_debug > 2)
-				printk(" autoirq is %d", dev->irq);
+			if (netif_msg_drv(ei_local))
+				pr_cont(" autoirq is %d", dev->irq);
 			if (dev->irq < 2)
 				dev->irq = word16 ? 10 : 5;
 		} else
@@ -310,7 +313,7 @@ static int __init wd_probe1(struct net_device *dev, int ioaddr)
 	   share and the board will usually be enabled. */
 	i = request_irq(dev->irq, ei_interrupt, 0, DRV_NAME, dev);
 	if (i) {
-		printk (" unable to get IRQ %d.\n", dev->irq);
+		pr_cont(" unable to get IRQ %d.\n", dev->irq);
 		return i;
 	}
 
@@ -338,8 +341,8 @@ static int __init wd_probe1(struct net_device *dev, int ioaddr)
 		return -ENOMEM;
 	}
 
-	printk(" %s, IRQ %d, shared memory at %#lx-%#lx.\n",
-		   model_name, dev->irq, dev->mem_start, dev->mem_end-1);
+	pr_cont(" %s, IRQ %d, shared memory at %#lx-%#lx.\n",
+		model_name, dev->irq, dev->mem_start, dev->mem_end-1);
 
 	ei_status.reset_8390 = wd_reset_8390;
 	ei_status.block_input = wd_block_input;
@@ -348,6 +351,7 @@ static int __init wd_probe1(struct net_device *dev, int ioaddr)
 
 	dev->netdev_ops = &wd_netdev_ops;
 	NS8390_init(dev, 0);
+	ei_local->msg_enable = wd_msg_enable;
 
 #if 1
 	/* Enable interrupt generation on softconfig cards -- M.U */
@@ -385,9 +389,11 @@ static void
 wd_reset_8390(struct net_device *dev)
 {
 	int wd_cmd_port = dev->base_addr - WD_NIC_OFFSET; /* WD_CMDREG */
+	struct ei_device *ei_local = netdev_priv(dev);
 
 	outb(WD_RESET, wd_cmd_port);
-	if (ei_debug > 1) printk("resetting the WD80x3 t=%lu...", jiffies);
+	netif_dbg(ei_local, hw, dev, "resetting the WD80x3 t=%lu...\n",
+		  jiffies);
 	ei_status.txing = 0;
 
 	/* Set up the ASIC registers, just in case something changed them. */
@@ -395,7 +401,7 @@ wd_reset_8390(struct net_device *dev)
 	if (ei_status.word16)
 		outb(NIC16 | ((dev->mem_start>>19) & 0x1f), wd_cmd_port+WD_CMDREG5);
 
-	if (ei_debug > 1) printk("reset done\n");
+	netif_dbg(ei_local, hw, dev, "reset done\n");
 }
 
 /* Grab the 8390 specific header. Similar to the block_input routine, but
@@ -474,9 +480,9 @@ static int
 wd_close(struct net_device *dev)
 {
 	int wd_cmdreg = dev->base_addr - WD_NIC_OFFSET; /* WD_CMDREG */
+	struct ei_device *ei_local = netdev_priv(dev);
 
-	if (ei_debug > 1)
-		printk("%s: Shutting down ethercard.\n", dev->name);
+	netif_dbg(ei_local, ifdown, dev, "Shutting down ethercard.\n");
 	ei_close(dev);
 
 	/* Change from 16-bit to 8-bit shared memory so reboot works. */
@@ -502,10 +508,12 @@ module_param_array(io, int, NULL, 0);
 module_param_array(irq, int, NULL, 0);
 module_param_array(mem, int, NULL, 0);
 module_param_array(mem_end, int, NULL, 0);
+module_param_named(msg_enable, wd_msg_enable, uint, (S_IRUSR|S_IRGRP|S_IROTH));
 MODULE_PARM_DESC(io, "I/O base address(es)");
 MODULE_PARM_DESC(irq, "IRQ number(s) (ignored for PureData boards)");
 MODULE_PARM_DESC(mem, "memory base address(es)(ignored for PureData boards)");
 MODULE_PARM_DESC(mem_end, "memory end address(es)");
+MODULE_PARM_DESC(msg_enable, "Debug message level (see linux/netdevice.h for bitmap)");
 MODULE_DESCRIPTION("ISA Western Digital wd8003/wd8013 ; SMC Elite, Elite16 ethernet driver");
 MODULE_LICENSE("GPL");
 
diff --git a/drivers/net/ethernet/8390/zorro8390.c b/drivers/net/ethernet/8390/zorro8390.c
index ae2a12b7db62..8308728fad05 100644
--- a/drivers/net/ethernet/8390/zorro8390.c
+++ b/drivers/net/ethernet/8390/zorro8390.c
@@ -44,6 +44,8 @@
 static const char version[] =
 	"8390.c:v1.10cvs 9/23/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
 
+static u32 zorro8390_msg_enable;
+
 #include "lib8390.c"
 
 #define DRV_NAME	"zorro8390"
@@ -86,9 +88,9 @@ static struct card_info {
 static void zorro8390_reset_8390(struct net_device *dev)
 {
 	unsigned long reset_start_time = jiffies;
+	struct ei_device *ei_local = netdev_priv(dev);
 
-	if (ei_debug > 1)
-		netdev_dbg(dev, "resetting - t=%ld...\n", jiffies);
+	netif_dbg(ei_local, hw, dev, "resetting - t=%ld...\n", jiffies);
 
 	z_writeb(z_readb(NE_BASE + NE_RESET), NE_BASE + NE_RESET);
 
@@ -119,8 +121,9 @@ static void zorro8390_get_8390_hdr(struct net_device *dev,
 	 * If it does, it's the last thing you'll see
 	 */
 	if (ei_status.dmaing) {
-		netdev_err(dev, "%s: DMAing conflict [DMAstat:%d][irqlock:%d]\n",
-			   __func__, ei_status.dmaing, ei_status.irqlock);
+		netdev_warn(dev,
+			    "%s: DMAing conflict [DMAstat:%d][irqlock:%d]\n",
+			    __func__, ei_status.dmaing, ei_status.irqlock);
 		return;
 	}
 
@@ -230,7 +233,7 @@ static void zorro8390_block_output(struct net_device *dev, int count,
 	while ((z_readb(NE_BASE + NE_EN0_ISR) & ENISR_RDC) == 0)
 		if (time_after(jiffies, dma_start + 2 * HZ / 100)) {
 					/* 20ms */
-			netdev_err(dev, "timeout waiting for Tx RDC\n");
+			netdev_warn(dev, "timeout waiting for Tx RDC\n");
 			zorro8390_reset_8390(dev);
 			__NS8390_init(dev, 1);
 			break;
@@ -248,8 +251,9 @@ static int zorro8390_open(struct net_device *dev)
 
 static int zorro8390_close(struct net_device *dev)
 {
-	if (ei_debug > 1)
-		netdev_dbg(dev, "Shutting down ethercard\n");
+	struct ei_device *ei_local = netdev_priv(dev);
+
+	netif_dbg(ei_local, ifdown, dev, "Shutting down ethercard\n");
 	__ei_close(dev);
 	return 0;
 }
@@ -293,6 +297,7 @@ static int zorro8390_init(struct net_device *dev, unsigned long board,
 	int err;
 	unsigned char SA_prom[32];
 	int start_page, stop_page;
+	struct ei_device *ei_local = netdev_priv(dev);
 	static u32 zorro8390_offsets[16] = {
 		0x00, 0x02, 0x04, 0x06, 0x08, 0x0a, 0x0c, 0x0e,
 		0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e,
@@ -383,6 +388,9 @@ static int zorro8390_init(struct net_device *dev, unsigned long board,
 
 	dev->netdev_ops = &zorro8390_netdev_ops;
 	__NS8390_init(dev, 0);
+
+	ei_local->msg_enable = zorro8390_msg_enable;
+
 	err = register_netdev(dev);
 	if (err) {
 		free_irq(IRQ_AMIGA_PORTS, dev);
diff --git a/drivers/net/ethernet/adi/bfin_mac.c b/drivers/net/ethernet/adi/bfin_mac.c
index 75fb1d20d6fd..c0f68dcd1dc1 100644
--- a/drivers/net/ethernet/adi/bfin_mac.c
+++ b/drivers/net/ethernet/adi/bfin_mac.c
@@ -667,8 +667,8 @@ static u32 bfin_select_phc_clock(u32 input_clk, unsigned int *shift_result)
 	return 1000000000UL / ppn;
 }
 
-static int bfin_mac_hwtstamp_ioctl(struct net_device *netdev,
-		struct ifreq *ifr, int cmd)
+static int bfin_mac_hwtstamp_set(struct net_device *netdev,
+				 struct ifreq *ifr)
 {
 	struct hwtstamp_config config;
 	struct bfin_mac_local *lp = netdev_priv(netdev);
@@ -824,6 +824,16 @@ static int bfin_mac_hwtstamp_ioctl(struct net_device *netdev,
 		-EFAULT : 0;
 }
 
+static int bfin_mac_hwtstamp_get(struct net_device *netdev,
+				 struct ifreq *ifr)
+{
+	struct bfin_mac_local *lp = netdev_priv(netdev);
+
+	return copy_to_user(ifr->ifr_data, &lp->stamp_cfg,
+			    sizeof(lp->stamp_cfg)) ?
+		-EFAULT : 0;
+}
+
 static void bfin_tx_hwtstamp(struct net_device *netdev, struct sk_buff *skb)
 {
 	struct bfin_mac_local *lp = netdev_priv(netdev);
@@ -1062,7 +1072,8 @@ static void bfin_phc_release(struct bfin_mac_local *lp)
 #else
 # define bfin_mac_hwtstamp_is_none(cfg) 0
 # define bfin_mac_hwtstamp_init(dev)
-# define bfin_mac_hwtstamp_ioctl(dev, ifr, cmd) (-EOPNOTSUPP)
+# define bfin_mac_hwtstamp_set(dev, ifr) (-EOPNOTSUPP)
+# define bfin_mac_hwtstamp_get(dev, ifr) (-EOPNOTSUPP)
 # define bfin_rx_hwtstamp(dev, skb)
 # define bfin_tx_hwtstamp(dev, skb)
 # define bfin_phc_init(netdev, dev) 0
@@ -1496,7 +1507,9 @@ static int bfin_mac_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
 
 	switch (cmd) {
 	case SIOCSHWTSTAMP:
-		return bfin_mac_hwtstamp_ioctl(netdev, ifr, cmd);
+		return bfin_mac_hwtstamp_set(netdev, ifr);
+	case SIOCGHWTSTAMP:
+		return bfin_mac_hwtstamp_get(netdev, ifr);
 	default:
 		if (lp->phydev)
 			return phy_mii_ioctl(lp->phydev, ifr, cmd);
@@ -1544,7 +1557,6 @@ static int bfin_mac_open(struct net_device *dev)
 		return ret;
 
 	phy_start(lp->phydev);
-	phy_write(lp->phydev, MII_BMCR, BMCR_RESET);
 	setup_system_regs(dev);
 	setup_mac_addr(dev->dev_addr);
 
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
index e06694555144..c5d75e7aeeb6 100644
--- a/drivers/net/ethernet/aeroflex/greth.c
+++ b/drivers/net/ethernet/aeroflex/greth.c
@@ -25,7 +25,6 @@
 #include <linux/dma-mapping.h>
 #include <linux/module.h>
 #include <linux/uaccess.h>
-#include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
@@ -1361,7 +1360,7 @@ static int greth_mdio_init(struct greth_private *greth)
 		timeout = jiffies + 6*HZ;
 		while (!phy_aneg_done(greth->phy) && time_before(jiffies, timeout)) {
 		}
-		genphy_read_status(greth->phy);
+		phy_read_status(greth->phy);
 		greth_link_change(greth->netdev);
 	}
 
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
index 46dfb1378c17..0cc21437478c 100644
--- a/drivers/net/ethernet/allwinner/sun4i-emac.c
+++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
@@ -16,7 +16,6 @@
 #include <linux/etherdevice.h>
 #include <linux/ethtool.h>
 #include <linux/gpio.h>
-#include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/irq.h>
 #include <linux/mii.h>
diff --git a/drivers/net/ethernet/alteon/acenic.c b/drivers/net/ethernet/alteon/acenic.c
index 219be1bf3cfc..1517e9df5ba1 100644
--- a/drivers/net/ethernet/alteon/acenic.c
+++ b/drivers/net/ethernet/alteon/acenic.c
@@ -61,7 +61,6 @@
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/skbuff.h>
-#include <linux/init.h>
 #include <linux/delay.h>
 #include <linux/mm.h>
 #include <linux/highmem.h>
diff --git a/drivers/net/ethernet/amd/7990.c b/drivers/net/ethernet/amd/7990.c
index 65926a956575..18e542f7853d 100644
--- a/drivers/net/ethernet/amd/7990.c
+++ b/drivers/net/ethernet/amd/7990.c
@@ -17,7 +17,6 @@
 #include <linux/errno.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
-#include <linux/init.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/types.h>
@@ -42,9 +41,9 @@
 
 #include "7990.h"
 
-#define WRITERAP(lp,x) out_be16(lp->base + LANCE_RAP, (x))
-#define WRITERDP(lp,x) out_be16(lp->base + LANCE_RDP, (x))
-#define READRDP(lp) in_be16(lp->base + LANCE_RDP)
+#define WRITERAP(lp, x)	out_be16(lp->base + LANCE_RAP, (x))
+#define WRITERDP(lp, x)	out_be16(lp->base + LANCE_RDP, (x))
+#define READRDP(lp)	in_be16(lp->base + LANCE_RDP)
 
 #if defined(CONFIG_HPLANCE) || defined(CONFIG_HPLANCE_MODULE)
 #include "hplance.h"
@@ -56,9 +55,9 @@
 #if defined(CONFIG_MVME147_NET) || defined(CONFIG_MVME147_NET_MODULE)
 
 /* Lossage Factor Nine, Mr Sulu. */
-#define WRITERAP(lp,x) (lp->writerap(lp,x))
-#define WRITERDP(lp,x) (lp->writerdp(lp,x))
-#define READRDP(lp) (lp->readrdp(lp))
+#define WRITERAP(lp, x)	(lp->writerap(lp, x))
+#define WRITERDP(lp, x)	(lp->writerdp(lp, x))
+#define READRDP(lp)	(lp->readrdp(lp))
 
 #else
 
@@ -94,428 +93,436 @@ static inline __u16 READRDP(struct lance_private *lp)
 #ifdef UNDEF
 #define PRINT_RINGS() \
 do { \
-        int t; \
-        for (t=0; t < RX_RING_SIZE; t++) { \
-                printk("R%d: @(%02X %04X) len %04X, mblen %04X, bits %02X\n",\
-                       t, ib->brx_ring[t].rmd1_hadr, ib->brx_ring[t].rmd0,\
-                       ib->brx_ring[t].length,\
-                       ib->brx_ring[t].mblength, ib->brx_ring[t].rmd1_bits);\
-        }\
-        for (t=0; t < TX_RING_SIZE; t++) { \
-                printk("T%d: @(%02X %04X) len %04X, misc %04X, bits %02X\n",\
-                       t, ib->btx_ring[t].tmd1_hadr, ib->btx_ring[t].tmd0,\
-                       ib->btx_ring[t].length,\
-                       ib->btx_ring[t].misc, ib->btx_ring[t].tmd1_bits);\
-        }\
+	int t; \
+	for (t = 0; t < RX_RING_SIZE; t++) { \
+		printk("R%d: @(%02X %04X) len %04X, mblen %04X, bits %02X\n", \
+		       t, ib->brx_ring[t].rmd1_hadr, ib->brx_ring[t].rmd0, \
+		       ib->brx_ring[t].length, \
+		       ib->brx_ring[t].mblength, ib->brx_ring[t].rmd1_bits); \
+	} \
+	for (t = 0; t < TX_RING_SIZE; t++) { \
+		printk("T%d: @(%02X %04X) len %04X, misc %04X, bits %02X\n", \
+		       t, ib->btx_ring[t].tmd1_hadr, ib->btx_ring[t].tmd0, \
+		       ib->btx_ring[t].length, \
+		       ib->btx_ring[t].misc, ib->btx_ring[t].tmd1_bits); \
+	} \
 } while (0)
 #else
 #define PRINT_RINGS()
 #endif
 
 /* Load the CSR registers. The LANCE has to be STOPped when we do this! */
-static void load_csrs (struct lance_private *lp)
+static void load_csrs(struct lance_private *lp)
 {
-        volatile struct lance_init_block *aib = lp->lance_init_block;
-        int leptr;
+	volatile struct lance_init_block *aib = lp->lance_init_block;
+	int leptr;
 
-        leptr = LANCE_ADDR (aib);
+	leptr = LANCE_ADDR(aib);
 
-        WRITERAP(lp, LE_CSR1);                    /* load address of init block */
-        WRITERDP(lp, leptr & 0xFFFF);
-        WRITERAP(lp, LE_CSR2);
-        WRITERDP(lp, leptr >> 16);
-        WRITERAP(lp, LE_CSR3);
-        WRITERDP(lp, lp->busmaster_regval);       /* set byteswap/ALEctrl/byte ctrl */
+	WRITERAP(lp, LE_CSR1);                    /* load address of init block */
+	WRITERDP(lp, leptr & 0xFFFF);
+	WRITERAP(lp, LE_CSR2);
+	WRITERDP(lp, leptr >> 16);
+	WRITERAP(lp, LE_CSR3);
+	WRITERDP(lp, lp->busmaster_regval);       /* set byteswap/ALEctrl/byte ctrl */
 
-        /* Point back to csr0 */
-        WRITERAP(lp, LE_CSR0);
+	/* Point back to csr0 */
+	WRITERAP(lp, LE_CSR0);
 }
 
 /* #define to 0 or 1 appropriately */
 #define DEBUG_IRING 0
 /* Set up the Lance Rx and Tx rings and the init block */
-static void lance_init_ring (struct net_device *dev)
+static void lance_init_ring(struct net_device *dev)
 {
-        struct lance_private *lp = netdev_priv(dev);
-        volatile struct lance_init_block *ib = lp->init_block;
-        volatile struct lance_init_block *aib; /* for LANCE_ADDR computations */
-        int leptr;
-        int i;
-
-        aib = lp->lance_init_block;
-
-        lp->rx_new = lp->tx_new = 0;
-        lp->rx_old = lp->tx_old = 0;
-
-        ib->mode = LE_MO_PROM;                             /* normal, enable Tx & Rx */
-
-        /* Copy the ethernet address to the lance init block
-         * Notice that we do a byteswap if we're big endian.
-         * [I think this is the right criterion; at least, sunlance,
-         * a2065 and atarilance do the byteswap and lance.c (PC) doesn't.
-         * However, the datasheet says that the BSWAP bit doesn't affect
-         * the init block, so surely it should be low byte first for
-         * everybody? Um.]
-         * We could define the ib->physaddr as three 16bit values and
-         * use (addr[1] << 8) | addr[0] & co, but this is more efficient.
-         */
+	struct lance_private *lp = netdev_priv(dev);
+	volatile struct lance_init_block *ib = lp->init_block;
+	volatile struct lance_init_block *aib; /* for LANCE_ADDR computations */
+	int leptr;
+	int i;
+
+	aib = lp->lance_init_block;
+
+	lp->rx_new = lp->tx_new = 0;
+	lp->rx_old = lp->tx_old = 0;
+
+	ib->mode = LE_MO_PROM;                             /* normal, enable Tx & Rx */
+
+	/* Copy the ethernet address to the lance init block
+	 * Notice that we do a byteswap if we're big endian.
+	 * [I think this is the right criterion; at least, sunlance,
+	 * a2065 and atarilance do the byteswap and lance.c (PC) doesn't.
+	 * However, the datasheet says that the BSWAP bit doesn't affect
+	 * the init block, so surely it should be low byte first for
+	 * everybody? Um.]
+	 * We could define the ib->physaddr as three 16bit values and
+	 * use (addr[1] << 8) | addr[0] & co, but this is more efficient.
+	 */
 #ifdef __BIG_ENDIAN
-        ib->phys_addr [0] = dev->dev_addr [1];
-        ib->phys_addr [1] = dev->dev_addr [0];
-        ib->phys_addr [2] = dev->dev_addr [3];
-        ib->phys_addr [3] = dev->dev_addr [2];
-        ib->phys_addr [4] = dev->dev_addr [5];
-        ib->phys_addr [5] = dev->dev_addr [4];
+	ib->phys_addr[0] = dev->dev_addr[1];
+	ib->phys_addr[1] = dev->dev_addr[0];
+	ib->phys_addr[2] = dev->dev_addr[3];
+	ib->phys_addr[3] = dev->dev_addr[2];
+	ib->phys_addr[4] = dev->dev_addr[5];
+	ib->phys_addr[5] = dev->dev_addr[4];
 #else
-        for (i=0; i<6; i++)
-           ib->phys_addr[i] = dev->dev_addr[i];
+	for (i = 0; i < 6; i++)
+		ib->phys_addr[i] = dev->dev_addr[i];
 #endif
 
-        if (DEBUG_IRING)
-                printk ("TX rings:\n");
+	if (DEBUG_IRING)
+		printk("TX rings:\n");
 
 	lp->tx_full = 0;
-        /* Setup the Tx ring entries */
-        for (i = 0; i < (1<<lp->lance_log_tx_bufs); i++) {
-                leptr = LANCE_ADDR(&aib->tx_buf[i][0]);
-                ib->btx_ring [i].tmd0      = leptr;
-                ib->btx_ring [i].tmd1_hadr = leptr >> 16;
-                ib->btx_ring [i].tmd1_bits = 0;
-                ib->btx_ring [i].length    = 0xf000; /* The ones required by tmd2 */
-                ib->btx_ring [i].misc      = 0;
-                if (DEBUG_IRING)
-                   printk ("%d: 0x%8.8x\n", i, leptr);
-        }
-
-        /* Setup the Rx ring entries */
-        if (DEBUG_IRING)
-                printk ("RX rings:\n");
-        for (i = 0; i < (1<<lp->lance_log_rx_bufs); i++) {
-                leptr = LANCE_ADDR(&aib->rx_buf[i][0]);
-
-                ib->brx_ring [i].rmd0      = leptr;
-                ib->brx_ring [i].rmd1_hadr = leptr >> 16;
-                ib->brx_ring [i].rmd1_bits = LE_R1_OWN;
-                /* 0xf000 == bits that must be one (reserved, presumably) */
-                ib->brx_ring [i].length    = -RX_BUFF_SIZE | 0xf000;
-                ib->brx_ring [i].mblength  = 0;
-                if (DEBUG_IRING)
-                        printk ("%d: 0x%8.8x\n", i, leptr);
-        }
-
-        /* Setup the initialization block */
-
-        /* Setup rx descriptor pointer */
-        leptr = LANCE_ADDR(&aib->brx_ring);
-        ib->rx_len = (lp->lance_log_rx_bufs << 13) | (leptr >> 16);
-        ib->rx_ptr = leptr;
-        if (DEBUG_IRING)
-                printk ("RX ptr: %8.8x\n", leptr);
-
-        /* Setup tx descriptor pointer */
-        leptr = LANCE_ADDR(&aib->btx_ring);
-        ib->tx_len = (lp->lance_log_tx_bufs << 13) | (leptr >> 16);
-        ib->tx_ptr = leptr;
-        if (DEBUG_IRING)
-                printk ("TX ptr: %8.8x\n", leptr);
-
-        /* Clear the multicast filter */
-        ib->filter [0] = 0;
-        ib->filter [1] = 0;
-        PRINT_RINGS();
+	/* Setup the Tx ring entries */
+	for (i = 0; i < (1 << lp->lance_log_tx_bufs); i++) {
+		leptr = LANCE_ADDR(&aib->tx_buf[i][0]);
+		ib->btx_ring[i].tmd0      = leptr;
+		ib->btx_ring[i].tmd1_hadr = leptr >> 16;
+		ib->btx_ring[i].tmd1_bits = 0;
+		ib->btx_ring[i].length    = 0xf000; /* The ones required by tmd2 */
+		ib->btx_ring[i].misc      = 0;
+		if (DEBUG_IRING)
+			printk("%d: 0x%8.8x\n", i, leptr);
+	}
+
+	/* Setup the Rx ring entries */
+	if (DEBUG_IRING)
+		printk("RX rings:\n");
+	for (i = 0; i < (1 << lp->lance_log_rx_bufs); i++) {
+		leptr = LANCE_ADDR(&aib->rx_buf[i][0]);
+
+		ib->brx_ring[i].rmd0      = leptr;
+		ib->brx_ring[i].rmd1_hadr = leptr >> 16;
+		ib->brx_ring[i].rmd1_bits = LE_R1_OWN;
+		/* 0xf000 == bits that must be one (reserved, presumably) */
+		ib->brx_ring[i].length    = -RX_BUFF_SIZE | 0xf000;
+		ib->brx_ring[i].mblength  = 0;
+		if (DEBUG_IRING)
+			printk("%d: 0x%8.8x\n", i, leptr);
+	}
+
+	/* Setup the initialization block */
+
+	/* Setup rx descriptor pointer */
+	leptr = LANCE_ADDR(&aib->brx_ring);
+	ib->rx_len = (lp->lance_log_rx_bufs << 13) | (leptr >> 16);
+	ib->rx_ptr = leptr;
+	if (DEBUG_IRING)
+		printk("RX ptr: %8.8x\n", leptr);
+
+	/* Setup tx descriptor pointer */
+	leptr = LANCE_ADDR(&aib->btx_ring);
+	ib->tx_len = (lp->lance_log_tx_bufs << 13) | (leptr >> 16);
+	ib->tx_ptr = leptr;
+	if (DEBUG_IRING)
+		printk("TX ptr: %8.8x\n", leptr);
+
+	/* Clear the multicast filter */
+	ib->filter[0] = 0;
+	ib->filter[1] = 0;
+	PRINT_RINGS();
 }
 
 /* LANCE must be STOPped before we do this, too... */
-static int init_restart_lance (struct lance_private *lp)
+static int init_restart_lance(struct lance_private *lp)
 {
-        int i;
+	int i;
 
-        WRITERAP(lp, LE_CSR0);
-        WRITERDP(lp, LE_C0_INIT);
+	WRITERAP(lp, LE_CSR0);
+	WRITERDP(lp, LE_C0_INIT);
 
-        /* Need a hook here for sunlance ledma stuff */
+	/* Need a hook here for sunlance ledma stuff */
 
-        /* Wait for the lance to complete initialization */
-        for (i = 0; (i < 100) && !(READRDP(lp) & (LE_C0_ERR | LE_C0_IDON)); i++)
-                barrier();
-        if ((i == 100) || (READRDP(lp) & LE_C0_ERR)) {
-                printk ("LANCE unopened after %d ticks, csr0=%4.4x.\n", i, READRDP(lp));
-                return -1;
-        }
+	/* Wait for the lance to complete initialization */
+	for (i = 0; (i < 100) && !(READRDP(lp) & (LE_C0_ERR | LE_C0_IDON)); i++)
+		barrier();
+	if ((i == 100) || (READRDP(lp) & LE_C0_ERR)) {
+		printk("LANCE unopened after %d ticks, csr0=%4.4x.\n", i, READRDP(lp));
+		return -1;
+	}
 
-        /* Clear IDON by writing a "1", enable interrupts and start lance */
-        WRITERDP(lp, LE_C0_IDON);
-        WRITERDP(lp, LE_C0_INEA | LE_C0_STRT);
+	/* Clear IDON by writing a "1", enable interrupts and start lance */
+	WRITERDP(lp, LE_C0_IDON);
+	WRITERDP(lp, LE_C0_INEA | LE_C0_STRT);
 
-        return 0;
+	return 0;
 }
 
-static int lance_reset (struct net_device *dev)
+static int lance_reset(struct net_device *dev)
 {
-        struct lance_private *lp = netdev_priv(dev);
-        int status;
+	struct lance_private *lp = netdev_priv(dev);
+	int status;
 
-        /* Stop the lance */
-        WRITERAP(lp, LE_CSR0);
-        WRITERDP(lp, LE_C0_STOP);
+	/* Stop the lance */
+	WRITERAP(lp, LE_CSR0);
+	WRITERDP(lp, LE_C0_STOP);
 
-        load_csrs (lp);
-        lance_init_ring (dev);
-        dev->trans_start = jiffies; /* prevent tx timeout */
-        status = init_restart_lance (lp);
+	load_csrs(lp);
+	lance_init_ring(dev);
+	dev->trans_start = jiffies; /* prevent tx timeout */
+	status = init_restart_lance(lp);
 #ifdef DEBUG_DRIVER
-        printk ("Lance restart=%d\n", status);
+	printk("Lance restart=%d\n", status);
 #endif
-        return status;
+	return status;
 }
 
-static int lance_rx (struct net_device *dev)
+static int lance_rx(struct net_device *dev)
 {
-        struct lance_private *lp = netdev_priv(dev);
-        volatile struct lance_init_block *ib = lp->init_block;
-        volatile struct lance_rx_desc *rd;
-        unsigned char bits;
+	struct lance_private *lp = netdev_priv(dev);
+	volatile struct lance_init_block *ib = lp->init_block;
+	volatile struct lance_rx_desc *rd;
+	unsigned char bits;
 #ifdef TEST_HITS
-        int i;
+	int i;
 #endif
 
 #ifdef TEST_HITS
-        printk ("[");
-        for (i = 0; i < RX_RING_SIZE; i++) {
-                if (i == lp->rx_new)
-                        printk ("%s",
-                                ib->brx_ring [i].rmd1_bits & LE_R1_OWN ? "_" : "X");
-                else
-                        printk ("%s",
-                                ib->brx_ring [i].rmd1_bits & LE_R1_OWN ? "." : "1");
-        }
-        printk ("]");
+	printk("[");
+	for (i = 0; i < RX_RING_SIZE; i++) {
+		if (i == lp->rx_new)
+			printk("%s",
+			       ib->brx_ring[i].rmd1_bits & LE_R1_OWN ? "_" : "X");
+		else
+			printk("%s",
+			       ib->brx_ring[i].rmd1_bits & LE_R1_OWN ? "." : "1");
+	}
+	printk("]");
 #endif
 #ifdef CONFIG_HP300
 	blinken_leds(0x40, 0);
 #endif
-        WRITERDP(lp, LE_C0_RINT | LE_C0_INEA);     /* ack Rx int, reenable ints */
-        for (rd = &ib->brx_ring [lp->rx_new];     /* For each Rx ring we own... */
-             !((bits = rd->rmd1_bits) & LE_R1_OWN);
-             rd = &ib->brx_ring [lp->rx_new]) {
-
-                /* We got an incomplete frame? */
-                if ((bits & LE_R1_POK) != LE_R1_POK) {
-                        dev->stats.rx_over_errors++;
-                        dev->stats.rx_errors++;
-                        continue;
-                } else if (bits & LE_R1_ERR) {
-                        /* Count only the end frame as a rx error,
-                         * not the beginning
-                         */
-                        if (bits & LE_R1_BUF) dev->stats.rx_fifo_errors++;
-                        if (bits & LE_R1_CRC) dev->stats.rx_crc_errors++;
-                        if (bits & LE_R1_OFL) dev->stats.rx_over_errors++;
-                        if (bits & LE_R1_FRA) dev->stats.rx_frame_errors++;
-                        if (bits & LE_R1_EOP) dev->stats.rx_errors++;
-                } else {
+	WRITERDP(lp, LE_C0_RINT | LE_C0_INEA);     /* ack Rx int, reenable ints */
+	for (rd = &ib->brx_ring[lp->rx_new];     /* For each Rx ring we own... */
+	     !((bits = rd->rmd1_bits) & LE_R1_OWN);
+	     rd = &ib->brx_ring[lp->rx_new]) {
+
+		/* We got an incomplete frame? */
+		if ((bits & LE_R1_POK) != LE_R1_POK) {
+			dev->stats.rx_over_errors++;
+			dev->stats.rx_errors++;
+			continue;
+		} else if (bits & LE_R1_ERR) {
+			/* Count only the end frame as a rx error,
+			 * not the beginning
+			 */
+			if (bits & LE_R1_BUF)
+				dev->stats.rx_fifo_errors++;
+			if (bits & LE_R1_CRC)
+				dev->stats.rx_crc_errors++;
+			if (bits & LE_R1_OFL)
+				dev->stats.rx_over_errors++;
+			if (bits & LE_R1_FRA)
+				dev->stats.rx_frame_errors++;
+			if (bits & LE_R1_EOP)
+				dev->stats.rx_errors++;
+		} else {
 			int len = (rd->mblength & 0xfff) - 4;
 			struct sk_buff *skb = netdev_alloc_skb(dev, len + 2);
 
-                        if (!skb) {
-                                dev->stats.rx_dropped++;
-                                rd->mblength = 0;
-                                rd->rmd1_bits = LE_R1_OWN;
-                                lp->rx_new = (lp->rx_new + 1) & lp->rx_ring_mod_mask;
-                                return 0;
-                        }
-
-                        skb_reserve (skb, 2);           /* 16 byte align */
-                        skb_put (skb, len);             /* make room */
-                        skb_copy_to_linear_data(skb,
-                                         (unsigned char *)&(ib->rx_buf [lp->rx_new][0]),
-                                         len);
-                        skb->protocol = eth_type_trans (skb, dev);
-			netif_rx (skb);
+			if (!skb) {
+				dev->stats.rx_dropped++;
+				rd->mblength = 0;
+				rd->rmd1_bits = LE_R1_OWN;
+				lp->rx_new = (lp->rx_new + 1) & lp->rx_ring_mod_mask;
+				return 0;
+			}
+
+			skb_reserve(skb, 2);           /* 16 byte align */
+			skb_put(skb, len);             /* make room */
+			skb_copy_to_linear_data(skb,
+					 (unsigned char *)&(ib->rx_buf[lp->rx_new][0]),
+					 len);
+			skb->protocol = eth_type_trans(skb, dev);
+			netif_rx(skb);
 			dev->stats.rx_packets++;
 			dev->stats.rx_bytes += len;
-                }
-
-                /* Return the packet to the pool */
-                rd->mblength = 0;
-                rd->rmd1_bits = LE_R1_OWN;
-                lp->rx_new = (lp->rx_new + 1) & lp->rx_ring_mod_mask;
-        }
-        return 0;
+		}
+
+		/* Return the packet to the pool */
+		rd->mblength = 0;
+		rd->rmd1_bits = LE_R1_OWN;
+		lp->rx_new = (lp->rx_new + 1) & lp->rx_ring_mod_mask;
+	}
+	return 0;
 }
 
-static int lance_tx (struct net_device *dev)
+static int lance_tx(struct net_device *dev)
 {
-        struct lance_private *lp = netdev_priv(dev);
-        volatile struct lance_init_block *ib = lp->init_block;
-        volatile struct lance_tx_desc *td;
-        int i, j;
-        int status;
+	struct lance_private *lp = netdev_priv(dev);
+	volatile struct lance_init_block *ib = lp->init_block;
+	volatile struct lance_tx_desc *td;
+	int i, j;
+	int status;
 
 #ifdef CONFIG_HP300
 	blinken_leds(0x80, 0);
 #endif
-        /* csr0 is 2f3 */
-        WRITERDP(lp, LE_C0_TINT | LE_C0_INEA);
-        /* csr0 is 73 */
-
-        j = lp->tx_old;
-        for (i = j; i != lp->tx_new; i = j) {
-                td = &ib->btx_ring [i];
-
-                /* If we hit a packet not owned by us, stop */
-                if (td->tmd1_bits & LE_T1_OWN)
-                        break;
-
-                if (td->tmd1_bits & LE_T1_ERR) {
-                        status = td->misc;
-
-                        dev->stats.tx_errors++;
-                        if (status & LE_T3_RTY)  dev->stats.tx_aborted_errors++;
-                        if (status & LE_T3_LCOL) dev->stats.tx_window_errors++;
-
-                        if (status & LE_T3_CLOS) {
-                                dev->stats.tx_carrier_errors++;
-                                if (lp->auto_select) {
-                                        lp->tpe = 1 - lp->tpe;
-                                        printk("%s: Carrier Lost, trying %s\n",
-                                               dev->name, lp->tpe?"TPE":"AUI");
-                                        /* Stop the lance */
-                                        WRITERAP(lp, LE_CSR0);
-                                        WRITERDP(lp, LE_C0_STOP);
-                                        lance_init_ring (dev);
-                                        load_csrs (lp);
-                                        init_restart_lance (lp);
-                                        return 0;
-                                }
-                        }
-
-                        /* buffer errors and underflows turn off the transmitter */
-                        /* Restart the adapter */
-                        if (status & (LE_T3_BUF|LE_T3_UFL)) {
-                                dev->stats.tx_fifo_errors++;
-
-                                printk ("%s: Tx: ERR_BUF|ERR_UFL, restarting\n",
-                                        dev->name);
-                                /* Stop the lance */
-                                WRITERAP(lp, LE_CSR0);
-                                WRITERDP(lp, LE_C0_STOP);
-                                lance_init_ring (dev);
-                                load_csrs (lp);
-                                init_restart_lance (lp);
-                                return 0;
-                        }
-                } else if ((td->tmd1_bits & LE_T1_POK) == LE_T1_POK) {
-                        /*
-                         * So we don't count the packet more than once.
-                         */
-                        td->tmd1_bits &= ~(LE_T1_POK);
-
-                        /* One collision before packet was sent. */
-                        if (td->tmd1_bits & LE_T1_EONE)
-                                dev->stats.collisions++;
-
-                        /* More than one collision, be optimistic. */
-                        if (td->tmd1_bits & LE_T1_EMORE)
-                                dev->stats.collisions += 2;
-
-                        dev->stats.tx_packets++;
-                }
-
-                j = (j + 1) & lp->tx_ring_mod_mask;
-        }
-        lp->tx_old = j;
-        WRITERDP(lp, LE_C0_TINT | LE_C0_INEA);
-        return 0;
+	/* csr0 is 2f3 */
+	WRITERDP(lp, LE_C0_TINT | LE_C0_INEA);
+	/* csr0 is 73 */
+
+	j = lp->tx_old;
+	for (i = j; i != lp->tx_new; i = j) {
+		td = &ib->btx_ring[i];
+
+		/* If we hit a packet not owned by us, stop */
+		if (td->tmd1_bits & LE_T1_OWN)
+			break;
+
+		if (td->tmd1_bits & LE_T1_ERR) {
+			status = td->misc;
+
+			dev->stats.tx_errors++;
+			if (status & LE_T3_RTY)
+				dev->stats.tx_aborted_errors++;
+			if (status & LE_T3_LCOL)
+				dev->stats.tx_window_errors++;
+
+			if (status & LE_T3_CLOS) {
+				dev->stats.tx_carrier_errors++;
+				if (lp->auto_select) {
+					lp->tpe = 1 - lp->tpe;
+					printk("%s: Carrier Lost, trying %s\n",
+					       dev->name,
+					       lp->tpe ? "TPE" : "AUI");
+					/* Stop the lance */
+					WRITERAP(lp, LE_CSR0);
+					WRITERDP(lp, LE_C0_STOP);
+					lance_init_ring(dev);
+					load_csrs(lp);
+					init_restart_lance(lp);
+					return 0;
+				}
+			}
+
+			/* buffer errors and underflows turn off the transmitter */
+			/* Restart the adapter */
+			if (status & (LE_T3_BUF|LE_T3_UFL)) {
+				dev->stats.tx_fifo_errors++;
+
+				printk("%s: Tx: ERR_BUF|ERR_UFL, restarting\n",
+				       dev->name);
+				/* Stop the lance */
+				WRITERAP(lp, LE_CSR0);
+				WRITERDP(lp, LE_C0_STOP);
+				lance_init_ring(dev);
+				load_csrs(lp);
+				init_restart_lance(lp);
+				return 0;
+			}
+		} else if ((td->tmd1_bits & LE_T1_POK) == LE_T1_POK) {
+			/*
+			 * So we don't count the packet more than once.
+			 */
+			td->tmd1_bits &= ~(LE_T1_POK);
+
+			/* One collision before packet was sent. */
+			if (td->tmd1_bits & LE_T1_EONE)
+				dev->stats.collisions++;
+
+			/* More than one collision, be optimistic. */
+			if (td->tmd1_bits & LE_T1_EMORE)
+				dev->stats.collisions += 2;
+
+			dev->stats.tx_packets++;
+		}
+
+		j = (j + 1) & lp->tx_ring_mod_mask;
+	}
+	lp->tx_old = j;
+	WRITERDP(lp, LE_C0_TINT | LE_C0_INEA);
+	return 0;
 }
 
 static irqreturn_t
-lance_interrupt (int irq, void *dev_id)
+lance_interrupt(int irq, void *dev_id)
 {
-        struct net_device *dev = (struct net_device *)dev_id;
-        struct lance_private *lp = netdev_priv(dev);
-        int csr0;
+	struct net_device *dev = (struct net_device *)dev_id;
+	struct lance_private *lp = netdev_priv(dev);
+	int csr0;
 
-	spin_lock (&lp->devlock);
+	spin_lock(&lp->devlock);
 
-        WRITERAP(lp, LE_CSR0);              /* LANCE Controller Status */
-        csr0 = READRDP(lp);
+	WRITERAP(lp, LE_CSR0);              /* LANCE Controller Status */
+	csr0 = READRDP(lp);
 
-        PRINT_RINGS();
+	PRINT_RINGS();
 
-        if (!(csr0 & LE_C0_INTR)) {     /* Check if any interrupt has */
-		spin_unlock (&lp->devlock);
-                return IRQ_NONE;        /* been generated by the Lance. */
+	if (!(csr0 & LE_C0_INTR)) {     /* Check if any interrupt has */
+		spin_unlock(&lp->devlock);
+		return IRQ_NONE;        /* been generated by the Lance. */
 	}
 
-        /* Acknowledge all the interrupt sources ASAP */
-        WRITERDP(lp, csr0 & ~(LE_C0_INEA|LE_C0_TDMD|LE_C0_STOP|LE_C0_STRT|LE_C0_INIT));
-
-        if ((csr0 & LE_C0_ERR)) {
-                /* Clear the error condition */
-                WRITERDP(lp, LE_C0_BABL|LE_C0_ERR|LE_C0_MISS|LE_C0_INEA);
-        }
-
-        if (csr0 & LE_C0_RINT)
-                lance_rx (dev);
-
-        if (csr0 & LE_C0_TINT)
-                lance_tx (dev);
-
-        /* Log misc errors. */
-        if (csr0 & LE_C0_BABL)
-                dev->stats.tx_errors++;       /* Tx babble. */
-        if (csr0 & LE_C0_MISS)
-                dev->stats.rx_errors++;       /* Missed a Rx frame. */
-        if (csr0 & LE_C0_MERR) {
-                printk("%s: Bus master arbitration failure, status %4.4x.\n",
-                       dev->name, csr0);
-                /* Restart the chip. */
-                WRITERDP(lp, LE_C0_STRT);
-        }
-
-        if (lp->tx_full && netif_queue_stopped(dev) && (TX_BUFFS_AVAIL >= 0)) {
+	/* Acknowledge all the interrupt sources ASAP */
+	WRITERDP(lp, csr0 & ~(LE_C0_INEA|LE_C0_TDMD|LE_C0_STOP|LE_C0_STRT|LE_C0_INIT));
+
+	if ((csr0 & LE_C0_ERR)) {
+		/* Clear the error condition */
+		WRITERDP(lp, LE_C0_BABL|LE_C0_ERR|LE_C0_MISS|LE_C0_INEA);
+	}
+
+	if (csr0 & LE_C0_RINT)
+		lance_rx(dev);
+
+	if (csr0 & LE_C0_TINT)
+		lance_tx(dev);
+
+	/* Log misc errors. */
+	if (csr0 & LE_C0_BABL)
+		dev->stats.tx_errors++;       /* Tx babble. */
+	if (csr0 & LE_C0_MISS)
+		dev->stats.rx_errors++;       /* Missed a Rx frame. */
+	if (csr0 & LE_C0_MERR) {
+		printk("%s: Bus master arbitration failure, status %4.4x.\n",
+		       dev->name, csr0);
+		/* Restart the chip. */
+		WRITERDP(lp, LE_C0_STRT);
+	}
+
+	if (lp->tx_full && netif_queue_stopped(dev) && (TX_BUFFS_AVAIL >= 0)) {
 		lp->tx_full = 0;
-		netif_wake_queue (dev);
-        }
+		netif_wake_queue(dev);
+	}
 
-        WRITERAP(lp, LE_CSR0);
-        WRITERDP(lp, LE_C0_BABL|LE_C0_CERR|LE_C0_MISS|LE_C0_MERR|LE_C0_IDON|LE_C0_INEA);
+	WRITERAP(lp, LE_CSR0);
+	WRITERDP(lp, LE_C0_BABL|LE_C0_CERR|LE_C0_MISS|LE_C0_MERR|LE_C0_IDON|LE_C0_INEA);
 
-	spin_unlock (&lp->devlock);
+	spin_unlock(&lp->devlock);
 	return IRQ_HANDLED;
 }
 
-int lance_open (struct net_device *dev)
+int lance_open(struct net_device *dev)
 {
-        struct lance_private *lp = netdev_priv(dev);
+	struct lance_private *lp = netdev_priv(dev);
 	int res;
 
-        /* Install the Interrupt handler. Or we could shunt this out to specific drivers? */
-        if (request_irq(lp->irq, lance_interrupt, IRQF_SHARED, lp->name, dev))
-                return -EAGAIN;
+	/* Install the Interrupt handler. Or we could shunt this out to specific drivers? */
+	if (request_irq(lp->irq, lance_interrupt, IRQF_SHARED, lp->name, dev))
+		return -EAGAIN;
 
-        res = lance_reset(dev);
+	res = lance_reset(dev);
 	spin_lock_init(&lp->devlock);
-	netif_start_queue (dev);
+	netif_start_queue(dev);
 
 	return res;
 }
 EXPORT_SYMBOL_GPL(lance_open);
 
-int lance_close (struct net_device *dev)
+int lance_close(struct net_device *dev)
 {
-        struct lance_private *lp = netdev_priv(dev);
+	struct lance_private *lp = netdev_priv(dev);
 
-	netif_stop_queue (dev);
+	netif_stop_queue(dev);
 
-        /* Stop the LANCE */
-        WRITERAP(lp, LE_CSR0);
-        WRITERDP(lp, LE_C0_STOP);
+	/* Stop the LANCE */
+	WRITERAP(lp, LE_CSR0);
+	WRITERDP(lp, LE_C0_STOP);
 
-        free_irq(lp->irq, dev);
+	free_irq(lp->irq, dev);
 
-        return 0;
+	return 0;
 }
 EXPORT_SYMBOL_GPL(lance_close);
 
@@ -524,122 +531,122 @@ void lance_tx_timeout(struct net_device *dev)
 	printk("lance_tx_timeout\n");
 	lance_reset(dev);
 	dev->trans_start = jiffies; /* prevent tx timeout */
-	netif_wake_queue (dev);
+	netif_wake_queue(dev);
 }
 EXPORT_SYMBOL_GPL(lance_tx_timeout);
 
-int lance_start_xmit (struct sk_buff *skb, struct net_device *dev)
+int lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
-        struct lance_private *lp = netdev_priv(dev);
-        volatile struct lance_init_block *ib = lp->init_block;
-        int entry, skblen, len;
-        static int outs;
+	struct lance_private *lp = netdev_priv(dev);
+	volatile struct lance_init_block *ib = lp->init_block;
+	int entry, skblen, len;
+	static int outs;
 	unsigned long flags;
 
-        if (!TX_BUFFS_AVAIL)
-                return NETDEV_TX_LOCKED;
+	if (!TX_BUFFS_AVAIL)
+		return NETDEV_TX_LOCKED;
 
-	netif_stop_queue (dev);
+	netif_stop_queue(dev);
 
-        skblen = skb->len;
+	skblen = skb->len;
 
 #ifdef DEBUG_DRIVER
-        /* dump the packet */
-        {
-                int i;
-
-                for (i = 0; i < 64; i++) {
-                        if ((i % 16) == 0)
-                                printk ("\n");
-                        printk ("%2.2x ", skb->data [i]);
-                }
-        }
+	/* dump the packet */
+	{
+		int i;
+
+		for (i = 0; i < 64; i++) {
+			if ((i % 16) == 0)
+				printk("\n");
+			printk("%2.2x ", skb->data[i]);
+		}
+	}
 #endif
-        len = (skblen <= ETH_ZLEN) ? ETH_ZLEN : skblen;
-        entry = lp->tx_new & lp->tx_ring_mod_mask;
-        ib->btx_ring [entry].length = (-len) | 0xf000;
-        ib->btx_ring [entry].misc = 0;
+	len = (skblen <= ETH_ZLEN) ? ETH_ZLEN : skblen;
+	entry = lp->tx_new & lp->tx_ring_mod_mask;
+	ib->btx_ring[entry].length = (-len) | 0xf000;
+	ib->btx_ring[entry].misc = 0;
 
 	if (skb->len < ETH_ZLEN)
 		memset((void *)&ib->tx_buf[entry][0], 0, ETH_ZLEN);
-        skb_copy_from_linear_data(skb, (void *)&ib->tx_buf[entry][0], skblen);
+	skb_copy_from_linear_data(skb, (void *)&ib->tx_buf[entry][0], skblen);
 
-        /* Now, give the packet to the lance */
-        ib->btx_ring [entry].tmd1_bits = (LE_T1_POK|LE_T1_OWN);
-        lp->tx_new = (lp->tx_new+1) & lp->tx_ring_mod_mask;
+	/* Now, give the packet to the lance */
+	ib->btx_ring[entry].tmd1_bits = (LE_T1_POK|LE_T1_OWN);
+	lp->tx_new = (lp->tx_new + 1) & lp->tx_ring_mod_mask;
 
-        outs++;
-        /* Kick the lance: transmit now */
-        WRITERDP(lp, LE_C0_INEA | LE_C0_TDMD);
-        dev_kfree_skb (skb);
+	outs++;
+	/* Kick the lance: transmit now */
+	WRITERDP(lp, LE_C0_INEA | LE_C0_TDMD);
+	dev_kfree_skb(skb);
 
-	spin_lock_irqsave (&lp->devlock, flags);
-        if (TX_BUFFS_AVAIL)
-		netif_start_queue (dev);
+	spin_lock_irqsave(&lp->devlock, flags);
+	if (TX_BUFFS_AVAIL)
+		netif_start_queue(dev);
 	else
 		lp->tx_full = 1;
-	spin_unlock_irqrestore (&lp->devlock, flags);
+	spin_unlock_irqrestore(&lp->devlock, flags);
 
-        return NETDEV_TX_OK;
+	return NETDEV_TX_OK;
 }
 EXPORT_SYMBOL_GPL(lance_start_xmit);
 
 /* taken from the depca driver via a2065.c */
-static void lance_load_multicast (struct net_device *dev)
+static void lance_load_multicast(struct net_device *dev)
 {
-        struct lance_private *lp = netdev_priv(dev);
-        volatile struct lance_init_block *ib = lp->init_block;
-        volatile u16 *mcast_table = (u16 *)&ib->filter;
+	struct lance_private *lp = netdev_priv(dev);
+	volatile struct lance_init_block *ib = lp->init_block;
+	volatile u16 *mcast_table = (u16 *)&ib->filter;
 	struct netdev_hw_addr *ha;
-        u32 crc;
-
-        /* set all multicast bits */
-        if (dev->flags & IFF_ALLMULTI){
-                ib->filter [0] = 0xffffffff;
-                ib->filter [1] = 0xffffffff;
-                return;
-        }
-        /* clear the multicast filter */
-        ib->filter [0] = 0;
-        ib->filter [1] = 0;
-
-        /* Add addresses */
+	u32 crc;
+
+	/* set all multicast bits */
+	if (dev->flags & IFF_ALLMULTI) {
+		ib->filter[0] = 0xffffffff;
+		ib->filter[1] = 0xffffffff;
+		return;
+	}
+	/* clear the multicast filter */
+	ib->filter[0] = 0;
+	ib->filter[1] = 0;
+
+	/* Add addresses */
 	netdev_for_each_mc_addr(ha, dev) {
 		crc = ether_crc_le(6, ha->addr);
-                crc = crc >> 26;
-                mcast_table [crc >> 4] |= 1 << (crc & 0xf);
-        }
+		crc = crc >> 26;
+		mcast_table[crc >> 4] |= 1 << (crc & 0xf);
+	}
 }
 
 
-void lance_set_multicast (struct net_device *dev)
+void lance_set_multicast(struct net_device *dev)
 {
-        struct lance_private *lp = netdev_priv(dev);
-        volatile struct lance_init_block *ib = lp->init_block;
+	struct lance_private *lp = netdev_priv(dev);
+	volatile struct lance_init_block *ib = lp->init_block;
 	int stopped;
 
 	stopped = netif_queue_stopped(dev);
 	if (!stopped)
-		netif_stop_queue (dev);
-
-        while (lp->tx_old != lp->tx_new)
-                schedule();
+		netif_stop_queue(dev);
 
-        WRITERAP(lp, LE_CSR0);
-        WRITERDP(lp, LE_C0_STOP);
-        lance_init_ring (dev);
+	while (lp->tx_old != lp->tx_new)
+		schedule();
 
-        if (dev->flags & IFF_PROMISC) {
-                ib->mode |= LE_MO_PROM;
-        } else {
-                ib->mode &= ~LE_MO_PROM;
-                lance_load_multicast (dev);
-        }
-        load_csrs (lp);
-        init_restart_lance (lp);
+	WRITERAP(lp, LE_CSR0);
+	WRITERDP(lp, LE_C0_STOP);
+	lance_init_ring(dev);
+
+	if (dev->flags & IFF_PROMISC) {
+		ib->mode |= LE_MO_PROM;
+	} else {
+		ib->mode &= ~LE_MO_PROM;
+		lance_load_multicast(dev);
+	}
+	load_csrs(lp);
+	init_restart_lance(lp);
 
 	if (!stopped)
-		netif_start_queue (dev);
+		netif_start_queue(dev);
 }
 EXPORT_SYMBOL_GPL(lance_set_multicast);
 
@@ -648,10 +655,10 @@ void lance_poll(struct net_device *dev)
 {
 	struct lance_private *lp = netdev_priv(dev);
 
-	spin_lock (&lp->devlock);
+	spin_lock(&lp->devlock);
 	WRITERAP(lp, LE_CSR0);
 	WRITERDP(lp, LE_C0_STRT);
-	spin_unlock (&lp->devlock);
+	spin_unlock(&lp->devlock);
 	lance_interrupt(dev->irq, dev);
 }
 #endif
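
The multicast path in lance_load_multicast() above hashes each address with ether_crc_le() and uses the top six bits of the CRC to pick one bit in the 64-bit logical-address filter, viewed as four 16-bit words. A minimal user-space sketch of that mapping follows; the bitwise CRC routine is only a stand-in for the kernel's ether_crc_le(), and the sample address is invented.

#include <stdint.h>
#include <stdio.h>

/* Reflected CRC-32 (poly 0xedb88320, initial ~0, no final inversion),
 * matching what ether_crc_le() computes in the kernel.
 */
static uint32_t crc32_le(uint32_t crc, const uint8_t *p, int len)
{
	int i;

	while (len--) {
		crc ^= *p++;
		for (i = 0; i < 8; i++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
	}
	return crc;
}

int main(void)
{
	uint8_t addr[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };	/* invented multicast address */
	uint16_t mcast_table[4] = { 0 };	/* ib->filter seen as four 16-bit words */
	uint32_t crc;

	crc = crc32_le(~0u, addr, 6);		/* stand-in for ether_crc_le(6, addr) */
	crc >>= 26;				/* keep the top six bits: 0..63 */
	mcast_table[crc >> 4] |= 1 << (crc & 0xf);

	printf("bit %u -> filter word %u, mask 0x%04x\n",
	       (unsigned)crc, (unsigned)(crc >> 4), 1u << (crc & 0xf));
	return 0;
}

Because this is a hash filter, several addresses can map onto the same bit, so the chip may still pass up multicast frames the stack did not ask for.
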
diff --git a/drivers/net/ethernet/amd/7990.h b/drivers/net/ethernet/amd/7990.h
index ae33a99bf476..e9e0be313804 100644
--- a/drivers/net/ethernet/amd/7990.h
+++ b/drivers/net/ethernet/amd/7990.h
@@ -35,33 +35,32 @@
 #define LANCE_LOG_RX_BUFFERS 3
 #endif
 
-#define TX_RING_SIZE (1<<LANCE_LOG_TX_BUFFERS)
-#define RX_RING_SIZE (1<<LANCE_LOG_RX_BUFFERS)
-#define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
-#define RX_RING_MOD_MASK (RX_RING_SIZE - 1)
-#define TX_RING_LEN_BITS ((LANCE_LOG_TX_BUFFERS) << 29)
-#define RX_RING_LEN_BITS ((LANCE_LOG_RX_BUFFERS) << 29)
-#define PKT_BUFF_SIZE (1544)
-#define RX_BUFF_SIZE PKT_BUFF_SIZE
-#define TX_BUFF_SIZE PKT_BUFF_SIZE
+#define TX_RING_SIZE		(1 << LANCE_LOG_TX_BUFFERS)
+#define RX_RING_SIZE		(1 << LANCE_LOG_RX_BUFFERS)
+#define TX_RING_MOD_MASK	(TX_RING_SIZE - 1)
+#define RX_RING_MOD_MASK	(RX_RING_SIZE - 1)
+#define TX_RING_LEN_BITS	((LANCE_LOG_TX_BUFFERS) << 29)
+#define RX_RING_LEN_BITS	((LANCE_LOG_RX_BUFFERS) << 29)
+#define PKT_BUFF_SIZE		(1544)
+#define RX_BUFF_SIZE		PKT_BUFF_SIZE
+#define TX_BUFF_SIZE		PKT_BUFF_SIZE
 
 /* Each receive buffer is described by a receive message descriptor (RMD) */
 struct lance_rx_desc {
-	volatile unsigned short rmd0;        /* low address of packet */
-	volatile unsigned char  rmd1_bits;   /* descriptor bits */
-	volatile unsigned char  rmd1_hadr;   /* high address of packet */
-	volatile short    length;    	    /* This length is 2s complement (negative)!
-				     * Buffer length
-				     */
-	volatile unsigned short mblength;    /* Actual number of bytes received */
+	volatile unsigned short rmd0;	    /* low address of packet */
+	volatile unsigned char  rmd1_bits;  /* descriptor bits */
+	volatile unsigned char  rmd1_hadr;  /* high address of packet */
+	volatile short    length;	    /* This length is 2s complement (negative)!
+					     * Buffer length */
+	volatile unsigned short mblength;   /* Actual number of bytes received */
 };
 
 /* Ditto for TMD: */
 struct lance_tx_desc {
-	volatile unsigned short tmd0;        /* low address of packet */
-	volatile unsigned char  tmd1_bits;   /* descriptor bits */
-	volatile unsigned char  tmd1_hadr;   /* high address of packet */
-	volatile short    length;       	    /* Length is 2s complement (negative)! */
+	volatile unsigned short tmd0;	    /* low address of packet */
+	volatile unsigned char  tmd1_bits;  /* descriptor bits */
+	volatile unsigned char  tmd1_hadr;  /* high address of packet */
+	volatile short    length;	    /* Length is 2s complement (negative)! */
 	volatile unsigned short misc;
 };
 
@@ -71,181 +70,178 @@ struct lance_tx_desc {
  * init block,the Tx and Rx rings and the buffers together in memory:
  */
 struct lance_init_block {
-        volatile unsigned short mode;            /* Pre-set mode (reg. 15) */
-        volatile unsigned char phys_addr[6];     /* Physical ethernet address */
-        volatile unsigned filter[2];             /* Multicast filter (64 bits) */
-
-        /* Receive and transmit ring base, along with extra bits. */
-        volatile unsigned short rx_ptr;          /* receive descriptor addr */
-        volatile unsigned short rx_len;          /* receive len and high addr */
-        volatile unsigned short tx_ptr;          /* transmit descriptor addr */
-        volatile unsigned short tx_len;          /* transmit len and high addr */
-
-        /* The Tx and Rx ring entries must be aligned on 8-byte boundaries.
-         * This will be true if this whole struct is 8-byte aligned.
-         */
-        volatile struct lance_tx_desc btx_ring[TX_RING_SIZE];
-        volatile struct lance_rx_desc brx_ring[RX_RING_SIZE];
-
-        volatile char   tx_buf [TX_RING_SIZE][TX_BUFF_SIZE];
-        volatile char   rx_buf [RX_RING_SIZE][RX_BUFF_SIZE];
-        /* we use this just to make the struct big enough that we can move its startaddr
-         * in order to force alignment to an eight byte boundary.
-         */
+	volatile unsigned short mode;		/* Pre-set mode (reg. 15) */
+	volatile unsigned char phys_addr[6];	/* Physical ethernet address */
+	volatile unsigned filter[2];		/* Multicast filter (64 bits) */
+
+	/* Receive and transmit ring base, along with extra bits. */
+	volatile unsigned short rx_ptr;		/* receive descriptor addr */
+	volatile unsigned short rx_len;		/* receive len and high addr */
+	volatile unsigned short tx_ptr;		/* transmit descriptor addr */
+	volatile unsigned short tx_len;		/* transmit len and high addr */
+
+	/* The Tx and Rx ring entries must be aligned on 8-byte boundaries.
+	 * This will be true if this whole struct is 8-byte aligned.
+	 */
+	volatile struct lance_tx_desc btx_ring[TX_RING_SIZE];
+	volatile struct lance_rx_desc brx_ring[RX_RING_SIZE];
+
+	volatile char tx_buf[TX_RING_SIZE][TX_BUFF_SIZE];
+	volatile char rx_buf[RX_RING_SIZE][RX_BUFF_SIZE];
+	/* we use this just to make the struct big enough that we can move its startaddr
+	 * in order to force alignment to an eight byte boundary.
+	 */
 };
 
 /* This is where we keep all the stuff the driver needs to know about.
  * I'm definitely unhappy about the mechanism for allowing specific
  * drivers to add things...
  */
-struct lance_private
-{
-        char *name;
+struct lance_private {
+	const char *name;
 	unsigned long base;
-        volatile struct lance_init_block *init_block; /* CPU address of RAM */
-        volatile struct lance_init_block *lance_init_block; /* LANCE address of RAM */
+	volatile struct lance_init_block *init_block; /* CPU address of RAM */
+	volatile struct lance_init_block *lance_init_block; /* LANCE address of RAM */
 
-        int rx_new, tx_new;
-        int rx_old, tx_old;
+	int rx_new, tx_new;
+	int rx_old, tx_old;
 
-        int lance_log_rx_bufs, lance_log_tx_bufs;
-        int rx_ring_mod_mask, tx_ring_mod_mask;
+	int lance_log_rx_bufs, lance_log_tx_bufs;
+	int rx_ring_mod_mask, tx_ring_mod_mask;
 
-        int tpe;                                  /* TPE is selected */
-        int auto_select;                          /* cable-selection is by carrier */
-        unsigned short busmaster_regval;
+	int tpe;			/* TPE is selected */
+	int auto_select;		/* cable-selection is by carrier */
+	unsigned short busmaster_regval;
 
-        unsigned int irq;                         /* IRQ to register */
+	unsigned int irq;		/* IRQ to register */
 
-        /* This is because the HP LANCE is disgusting and you have to check
-         * a DIO-specific register every time you read/write the LANCE regs :-<
-         * [could we get away with making these some sort of macro?]
-         */
-        void (*writerap)(void *, unsigned short);
-        void (*writerdp)(void *, unsigned short);
-        unsigned short (*readrdp)(void *);
+	/* This is because the HP LANCE is disgusting and you have to check
+	 * a DIO-specific register every time you read/write the LANCE regs :-<
+	 * [could we get away with making these some sort of macro?]
+	 */
+	void (*writerap)(void *, unsigned short);
+	void (*writerdp)(void *, unsigned short);
+	unsigned short (*readrdp)(void *);
 	spinlock_t devlock;
 	char tx_full;
 };
 
 /*
- *              Am7990 Control and Status Registers
+ *		Am7990 Control and Status Registers
  */
-#define LE_CSR0         0x0000          /* LANCE Controller Status */
-#define LE_CSR1         0x0001          /* IADR[15:0] (bit0==0 ie word aligned) */
-#define LE_CSR2         0x0002          /* IADR[23:16] (high bits reserved) */
-#define LE_CSR3         0x0003          /* Misc */
+#define LE_CSR0		0x0000	/* LANCE Controller Status */
+#define LE_CSR1		0x0001	/* IADR[15:0] (bit0==0 ie word aligned) */
+#define LE_CSR2		0x0002	/* IADR[23:16] (high bits reserved) */
+#define LE_CSR3		0x0003	/* Misc */
 
 /*
  *		Bit definitions for CSR0 (LANCE Controller Status)
  */
-#define LE_C0_ERR	0x8000		/* Error = BABL | CERR | MISS | MERR */
-#define LE_C0_BABL	0x4000		/* Babble: Transmitted too many bits */
-#define LE_C0_CERR	0x2000		/* No Heartbeat (10BASE-T) */
-#define LE_C0_MISS	0x1000		/* Missed Frame (no rx buffer to put it in) */
-#define LE_C0_MERR	0x0800		/* Memory Error */
-#define LE_C0_RINT	0x0400		/* Receive Interrupt */
-#define LE_C0_TINT	0x0200		/* Transmit Interrupt */
-#define LE_C0_IDON	0x0100		/* Initialization Done */
-#define LE_C0_INTR	0x0080		/* Interrupt Flag
-                                         = BABL | MISS | MERR | RINT | TINT | IDON */
-#define LE_C0_INEA	0x0040		/* Interrupt Enable */
-#define LE_C0_RXON	0x0020		/* Receive On */
-#define LE_C0_TXON	0x0010		/* Transmit On */
-#define LE_C0_TDMD	0x0008		/* Transmit Demand */
-#define LE_C0_STOP	0x0004		/* Stop */
-#define LE_C0_STRT	0x0002		/* Start */
-#define LE_C0_INIT	0x0001		/* Initialize */
+#define LE_C0_ERR	0x8000	/* Error = BABL | CERR | MISS | MERR */
+#define LE_C0_BABL	0x4000	/* Babble: Transmitted too many bits */
+#define LE_C0_CERR	0x2000	/* No Heartbeat (10BASE-T) */
+#define LE_C0_MISS	0x1000	/* Missed Frame (no rx buffer to put it in) */
+#define LE_C0_MERR	0x0800	/* Memory Error */
+#define LE_C0_RINT	0x0400	/* Receive Interrupt */
+#define LE_C0_TINT	0x0200	/* Transmit Interrupt */
+#define LE_C0_IDON	0x0100	/* Initialization Done */
+#define LE_C0_INTR	0x0080	/* Interrupt Flag
+				   = BABL | MISS | MERR | RINT | TINT | IDON */
+#define LE_C0_INEA	0x0040	/* Interrupt Enable */
+#define LE_C0_RXON	0x0020	/* Receive On */
+#define LE_C0_TXON	0x0010	/* Transmit On */
+#define LE_C0_TDMD	0x0008	/* Transmit Demand */
+#define LE_C0_STOP	0x0004	/* Stop */
+#define LE_C0_STRT	0x0002	/* Start */
+#define LE_C0_INIT	0x0001	/* Initialize */
 
 
 /*
  *		Bit definitions for CSR3
  */
-#define LE_C3_BSWP	0x0004		/* Byte Swap
-					   (on for big endian byte order) */
-#define LE_C3_ACON	0x0002		/* ALE Control
-					   (on for active low ALE) */
-#define LE_C3_BCON	0x0001		/* Byte Control */
+#define LE_C3_BSWP	0x0004	/* Byte Swap (on for big endian byte order) */
+#define LE_C3_ACON	0x0002	/* ALE Control (on for active low ALE) */
+#define LE_C3_BCON	0x0001	/* Byte Control */
 
 
 /*
  *		Mode Flags
  */
-#define LE_MO_PROM	0x8000		/* Promiscuous Mode */
+#define LE_MO_PROM	0x8000	/* Promiscuous Mode */
 /* these next ones 0x4000 -- 0x0080 are not available on the LANCE 7990,
  * but they are in NetBSD's am7990.h, presumably for backwards-compatible chips
  */
-#define LE_MO_DRCVBC  0x4000          /* disable receive broadcast */
-#define LE_MO_DRCVPA  0x2000          /* disable physical address detection */
-#define LE_MO_DLNKTST 0x1000          /* disable link status */
-#define LE_MO_DAPC    0x0800          /* disable automatic polarity correction */
-#define LE_MO_MENDECL 0x0400          /* MENDEC loopback mode */
-#define LE_MO_LRTTSEL 0x0200          /* lower RX threshold / TX mode selection */
-#define LE_MO_PSEL1   0x0100          /* port selection bit1 */
-#define LE_MO_PSEL0   0x0080          /* port selection bit0 */
+#define LE_MO_DRCVBC	0x4000	/* disable receive broadcast */
+#define LE_MO_DRCVPA	0x2000	/* disable physical address detection */
+#define LE_MO_DLNKTST	0x1000	/* disable link status */
+#define LE_MO_DAPC	0x0800	/* disable automatic polarity correction */
+#define LE_MO_MENDECL	0x0400	/* MENDEC loopback mode */
+#define LE_MO_LRTTSEL	0x0200	/* lower RX threshold / TX mode selection */
+#define LE_MO_PSEL1	0x0100	/* port selection bit1 */
+#define LE_MO_PSEL0	0x0080	/* port selection bit0 */
 /* and this one is from the C-LANCE data sheet... */
-#define LE_MO_EMBA      0x0080          /* Enable Modified Backoff Algorithm
-                                           (C-LANCE, not original LANCE) */
-#define LE_MO_INTL	0x0040		/* Internal Loopback */
-#define LE_MO_DRTY	0x0020		/* Disable Retry */
-#define LE_MO_FCOLL	0x0010		/* Force Collision */
-#define LE_MO_DXMTFCS	0x0008		/* Disable Transmit CRC */
-#define LE_MO_LOOP	0x0004		/* Loopback Enable */
-#define LE_MO_DTX	0x0002		/* Disable Transmitter */
-#define LE_MO_DRX	0x0001		/* Disable Receiver */
+#define LE_MO_EMBA	0x0080	/* Enable Modified Backoff Algorithm
+				   (C-LANCE, not original LANCE) */
+#define LE_MO_INTL	0x0040	/* Internal Loopback */
+#define LE_MO_DRTY	0x0020	/* Disable Retry */
+#define LE_MO_FCOLL	0x0010	/* Force Collision */
+#define LE_MO_DXMTFCS	0x0008	/* Disable Transmit CRC */
+#define LE_MO_LOOP	0x0004	/* Loopback Enable */
+#define LE_MO_DTX	0x0002	/* Disable Transmitter */
+#define LE_MO_DRX	0x0001	/* Disable Receiver */
 
 
 /*
  *		Receive Flags
  */
-#define LE_R1_OWN	0x80		/* LANCE owns the descriptor */
-#define LE_R1_ERR	0x40		/* Error */
-#define LE_R1_FRA	0x20		/* Framing Error */
-#define LE_R1_OFL	0x10		/* Overflow Error */
-#define LE_R1_CRC	0x08		/* CRC Error */
-#define LE_R1_BUF	0x04		/* Buffer Error */
-#define LE_R1_SOP	0x02		/* Start of Packet */
-#define LE_R1_EOP	0x01		/* End of Packet */
-#define LE_R1_POK       0x03		/* Packet is complete: SOP + EOP */
+#define LE_R1_OWN	0x80	/* LANCE owns the descriptor */
+#define LE_R1_ERR	0x40	/* Error */
+#define LE_R1_FRA	0x20	/* Framing Error */
+#define LE_R1_OFL	0x10	/* Overflow Error */
+#define LE_R1_CRC	0x08	/* CRC Error */
+#define LE_R1_BUF	0x04	/* Buffer Error */
+#define LE_R1_SOP	0x02	/* Start of Packet */
+#define LE_R1_EOP	0x01	/* End of Packet */
+#define LE_R1_POK	0x03	/* Packet is complete: SOP + EOP */
 
 
 /*
  *		Transmit Flags
  */
-#define LE_T1_OWN	0x80		/* LANCE owns the descriptor */
-#define LE_T1_ERR	0x40		/* Error */
-#define LE_T1_RES	0x20		/* Reserved, LANCE writes this with a zero */
-#define LE_T1_EMORE	0x10		/* More than one retry needed */
-#define LE_T1_EONE	0x08		/* One retry needed */
-#define LE_T1_EDEF	0x04		/* Deferred */
-#define LE_T1_SOP	0x02		/* Start of Packet */
-#define LE_T1_EOP	0x01		/* End of Packet */
-#define LE_T1_POK	0x03		/* Packet is complete: SOP + EOP */
+#define LE_T1_OWN	0x80	/* LANCE owns the descriptor */
+#define LE_T1_ERR	0x40	/* Error */
+#define LE_T1_RES	0x20	/* Reserved, LANCE writes this with a zero */
+#define LE_T1_EMORE	0x10	/* More than one retry needed */
+#define LE_T1_EONE	0x08	/* One retry needed */
+#define LE_T1_EDEF	0x04	/* Deferred */
+#define LE_T1_SOP	0x02	/* Start of Packet */
+#define LE_T1_EOP	0x01	/* End of Packet */
+#define LE_T1_POK	0x03	/* Packet is complete: SOP + EOP */
 
 /*
  *		Error Flags
  */
-#define LE_T3_BUF 	0x8000		/* Buffer Error */
-#define LE_T3_UFL 	0x4000		/* Underflow Error */
-#define LE_T3_LCOL 	0x1000		/* Late Collision */
-#define LE_T3_CLOS 	0x0800		/* Loss of Carrier */
-#define LE_T3_RTY 	0x0400		/* Retry Error */
-#define LE_T3_TDR	0x03ff		/* Time Domain Reflectometry */
+#define LE_T3_BUF	0x8000	/* Buffer Error */
+#define LE_T3_UFL	0x4000	/* Underflow Error */
+#define LE_T3_LCOL	0x1000	/* Late Collision */
+#define LE_T3_CLOS	0x0800	/* Loss of Carrier */
+#define LE_T3_RTY	0x0400	/* Retry Error */
+#define LE_T3_TDR	0x03ff	/* Time Domain Reflectometry */
 
 /* Miscellaneous useful macros */
 
-#define TX_BUFFS_AVAIL ((lp->tx_old<=lp->tx_new)?\
-                        lp->tx_old+lp->tx_ring_mod_mask-lp->tx_new:\
-                        lp->tx_old - lp->tx_new-1)
+#define TX_BUFFS_AVAIL ((lp->tx_old <= lp->tx_new) ? \
+			lp->tx_old + lp->tx_ring_mod_mask - lp->tx_new : \
+			lp->tx_old - lp->tx_new - 1)
 
 /* The LANCE only uses 24 bit addresses. This does the obvious thing. */
 #define LANCE_ADDR(x) ((int)(x) & ~0xff000000)
 
 /* Now the prototypes we export */
 int lance_open(struct net_device *dev);
-int lance_close (struct net_device *dev);
-int lance_start_xmit (struct sk_buff *skb, struct net_device *dev);
-void lance_set_multicast (struct net_device *dev);
+int lance_close(struct net_device *dev);
+int lance_start_xmit(struct sk_buff *skb, struct net_device *dev);
+void lance_set_multicast(struct net_device *dev);
 void lance_tx_timeout(struct net_device *dev);
 #ifdef CONFIG_NET_POLL_CONTROLLER
 void lance_poll(struct net_device *dev);
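
TX_BUFFS_AVAIL above counts free transmit descriptors while always keeping one slot in reserve, so a full ring is never mistaken for an empty one; its two branches are just the wrap-around form of (tx_old - tx_new - 1) modulo the ring size. A self-contained sketch checking that equivalence, assuming a ring of 2^3 entries purely for illustration:

#include <assert.h>
#include <stdio.h>

#define LOG_TX_BUFFERS	3			/* illustrative; any power-of-two ring works */
#define RING_SIZE	(1 << LOG_TX_BUFFERS)
#define MOD_MASK	(RING_SIZE - 1)

/* Same shape as the TX_BUFFS_AVAIL macro in 7990.h */
static int tx_buffs_avail(int tx_old, int tx_new)
{
	return (tx_old <= tx_new) ?
		tx_old + MOD_MASK - tx_new :
		tx_old - tx_new - 1;
}

int main(void)
{
	int tx_old, tx_new;

	for (tx_old = 0; tx_old < RING_SIZE; tx_old++)
		for (tx_new = 0; tx_new < RING_SIZE; tx_new++)
			assert(tx_buffs_avail(tx_old, tx_new) ==
			       ((tx_old - tx_new - 1) & MOD_MASK));

	printf("an idle ring (tx_old == tx_new) offers %d of %d descriptors\n",
	       tx_buffs_avail(0, 0), RING_SIZE);
	return 0;
}
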
diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c
index d042511bdc13..2061b471fd16 100644
--- a/drivers/net/ethernet/amd/amd8111e.c
+++ b/drivers/net/ethernet/amd/amd8111e.c
@@ -24,9 +24,7 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307
- * USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
 
 Module Name:
 
@@ -74,7 +72,6 @@ Revision History:
 #include <linux/types.h>
 #include <linux/compiler.h>
 #include <linux/delay.h>
-#include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/ioport.h>
 #include <linux/pci.h>
diff --git a/drivers/net/ethernet/amd/amd8111e.h b/drivers/net/ethernet/amd/amd8111e.h
index 8baa3527ba74..a75092d584cc 100644
--- a/drivers/net/ethernet/amd/amd8111e.h
+++ b/drivers/net/ethernet/amd/amd8111e.h
@@ -13,9 +13,7 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307
- * USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
 
 Module Name:
 
@@ -753,7 +751,7 @@ struct amd8111e_priv{
 	const char *name;
 	struct pci_dev *pci_dev;	/* Ptr to the associated pci_dev */
 	struct net_device* amd8111e_net_dev; 	/* ptr to associated net_device */
-	/* Transmit and recive skbs */
+	/* Transmit and receive skbs */
 	struct sk_buff *tx_skbuff[NUM_TX_BUFFERS];
 	struct sk_buff *rx_skbuff[NUM_RX_BUFFERS];
 	/* Transmit and receive dma mapped addr */
diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
index 427c148bb643..a2bd91e3d302 100644
--- a/drivers/net/ethernet/amd/au1000_eth.c
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -27,8 +27,7 @@
  *  for more details.
  *
  *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *  with this program; if not, see <http://www.gnu.org/licenses/>.
  *
  * ########################################################################
  *
@@ -48,7 +47,6 @@
 #include <linux/bitops.h>
 #include <linux/slab.h>
 #include <linux/interrupt.h>
-#include <linux/init.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/ethtool.h>
diff --git a/drivers/net/ethernet/amd/au1000_eth.h b/drivers/net/ethernet/amd/au1000_eth.h
index 4b7f7ad62bb8..ca53024f017f 100644
--- a/drivers/net/ethernet/amd/au1000_eth.h
+++ b/drivers/net/ethernet/amd/au1000_eth.h
@@ -18,8 +18,7 @@
  *  for more details.
  *
  *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *  with this program; if not, see <http://www.gnu.org/licenses/>.
  *
  * ########################################################################
  *
diff --git a/drivers/net/ethernet/amd/hplance.c b/drivers/net/ethernet/amd/hplance.c
index 0c61fd50d882..47ce57c2c893 100644
--- a/drivers/net/ethernet/amd/hplance.c
+++ b/drivers/net/ethernet/amd/hplance.c
@@ -127,41 +127,41 @@ static void hplance_remove_one(struct dio_dev *d)
 /* Initialise a single lance board at the given DIO device */
 static void hplance_init(struct net_device *dev, struct dio_dev *d)
 {
-        unsigned long va = (d->resource.start + DIO_VIRADDRBASE);
-        struct hplance_private *lp;
-        int i;
-
-        /* reset the board */
-        out_8(va+DIO_IDOFF, 0xff);
-        udelay(100);                              /* ariba! ariba! udelay! udelay! */
-
-        /* Fill the dev fields */
-        dev->base_addr = va;
-        dev->netdev_ops = &hplance_netdev_ops;
-        dev->dma = 0;
-
-        for (i=0; i<6; i++) {
-                /* The NVRAM holds our ethernet address, one nibble per byte,
-                 * at bytes NVRAMOFF+1,3,5,7,9...
-                 */
-                dev->dev_addr[i] = ((in_8(va + HPLANCE_NVRAMOFF + i*4 + 1) & 0xF) << 4)
-                        | (in_8(va + HPLANCE_NVRAMOFF + i*4 + 3) & 0xF);
-        }
-
-        lp = netdev_priv(dev);
-        lp->lance.name = (char*)d->name;                /* discards const, shut up gcc */
-        lp->lance.base = va;
-        lp->lance.init_block = (struct lance_init_block *)(va + HPLANCE_MEMOFF); /* CPU addr */
-        lp->lance.lance_init_block = NULL;              /* LANCE addr of same RAM */
-        lp->lance.busmaster_regval = LE_C3_BSWP;        /* we're bigendian */
-        lp->lance.irq = d->ipl;
-        lp->lance.writerap = hplance_writerap;
-        lp->lance.writerdp = hplance_writerdp;
-        lp->lance.readrdp = hplance_readrdp;
-        lp->lance.lance_log_rx_bufs = LANCE_LOG_RX_BUFFERS;
-        lp->lance.lance_log_tx_bufs = LANCE_LOG_TX_BUFFERS;
-        lp->lance.rx_ring_mod_mask = RX_RING_MOD_MASK;
-        lp->lance.tx_ring_mod_mask = TX_RING_MOD_MASK;
+	unsigned long va = (d->resource.start + DIO_VIRADDRBASE);
+	struct hplance_private *lp;
+	int i;
+
+	/* reset the board */
+	out_8(va + DIO_IDOFF, 0xff);
+	udelay(100);                              /* ariba! ariba! udelay! udelay! */
+
+	/* Fill the dev fields */
+	dev->base_addr = va;
+	dev->netdev_ops = &hplance_netdev_ops;
+	dev->dma = 0;
+
+	for (i = 0; i < 6; i++) {
+		/* The NVRAM holds our ethernet address, one nibble per byte,
+		 * at bytes NVRAMOFF+1,3,5,7,9...
+		 */
+		dev->dev_addr[i] = ((in_8(va + HPLANCE_NVRAMOFF + i*4 + 1) & 0xF) << 4)
+			| (in_8(va + HPLANCE_NVRAMOFF + i*4 + 3) & 0xF);
+	}
+
+	lp = netdev_priv(dev);
+	lp->lance.name = d->name;
+	lp->lance.base = va;
+	lp->lance.init_block = (struct lance_init_block *)(va + HPLANCE_MEMOFF); /* CPU addr */
+	lp->lance.lance_init_block = NULL;              /* LANCE addr of same RAM */
+	lp->lance.busmaster_regval = LE_C3_BSWP;        /* we're bigendian */
+	lp->lance.irq = d->ipl;
+	lp->lance.writerap = hplance_writerap;
+	lp->lance.writerdp = hplance_writerdp;
+	lp->lance.readrdp = hplance_readrdp;
+	lp->lance.lance_log_rx_bufs = LANCE_LOG_RX_BUFFERS;
+	lp->lance.lance_log_tx_bufs = LANCE_LOG_TX_BUFFERS;
+	lp->lance.rx_ring_mod_mask = RX_RING_MOD_MASK;
+	lp->lance.tx_ring_mod_mask = TX_RING_MOD_MASK;
 }
 
 /* This is disgusting. We have to check the DIO status register for ack every
@@ -195,25 +195,25 @@ static unsigned short hplance_readrdp(void *priv)
 
 static int hplance_open(struct net_device *dev)
 {
-        int status;
-        struct lance_private *lp = netdev_priv(dev);
+	int status;
+	struct lance_private *lp = netdev_priv(dev);
 
-        status = lance_open(dev);                 /* call generic lance open code */
-        if (status)
-                return status;
-        /* enable interrupts at board level. */
-        out_8(lp->base + HPLANCE_STATUS, LE_IE);
+	status = lance_open(dev);                 /* call generic lance open code */
+	if (status)
+		return status;
+	/* enable interrupts at board level. */
+	out_8(lp->base + HPLANCE_STATUS, LE_IE);
 
-        return 0;
+	return 0;
 }
 
 static int hplance_close(struct net_device *dev)
 {
-        struct lance_private *lp = netdev_priv(dev);
+	struct lance_private *lp = netdev_priv(dev);
 
-        out_8(lp->base + HPLANCE_STATUS, 0);	/* disable interrupts at boardlevel */
-        lance_close(dev);
-        return 0;
+	out_8(lp->base + HPLANCE_STATUS, 0);	/* disable interrupts at boardlevel */
+	lance_close(dev);
+	return 0;
 }
 
 static int __init hplance_init_module(void)
@@ -223,7 +223,7 @@ static int __init hplance_init_module(void)
 
 static void __exit hplance_cleanup_module(void)
 {
-        dio_unregister_driver(&hplance_driver);
+	dio_unregister_driver(&hplance_driver);
 }
 
 module_init(hplance_init_module);
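
hplance_init() above rebuilds the station address from board NVRAM, which stores one nibble per byte at offsets NVRAMOFF+1, 3, 5, ...: each address byte is the high nibble from one slot combined with the low nibble from the next. A small sketch of that reassembly against a mock NVRAM image (the image contents, and the resulting address, are invented for the example):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Four NVRAM bytes per address byte; only offsets 1 and 3 of each
	 * group carry data (high nibble first, then low nibble).
	 */
	uint8_t nvram[24] = {
		0, 0x0, 0, 0x8,   0, 0x0, 0, 0x0,   0, 0x0, 0, 0x9,
		0, 0x1, 0, 0x2,   0, 0x3, 0, 0x4,   0, 0x5, 0, 0x6,
	};
	uint8_t addr[6];
	int i;

	for (i = 0; i < 6; i++)		/* same arithmetic as hplance_init() */
		addr[i] = ((nvram[i * 4 + 1] & 0xF) << 4) |
			   (nvram[i * 4 + 3] & 0xF);

	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
	return 0;
}
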
diff --git a/drivers/net/ethernet/amd/mvme147.c b/drivers/net/ethernet/amd/mvme147.c
index e108e911da05..0e8399dec054 100644
--- a/drivers/net/ethernet/amd/mvme147.c
+++ b/drivers/net/ethernet/amd/mvme147.c
@@ -94,33 +94,31 @@ struct net_device * __init mvme147lance_probe(int unit)
 	dev->netdev_ops = &lance_netdev_ops;
 	dev->dma = 0;
 
-	addr=(u_long *)ETHERNET_ADDRESS;
+	addr = (u_long *)ETHERNET_ADDRESS;
 	address = *addr;
-	dev->dev_addr[0]=0x08;
-	dev->dev_addr[1]=0x00;
-	dev->dev_addr[2]=0x3e;
-	address=address>>8;
-	dev->dev_addr[5]=address&0xff;
-	address=address>>8;
-	dev->dev_addr[4]=address&0xff;
-	address=address>>8;
-	dev->dev_addr[3]=address&0xff;
-
-	printk("%s: MVME147 at 0x%08lx, irq %d, "
-	       "Hardware Address %pM\n",
+	dev->dev_addr[0] = 0x08;
+	dev->dev_addr[1] = 0x00;
+	dev->dev_addr[2] = 0x3e;
+	address = address >> 8;
+	dev->dev_addr[5] = address&0xff;
+	address = address >> 8;
+	dev->dev_addr[4] = address&0xff;
+	address = address >> 8;
+	dev->dev_addr[3] = address&0xff;
+
+	printk("%s: MVME147 at 0x%08lx, irq %d, Hardware Address %pM\n",
 	       dev->name, dev->base_addr, MVME147_LANCE_IRQ,
 	       dev->dev_addr);
 
 	lp = netdev_priv(dev);
 	lp->ram = __get_dma_pages(GFP_ATOMIC, 3);	/* 16K */
-	if (!lp->ram)
-	{
+	if (!lp->ram) {
 		printk("%s: No memory for LANCE buffers\n", dev->name);
 		free_netdev(dev);
 		return ERR_PTR(-ENOMEM);
 	}
 
-	lp->lance.name = (char*)name;                   /* discards const, shut up gcc */
+	lp->lance.name = name;
 	lp->lance.base = dev->base_addr;
 	lp->lance.init_block = (struct lance_init_block *)(lp->ram); /* CPU addr */
 	lp->lance.lance_init_block = (struct lance_init_block *)(lp->ram);                 /* LANCE addr of same RAM */
@@ -167,8 +165,8 @@ static int m147lance_open(struct net_device *dev)
 	if (status)
 		return status;
 	/* enable interrupts at board level. */
-	m147_pcc->lan_cntrl=0;       /* clear the interrupts (if any) */
-	m147_pcc->lan_cntrl=0x08 | 0x04;     /* Enable irq 4 */
+	m147_pcc->lan_cntrl = 0;       /* clear the interrupts (if any) */
+	m147_pcc->lan_cntrl = 0x08 | 0x04;     /* Enable irq 4 */
 
 	return 0;
 }
@@ -176,7 +174,7 @@ static int m147lance_open(struct net_device *dev)
 static int m147lance_close(struct net_device *dev)
 {
 	/* disable interrupts at boardlevel */
-	m147_pcc->lan_cntrl=0x0; /* disable interrupts */
+	m147_pcc->lan_cntrl = 0x0; /* disable interrupts */
 	lance_close(dev);
 	return 0;
 }
diff --git a/drivers/net/ethernet/amd/nmclan_cs.c b/drivers/net/ethernet/amd/nmclan_cs.c
index d4ed89130c52..08569fe2b182 100644
--- a/drivers/net/ethernet/amd/nmclan_cs.c
+++ b/drivers/net/ethernet/amd/nmclan_cs.c
@@ -132,7 +132,6 @@ Include Files
 
 #include <linux/module.h>
 #include <linux/kernel.h>
-#include <linux/init.h>
 #include <linux/ptrace.h>
 #include <linux/slab.h>
 #include <linux/string.h>
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index 38492e0b704e..9339cccfe05a 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -1668,7 +1668,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
 	for (i = 0; i < ETH_ALEN; i++)
 		promaddr[i] = inb(ioaddr + i);
 
-	if (memcmp(promaddr, dev->dev_addr, ETH_ALEN) ||
+	if (!ether_addr_equal(promaddr, dev->dev_addr) ||
 	    !is_valid_ether_addr(dev->dev_addr)) {
 		if (is_valid_ether_addr(promaddr)) {
 			if (pcnet32_debug & NETIF_MSG_PROBE) {
diff --git a/drivers/net/ethernet/amd/sunlance.c b/drivers/net/ethernet/amd/sunlance.c
index ece56831a647..5e4273b7aa27 100644
--- a/drivers/net/ethernet/amd/sunlance.c
+++ b/drivers/net/ethernet/amd/sunlance.c
@@ -80,7 +80,6 @@ static char lancestr[] = "LANCE";
 #include <linux/in.h>
 #include <linux/string.h>
 #include <linux/delay.h>
-#include <linux/init.h>
 #include <linux/crc32.h>
 #include <linux/errno.h>
 #include <linux/socket.h> /* Used for the temporal inet entries and routing */
diff --git a/drivers/net/ethernet/arc/emac.h b/drivers/net/ethernet/arc/emac.h
index dc08678bf9a4..928fac6dd10a 100644
--- a/drivers/net/ethernet/arc/emac.h
+++ b/drivers/net/ethernet/arc/emac.h
@@ -122,7 +122,6 @@ struct buffer_state {
  * @link:	PHY's last seen link state.
  * @duplex:	PHY's last set duplex mode.
  * @speed:	PHY's last set speed.
- * @max_speed:	Maximum supported by current system network data-rate.
  */
 struct arc_emac_priv {
 	/* Devices */
@@ -152,7 +151,6 @@ struct arc_emac_priv {
 	unsigned int link;
 	unsigned int duplex;
 	unsigned int speed;
-	unsigned int max_speed;
 };
 
 /**
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index 248baf6273fb..eeecc29cf5b7 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -381,17 +381,7 @@ static int arc_emac_open(struct net_device *ndev)
 	phy_dev->autoneg = AUTONEG_ENABLE;
 	phy_dev->speed = 0;
 	phy_dev->duplex = 0;
-	phy_dev->advertising = phy_dev->supported;
-
-	if (priv->max_speed > 100) {
-		phy_dev->advertising &= PHY_GBIT_FEATURES;
-	} else if (priv->max_speed <= 100) {
-		phy_dev->advertising &= PHY_BASIC_FEATURES;
-		if (priv->max_speed <= 10) {
-			phy_dev->advertising &= ~SUPPORTED_100baseT_Half;
-			phy_dev->advertising &= ~SUPPORTED_100baseT_Full;
-		}
-	}
+	phy_dev->advertising &= phy_dev->supported;
 
 	priv->last_rx_bd = 0;
 
@@ -704,14 +694,6 @@ static int arc_emac_probe(struct platform_device *pdev)
 	/* Set poll rate so that it polls every 1 ms */
 	arc_reg_set(priv, R_POLLRATE, clock_frequency / 1000000);
 
-	/* Get max speed of operation from device tree */
-	if (of_property_read_u32(pdev->dev.of_node, "max-speed",
-				 &priv->max_speed)) {
-		dev_err(&pdev->dev, "failed to retrieve <max-speed> from device tree\n");
-		err = -EINVAL;
-		goto out;
-	}
-
 	ndev->irq = irq;
 	dev_info(&pdev->dev, "IRQ is %d\n", ndev->irq);
 
diff --git a/drivers/net/ethernet/atheros/alx/alx.h b/drivers/net/ethernet/atheros/alx/alx.h
index d71103dbf2cd..8fc93c5f6abc 100644
--- a/drivers/net/ethernet/atheros/alx/alx.h
+++ b/drivers/net/ethernet/atheros/alx/alx.h
@@ -106,6 +106,9 @@ struct alx_priv {
 	u16 msg_enable;
 
 	bool msi;
+
+	/* protects hw.stats */
+	spinlock_t stats_lock;
 };
 
 extern const struct ethtool_ops alx_ethtool_ops;
diff --git a/drivers/net/ethernet/atheros/alx/ethtool.c b/drivers/net/ethernet/atheros/alx/ethtool.c
index 45b36507abc1..08e22df2a300 100644
--- a/drivers/net/ethernet/atheros/alx/ethtool.c
+++ b/drivers/net/ethernet/atheros/alx/ethtool.c
@@ -46,6 +46,66 @@
 #include "reg.h"
 #include "hw.h"
 
+/* The order of these strings must match the order of the fields in
+ * struct alx_hw_stats
+ * See hw.h
+ */
+static const char alx_gstrings_stats[][ETH_GSTRING_LEN] = {
+	"rx_packets",
+	"rx_bcast_packets",
+	"rx_mcast_packets",
+	"rx_pause_packets",
+	"rx_ctrl_packets",
+	"rx_fcs_errors",
+	"rx_length_errors",
+	"rx_bytes",
+	"rx_runt_packets",
+	"rx_fragments",
+	"rx_64B_or_less_packets",
+	"rx_65B_to_127B_packets",
+	"rx_128B_to_255B_packets",
+	"rx_256B_to_511B_packets",
+	"rx_512B_to_1023B_packets",
+	"rx_1024B_to_1518B_packets",
+	"rx_1519B_to_mtu_packets",
+	"rx_oversize_packets",
+	"rx_rxf_ov_drop_packets",
+	"rx_rrd_ov_drop_packets",
+	"rx_align_errors",
+	"rx_bcast_bytes",
+	"rx_mcast_bytes",
+	"rx_address_errors",
+	"tx_packets",
+	"tx_bcast_packets",
+	"tx_mcast_packets",
+	"tx_pause_packets",
+	"tx_exc_defer_packets",
+	"tx_ctrl_packets",
+	"tx_defer_packets",
+	"tx_bytes",
+	"tx_64B_or_less_packets",
+	"tx_65B_to_127B_packets",
+	"tx_128B_to_255B_packets",
+	"tx_256B_to_511B_packets",
+	"tx_512B_to_1023B_packets",
+	"tx_1024B_to_1518B_packets",
+	"tx_1519B_to_mtu_packets",
+	"tx_single_collision",
+	"tx_multiple_collisions",
+	"tx_late_collision",
+	"tx_abort_collision",
+	"tx_underrun",
+	"tx_trd_eop",
+	"tx_length_errors",
+	"tx_trunc_packets",
+	"tx_bcast_bytes",
+	"tx_mcast_bytes",
+	"tx_update",
+};
+
+#define ALX_NUM_STATS ARRAY_SIZE(alx_gstrings_stats)
+
+
 static u32 alx_get_supported_speeds(struct alx_hw *hw)
 {
 	u32 supported = SUPPORTED_10baseT_Half |
@@ -201,6 +261,44 @@ static void alx_set_msglevel(struct net_device *netdev, u32 data)
 	alx->msg_enable = data;
 }
 
+static void alx_get_ethtool_stats(struct net_device *netdev,
+				  struct ethtool_stats *estats, u64 *data)
+{
+	struct alx_priv *alx = netdev_priv(netdev);
+	struct alx_hw *hw = &alx->hw;
+
+	spin_lock(&alx->stats_lock);
+
+	alx_update_hw_stats(hw);
+	BUILD_BUG_ON(sizeof(hw->stats) - offsetof(struct alx_hw_stats, rx_ok) <
+		     ALX_NUM_STATS * sizeof(u64));
+	memcpy(data, &hw->stats.rx_ok, ALX_NUM_STATS * sizeof(u64));
+
+	spin_unlock(&alx->stats_lock);
+}
+
+static void alx_get_strings(struct net_device *netdev, u32 stringset, u8 *buf)
+{
+	switch (stringset) {
+	case ETH_SS_STATS:
+		memcpy(buf, &alx_gstrings_stats, sizeof(alx_gstrings_stats));
+		break;
+	default:
+		WARN_ON(1);
+		break;
+	}
+}
+
+static int alx_get_sset_count(struct net_device *netdev, int sset)
+{
+	switch (sset) {
+	case ETH_SS_STATS:
+		return ALX_NUM_STATS;
+	default:
+		return -EINVAL;
+	}
+}
+
 const struct ethtool_ops alx_ethtool_ops = {
 	.get_settings	= alx_get_settings,
 	.set_settings	= alx_set_settings,
@@ -209,4 +307,7 @@ const struct ethtool_ops alx_ethtool_ops = {
 	.get_msglevel	= alx_get_msglevel,
 	.set_msglevel	= alx_set_msglevel,
 	.get_link	= ethtool_op_get_link,
+	.get_strings	= alx_get_strings,
+	.get_sset_count	= alx_get_sset_count,
+	.get_ethtool_stats	= alx_get_ethtool_stats,
 };
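
[Editorial aside, not part of the patch] The ethtool.c hunks above wire alx into the standard ethtool statistics triplet: get_sset_count() reports how many counters exist, get_strings() copies the name table, and get_ethtool_stats() fills a u64 array in the same order. A minimal sketch of that pattern for a hypothetical driver — the foo_* names and foo_priv layout are illustrative only, and the snippet builds only against kernel headers:

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>

/* name table and counter block must stay in the same order */
static const char foo_stat_names[][ETH_GSTRING_LEN] = {
	"rx_packets",
	"tx_packets",
};
#define FOO_NUM_STATS ARRAY_SIZE(foo_stat_names)

struct foo_priv {
	u64 stats[FOO_NUM_STATS];	/* kept in lockstep with foo_stat_names */
};

static int foo_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return FOO_NUM_STATS;
	default:
		return -EINVAL;
	}
}

static void foo_get_strings(struct net_device *dev, u32 sset, u8 *buf)
{
	if (sset == ETH_SS_STATS)
		memcpy(buf, foo_stat_names, sizeof(foo_stat_names));
}

static void foo_get_ethtool_stats(struct net_device *dev,
				  struct ethtool_stats *es, u64 *data)
{
	struct foo_priv *priv = netdev_priv(dev);

	memcpy(data, priv->stats, sizeof(priv->stats));
}

The callbacks are then hooked into struct ethtool_ops, as the alx hunk does with .get_strings, .get_sset_count and .get_ethtool_stats; the only invariant that matters is that the string table and the u64 block stay in lockstep, which alx enforces with the BUILD_BUG_ON() against offsetof(struct alx_hw_stats, rx_ok).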
diff --git a/drivers/net/ethernet/atheros/alx/hw.c b/drivers/net/ethernet/atheros/alx/hw.c
index 1e8c24a3cb4e..7712f068f6d4 100644
--- a/drivers/net/ethernet/atheros/alx/hw.c
+++ b/drivers/net/ethernet/atheros/alx/hw.c
@@ -1050,3 +1050,61 @@ bool alx_get_phy_info(struct alx_hw *hw)
 
 	return true;
 }
+
+void alx_update_hw_stats(struct alx_hw *hw)
+{
+	/* RX stats */
+	hw->stats.rx_ok          += alx_read_mem32(hw, ALX_MIB_RX_OK);
+	hw->stats.rx_bcast       += alx_read_mem32(hw, ALX_MIB_RX_BCAST);
+	hw->stats.rx_mcast       += alx_read_mem32(hw, ALX_MIB_RX_MCAST);
+	hw->stats.rx_pause       += alx_read_mem32(hw, ALX_MIB_RX_PAUSE);
+	hw->stats.rx_ctrl        += alx_read_mem32(hw, ALX_MIB_RX_CTRL);
+	hw->stats.rx_fcs_err     += alx_read_mem32(hw, ALX_MIB_RX_FCS_ERR);
+	hw->stats.rx_len_err     += alx_read_mem32(hw, ALX_MIB_RX_LEN_ERR);
+	hw->stats.rx_byte_cnt    += alx_read_mem32(hw, ALX_MIB_RX_BYTE_CNT);
+	hw->stats.rx_runt        += alx_read_mem32(hw, ALX_MIB_RX_RUNT);
+	hw->stats.rx_frag        += alx_read_mem32(hw, ALX_MIB_RX_FRAG);
+	hw->stats.rx_sz_64B      += alx_read_mem32(hw, ALX_MIB_RX_SZ_64B);
+	hw->stats.rx_sz_127B     += alx_read_mem32(hw, ALX_MIB_RX_SZ_127B);
+	hw->stats.rx_sz_255B     += alx_read_mem32(hw, ALX_MIB_RX_SZ_255B);
+	hw->stats.rx_sz_511B     += alx_read_mem32(hw, ALX_MIB_RX_SZ_511B);
+	hw->stats.rx_sz_1023B    += alx_read_mem32(hw, ALX_MIB_RX_SZ_1023B);
+	hw->stats.rx_sz_1518B    += alx_read_mem32(hw, ALX_MIB_RX_SZ_1518B);
+	hw->stats.rx_sz_max      += alx_read_mem32(hw, ALX_MIB_RX_SZ_MAX);
+	hw->stats.rx_ov_sz       += alx_read_mem32(hw, ALX_MIB_RX_OV_SZ);
+	hw->stats.rx_ov_rxf      += alx_read_mem32(hw, ALX_MIB_RX_OV_RXF);
+	hw->stats.rx_ov_rrd      += alx_read_mem32(hw, ALX_MIB_RX_OV_RRD);
+	hw->stats.rx_align_err   += alx_read_mem32(hw, ALX_MIB_RX_ALIGN_ERR);
+	hw->stats.rx_bc_byte_cnt += alx_read_mem32(hw, ALX_MIB_RX_BCCNT);
+	hw->stats.rx_mc_byte_cnt += alx_read_mem32(hw, ALX_MIB_RX_MCCNT);
+	hw->stats.rx_err_addr    += alx_read_mem32(hw, ALX_MIB_RX_ERRADDR);
+
+	/* TX stats */
+	hw->stats.tx_ok          += alx_read_mem32(hw, ALX_MIB_TX_OK);
+	hw->stats.tx_bcast       += alx_read_mem32(hw, ALX_MIB_TX_BCAST);
+	hw->stats.tx_mcast       += alx_read_mem32(hw, ALX_MIB_TX_MCAST);
+	hw->stats.tx_pause       += alx_read_mem32(hw, ALX_MIB_TX_PAUSE);
+	hw->stats.tx_exc_defer   += alx_read_mem32(hw, ALX_MIB_TX_EXC_DEFER);
+	hw->stats.tx_ctrl        += alx_read_mem32(hw, ALX_MIB_TX_CTRL);
+	hw->stats.tx_defer       += alx_read_mem32(hw, ALX_MIB_TX_DEFER);
+	hw->stats.tx_byte_cnt    += alx_read_mem32(hw, ALX_MIB_TX_BYTE_CNT);
+	hw->stats.tx_sz_64B      += alx_read_mem32(hw, ALX_MIB_TX_SZ_64B);
+	hw->stats.tx_sz_127B     += alx_read_mem32(hw, ALX_MIB_TX_SZ_127B);
+	hw->stats.tx_sz_255B     += alx_read_mem32(hw, ALX_MIB_TX_SZ_255B);
+	hw->stats.tx_sz_511B     += alx_read_mem32(hw, ALX_MIB_TX_SZ_511B);
+	hw->stats.tx_sz_1023B    += alx_read_mem32(hw, ALX_MIB_TX_SZ_1023B);
+	hw->stats.tx_sz_1518B    += alx_read_mem32(hw, ALX_MIB_TX_SZ_1518B);
+	hw->stats.tx_sz_max      += alx_read_mem32(hw, ALX_MIB_TX_SZ_MAX);
+	hw->stats.tx_single_col  += alx_read_mem32(hw, ALX_MIB_TX_SINGLE_COL);
+	hw->stats.tx_multi_col   += alx_read_mem32(hw, ALX_MIB_TX_MULTI_COL);
+	hw->stats.tx_late_col    += alx_read_mem32(hw, ALX_MIB_TX_LATE_COL);
+	hw->stats.tx_abort_col   += alx_read_mem32(hw, ALX_MIB_TX_ABORT_COL);
+	hw->stats.tx_underrun    += alx_read_mem32(hw, ALX_MIB_TX_UNDERRUN);
+	hw->stats.tx_trd_eop     += alx_read_mem32(hw, ALX_MIB_TX_TRD_EOP);
+	hw->stats.tx_len_err     += alx_read_mem32(hw, ALX_MIB_TX_LEN_ERR);
+	hw->stats.tx_trunc       += alx_read_mem32(hw, ALX_MIB_TX_TRUNC);
+	hw->stats.tx_bc_byte_cnt += alx_read_mem32(hw, ALX_MIB_TX_BCCNT);
+	hw->stats.tx_mc_byte_cnt += alx_read_mem32(hw, ALX_MIB_TX_MCCNT);
+
+	hw->stats.update         += alx_read_mem32(hw, ALX_MIB_UPDATE);
+}
diff --git a/drivers/net/ethernet/atheros/alx/hw.h b/drivers/net/ethernet/atheros/alx/hw.h
index 96f3b4381e17..15548802d6f8 100644
--- a/drivers/net/ethernet/atheros/alx/hw.h
+++ b/drivers/net/ethernet/atheros/alx/hw.h
@@ -381,6 +381,73 @@ struct alx_rrd {
 				 ALX_ISR_RX_Q6 | \
 				 ALX_ISR_RX_Q7)
 
+/* Statistics counters collected by the MAC
+ *
+ * The order of the fields must match the strings in alx_gstrings_stats
+ * All stats fields should be u64
+ * See ethtool.c
+ */
+struct alx_hw_stats {
+	/* rx */
+	u64 rx_ok;		/* good RX packets */
+	u64 rx_bcast;		/* good RX broadcast packets */
+	u64 rx_mcast;		/* good RX multicast packets */
+	u64 rx_pause;		/* RX pause frames */
+	u64 rx_ctrl;		/* RX control packets other than pause frames */
+	u64 rx_fcs_err;		/* RX packets with bad FCS */
+	u64 rx_len_err;		/* RX packets with length != actual size */
+	u64 rx_byte_cnt;	/* good bytes received. FCS is NOT included */
+	u64 rx_runt;		/* RX packets < 64 bytes with good FCS */
+	u64 rx_frag;		/* RX packets < 64 bytes with bad FCS */
+	u64 rx_sz_64B;		/* 64 byte RX packets */
+	u64 rx_sz_127B;		/* 65-127 byte RX packets */
+	u64 rx_sz_255B;		/* 128-255 byte RX packets */
+	u64 rx_sz_511B;		/* 256-511 byte RX packets */
+	u64 rx_sz_1023B;	/* 512-1023 byte RX packets */
+	u64 rx_sz_1518B;	/* 1024-1518 byte RX packets */
+	u64 rx_sz_max;		/* 1519 byte to MTU RX packets */
+	u64 rx_ov_sz;		/* truncated RX packets, size > MTU */
+	u64 rx_ov_rxf;		/* frames dropped due to RX FIFO overflow */
+	u64 rx_ov_rrd;		/* frames dropped due to RRD overflow */
+	u64 rx_align_err;	/* alignment errors */
+	u64 rx_bc_byte_cnt;	/* RX broadcast bytes, excluding FCS */
+	u64 rx_mc_byte_cnt;	/* RX multicast bytes, excluding FCS */
+	u64 rx_err_addr;	/* packets dropped due to address filtering */
+
+	/* tx */
+	u64 tx_ok;		/* good TX packets */
+	u64 tx_bcast;		/* good TX broadcast packets */
+	u64 tx_mcast;		/* good TX multicast packets */
+	u64 tx_pause;		/* TX pause frames */
+	u64 tx_exc_defer;	/* TX packets deferred excessively */
+	u64 tx_ctrl;		/* TX control frames, excluding pause frames */
+	u64 tx_defer;		/* TX packets deferred */
+	u64 tx_byte_cnt;	/* bytes transmitted, FCS is NOT included */
+	u64 tx_sz_64B;		/* 64 byte TX packets */
+	u64 tx_sz_127B;		/* 65-127 byte TX packets */
+	u64 tx_sz_255B;		/* 128-255 byte TX packets */
+	u64 tx_sz_511B;		/* 256-511 byte TX packets */
+	u64 tx_sz_1023B;	/* 512-1023 byte TX packets */
+	u64 tx_sz_1518B;	/* 1024-1518 byte TX packets */
+	u64 tx_sz_max;		/* 1519 byte to MTU TX packets */
+	u64 tx_single_col;	/* packets TX after a single collision */
+	u64 tx_multi_col;	/* packets TX after multiple collisions */
+	u64 tx_late_col;	/* TX packets with late collisions */
+	u64 tx_abort_col;	/* TX packets aborted w/excessive collisions */
+	u64 tx_underrun;	/* TX packets aborted due to TX FIFO underrun
+				 * or TRD FIFO underrun
+				 */
+	u64 tx_trd_eop;		/* reads beyond the EOP into the next frame
+				 * when TRD was not written timely
+				 */
+	u64 tx_len_err;		/* TX packets where length != actual size */
+	u64 tx_trunc;		/* TX packets truncated due to size > MTU */
+	u64 tx_bc_byte_cnt;	/* broadcast bytes transmitted, excluding FCS */
+	u64 tx_mc_byte_cnt;	/* multicast bytes transmitted, excluding FCS */
+	u64 update;
+};
+
+
 /* maximum interrupt vectors for msix */
 #define ALX_MAX_MSIX_INTRS	16
 
@@ -424,6 +491,9 @@ struct alx_hw {
 
 	/* PHY link patch flag */
 	bool lnk_patch;
+
+	/* cumulated stats from the hardware (registers are cleared on read) */
+	struct alx_hw_stats stats;
 };
 
 static inline int alx_hw_revision(struct alx_hw *hw)
@@ -491,6 +561,7 @@ bool alx_phy_configured(struct alx_hw *hw);
 void alx_configure_basic(struct alx_hw *hw);
 void alx_disable_rss(struct alx_hw *hw);
 bool alx_get_phy_info(struct alx_hw *hw);
+void alx_update_hw_stats(struct alx_hw *hw);
 
 static inline u32 alx_speed_to_ethadv(int speed, u8 duplex)
 {
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index c3c4c266b846..e92ffd6e1c15 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -1166,10 +1166,60 @@ static void alx_poll_controller(struct net_device *netdev)
 }
 #endif
 
+static struct rtnl_link_stats64 *alx_get_stats64(struct net_device *dev,
+					struct rtnl_link_stats64 *net_stats)
+{
+	struct alx_priv *alx = netdev_priv(dev);
+	struct alx_hw_stats *hw_stats = &alx->hw.stats;
+
+	spin_lock(&alx->stats_lock);
+
+	alx_update_hw_stats(&alx->hw);
+
+	net_stats->tx_bytes   = hw_stats->tx_byte_cnt;
+	net_stats->rx_bytes   = hw_stats->rx_byte_cnt;
+	net_stats->multicast  = hw_stats->rx_mcast;
+	net_stats->collisions = hw_stats->tx_single_col +
+				hw_stats->tx_multi_col +
+				hw_stats->tx_late_col +
+				hw_stats->tx_abort_col;
+
+	net_stats->rx_errors  = hw_stats->rx_frag +
+				hw_stats->rx_fcs_err +
+				hw_stats->rx_len_err +
+				hw_stats->rx_ov_sz +
+				hw_stats->rx_ov_rrd +
+				hw_stats->rx_align_err +
+				hw_stats->rx_ov_rxf;
+
+	net_stats->rx_fifo_errors   = hw_stats->rx_ov_rxf;
+	net_stats->rx_length_errors = hw_stats->rx_len_err;
+	net_stats->rx_crc_errors    = hw_stats->rx_fcs_err;
+	net_stats->rx_frame_errors  = hw_stats->rx_align_err;
+	net_stats->rx_dropped       = hw_stats->rx_ov_rrd;
+
+	net_stats->tx_errors = hw_stats->tx_late_col +
+			       hw_stats->tx_abort_col +
+			       hw_stats->tx_underrun +
+			       hw_stats->tx_trunc;
+
+	net_stats->tx_aborted_errors = hw_stats->tx_abort_col;
+	net_stats->tx_fifo_errors    = hw_stats->tx_underrun;
+	net_stats->tx_window_errors  = hw_stats->tx_late_col;
+
+	net_stats->tx_packets = hw_stats->tx_ok + net_stats->tx_errors;
+	net_stats->rx_packets = hw_stats->rx_ok + net_stats->rx_errors;
+
+	spin_unlock(&alx->stats_lock);
+
+	return net_stats;
+}
+
 static const struct net_device_ops alx_netdev_ops = {
 	.ndo_open               = alx_open,
 	.ndo_stop               = alx_stop,
 	.ndo_start_xmit         = alx_start_xmit,
+	.ndo_get_stats64        = alx_get_stats64,
 	.ndo_set_rx_mode        = alx_set_rx_mode,
 	.ndo_validate_addr      = eth_validate_addr,
 	.ndo_set_mac_address    = alx_set_mac_address,
diff --git a/drivers/net/ethernet/atheros/alx/reg.h b/drivers/net/ethernet/atheros/alx/reg.h
index e4358c98bc4e..af006b44b2a6 100644
--- a/drivers/net/ethernet/atheros/alx/reg.h
+++ b/drivers/net/ethernet/atheros/alx/reg.h
@@ -404,15 +404,59 @@
 
 /* MIB */
 #define ALX_MIB_BASE					0x1700
+
 #define ALX_MIB_RX_OK					(ALX_MIB_BASE + 0)
+#define ALX_MIB_RX_BCAST				(ALX_MIB_BASE + 4)
+#define ALX_MIB_RX_MCAST				(ALX_MIB_BASE + 8)
+#define ALX_MIB_RX_PAUSE				(ALX_MIB_BASE + 12)
+#define ALX_MIB_RX_CTRL					(ALX_MIB_BASE + 16)
+#define ALX_MIB_RX_FCS_ERR				(ALX_MIB_BASE + 20)
+#define ALX_MIB_RX_LEN_ERR				(ALX_MIB_BASE + 24)
+#define ALX_MIB_RX_BYTE_CNT				(ALX_MIB_BASE + 28)
+#define ALX_MIB_RX_RUNT					(ALX_MIB_BASE + 32)
+#define ALX_MIB_RX_FRAG					(ALX_MIB_BASE + 36)
+#define ALX_MIB_RX_SZ_64B				(ALX_MIB_BASE + 40)
+#define ALX_MIB_RX_SZ_127B				(ALX_MIB_BASE + 44)
+#define ALX_MIB_RX_SZ_255B				(ALX_MIB_BASE + 48)
+#define ALX_MIB_RX_SZ_511B				(ALX_MIB_BASE + 52)
+#define ALX_MIB_RX_SZ_1023B				(ALX_MIB_BASE + 56)
+#define ALX_MIB_RX_SZ_1518B				(ALX_MIB_BASE + 60)
+#define ALX_MIB_RX_SZ_MAX				(ALX_MIB_BASE + 64)
+#define ALX_MIB_RX_OV_SZ				(ALX_MIB_BASE + 68)
+#define ALX_MIB_RX_OV_RXF				(ALX_MIB_BASE + 72)
+#define ALX_MIB_RX_OV_RRD				(ALX_MIB_BASE + 76)
+#define ALX_MIB_RX_ALIGN_ERR				(ALX_MIB_BASE + 80)
+#define ALX_MIB_RX_BCCNT				(ALX_MIB_BASE + 84)
+#define ALX_MIB_RX_MCCNT				(ALX_MIB_BASE + 88)
 #define ALX_MIB_RX_ERRADDR				(ALX_MIB_BASE + 92)
+
 #define ALX_MIB_TX_OK					(ALX_MIB_BASE + 96)
+#define ALX_MIB_TX_BCAST				(ALX_MIB_BASE + 100)
+#define ALX_MIB_TX_MCAST				(ALX_MIB_BASE + 104)
+#define ALX_MIB_TX_PAUSE				(ALX_MIB_BASE + 108)
+#define ALX_MIB_TX_EXC_DEFER				(ALX_MIB_BASE + 112)
+#define ALX_MIB_TX_CTRL					(ALX_MIB_BASE + 116)
+#define ALX_MIB_TX_DEFER				(ALX_MIB_BASE + 120)
+#define ALX_MIB_TX_BYTE_CNT				(ALX_MIB_BASE + 124)
+#define ALX_MIB_TX_SZ_64B				(ALX_MIB_BASE + 128)
+#define ALX_MIB_TX_SZ_127B				(ALX_MIB_BASE + 132)
+#define ALX_MIB_TX_SZ_255B				(ALX_MIB_BASE + 136)
+#define ALX_MIB_TX_SZ_511B				(ALX_MIB_BASE + 140)
+#define ALX_MIB_TX_SZ_1023B				(ALX_MIB_BASE + 144)
+#define ALX_MIB_TX_SZ_1518B				(ALX_MIB_BASE + 148)
+#define ALX_MIB_TX_SZ_MAX				(ALX_MIB_BASE + 152)
+#define ALX_MIB_TX_SINGLE_COL				(ALX_MIB_BASE + 156)
+#define ALX_MIB_TX_MULTI_COL				(ALX_MIB_BASE + 160)
+#define ALX_MIB_TX_LATE_COL				(ALX_MIB_BASE + 164)
+#define ALX_MIB_TX_ABORT_COL				(ALX_MIB_BASE + 168)
+#define ALX_MIB_TX_UNDERRUN				(ALX_MIB_BASE + 172)
+#define ALX_MIB_TX_TRD_EOP				(ALX_MIB_BASE + 176)
+#define ALX_MIB_TX_LEN_ERR				(ALX_MIB_BASE + 180)
+#define ALX_MIB_TX_TRUNC				(ALX_MIB_BASE + 184)
+#define ALX_MIB_TX_BCCNT				(ALX_MIB_BASE + 188)
 #define ALX_MIB_TX_MCCNT				(ALX_MIB_BASE + 192)
+#define ALX_MIB_UPDATE					(ALX_MIB_BASE + 196)
 
-#define ALX_RX_STATS_BIN				ALX_MIB_RX_OK
-#define ALX_RX_STATS_END				ALX_MIB_RX_ERRADDR
-#define ALX_TX_STATS_BIN				ALX_MIB_TX_OK
-#define ALX_TX_STATS_END				ALX_MIB_TX_MCCNT
 
 #define ALX_ISR						0x1600
 #define ALX_ISR_DIS					BIT(31)
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c.h b/drivers/net/ethernet/atheros/atl1c/atl1c.h
index 7f9369a3b378..b9203d928938 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c.h
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c.h
@@ -22,7 +22,6 @@
 #ifndef _ATL1C_H_
 #define _ATL1C_H_
 
-#include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/types.h>
 #include <linux/errno.h>
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 29801750f239..4d3258dd0a88 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -1500,31 +1500,40 @@ static struct net_device_stats *atl1c_get_stats(struct net_device *netdev)
 	struct net_device_stats *net_stats = &netdev->stats;
 
 	atl1c_update_hw_stats(adapter);
-	net_stats->rx_packets = hw_stats->rx_ok;
-	net_stats->tx_packets = hw_stats->tx_ok;
 	net_stats->rx_bytes   = hw_stats->rx_byte_cnt;
 	net_stats->tx_bytes   = hw_stats->tx_byte_cnt;
 	net_stats->multicast  = hw_stats->rx_mcast;
 	net_stats->collisions = hw_stats->tx_1_col +
-				hw_stats->tx_2_col * 2 +
-				hw_stats->tx_late_col + hw_stats->tx_abort_col;
-	net_stats->rx_errors  = hw_stats->rx_frag + hw_stats->rx_fcs_err +
-				hw_stats->rx_len_err + hw_stats->rx_sz_ov +
-				hw_stats->rx_rrd_ov + hw_stats->rx_align_err;
+				hw_stats->tx_2_col +
+				hw_stats->tx_late_col +
+				hw_stats->tx_abort_col;
+
+	net_stats->rx_errors  = hw_stats->rx_frag +
+				hw_stats->rx_fcs_err +
+				hw_stats->rx_len_err +
+				hw_stats->rx_sz_ov +
+				hw_stats->rx_rrd_ov +
+				hw_stats->rx_align_err +
+				hw_stats->rx_rxf_ov;
+
 	net_stats->rx_fifo_errors   = hw_stats->rx_rxf_ov;
 	net_stats->rx_length_errors = hw_stats->rx_len_err;
 	net_stats->rx_crc_errors    = hw_stats->rx_fcs_err;
 	net_stats->rx_frame_errors  = hw_stats->rx_align_err;
-	net_stats->rx_over_errors   = hw_stats->rx_rrd_ov + hw_stats->rx_rxf_ov;
+	net_stats->rx_dropped       = hw_stats->rx_rrd_ov;
 
-	net_stats->rx_missed_errors = hw_stats->rx_rrd_ov + hw_stats->rx_rxf_ov;
+	net_stats->tx_errors = hw_stats->tx_late_col +
+			       hw_stats->tx_abort_col +
+			       hw_stats->tx_underrun +
+			       hw_stats->tx_trunc;
 
-	net_stats->tx_errors = hw_stats->tx_late_col + hw_stats->tx_abort_col +
-				hw_stats->tx_underrun + hw_stats->tx_trunc;
 	net_stats->tx_fifo_errors    = hw_stats->tx_underrun;
 	net_stats->tx_aborted_errors = hw_stats->tx_abort_col;
 	net_stats->tx_window_errors  = hw_stats->tx_late_col;
 
+	net_stats->rx_packets = hw_stats->rx_ok + net_stats->rx_errors;
+	net_stats->tx_packets = hw_stats->tx_ok + net_stats->tx_errors;
+
 	return net_stats;
 }
 
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e.h b/drivers/net/ethernet/atheros/atl1e/atl1e.h
index 1b0fe2d04a0e..0212dac7e23a 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e.h
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e.h
@@ -23,7 +23,6 @@
 #ifndef _ATL1E_H_
 #define _ATL1E_H_
 
-#include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/types.h>
 #include <linux/errno.h>
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index 7a73f3a9fcb5..d5c2d3e912e5 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -1177,32 +1177,40 @@ static struct net_device_stats *atl1e_get_stats(struct net_device *netdev)
 	struct atl1e_hw_stats  *hw_stats = &adapter->hw_stats;
 	struct net_device_stats *net_stats = &netdev->stats;
 
-	net_stats->rx_packets = hw_stats->rx_ok;
-	net_stats->tx_packets = hw_stats->tx_ok;
 	net_stats->rx_bytes   = hw_stats->rx_byte_cnt;
 	net_stats->tx_bytes   = hw_stats->tx_byte_cnt;
 	net_stats->multicast  = hw_stats->rx_mcast;
 	net_stats->collisions = hw_stats->tx_1_col +
-				hw_stats->tx_2_col * 2 +
-				hw_stats->tx_late_col + hw_stats->tx_abort_col;
+				hw_stats->tx_2_col +
+				hw_stats->tx_late_col +
+				hw_stats->tx_abort_col;
+
+	net_stats->rx_errors  = hw_stats->rx_frag +
+				hw_stats->rx_fcs_err +
+				hw_stats->rx_len_err +
+				hw_stats->rx_sz_ov +
+				hw_stats->rx_rrd_ov +
+				hw_stats->rx_align_err +
+				hw_stats->rx_rxf_ov;
 
-	net_stats->rx_errors  = hw_stats->rx_frag + hw_stats->rx_fcs_err +
-				hw_stats->rx_len_err + hw_stats->rx_sz_ov +
-				hw_stats->rx_rrd_ov + hw_stats->rx_align_err;
 	net_stats->rx_fifo_errors   = hw_stats->rx_rxf_ov;
 	net_stats->rx_length_errors = hw_stats->rx_len_err;
 	net_stats->rx_crc_errors    = hw_stats->rx_fcs_err;
 	net_stats->rx_frame_errors  = hw_stats->rx_align_err;
-	net_stats->rx_over_errors   = hw_stats->rx_rrd_ov + hw_stats->rx_rxf_ov;
+	net_stats->rx_dropped       = hw_stats->rx_rrd_ov;
 
-	net_stats->rx_missed_errors = hw_stats->rx_rrd_ov + hw_stats->rx_rxf_ov;
+	net_stats->tx_errors = hw_stats->tx_late_col +
+			       hw_stats->tx_abort_col +
+			       hw_stats->tx_underrun +
+			       hw_stats->tx_trunc;
 
-	net_stats->tx_errors = hw_stats->tx_late_col + hw_stats->tx_abort_col +
-			       hw_stats->tx_underrun + hw_stats->tx_trunc;
 	net_stats->tx_fifo_errors    = hw_stats->tx_underrun;
 	net_stats->tx_aborted_errors = hw_stats->tx_abort_col;
 	net_stats->tx_window_errors  = hw_stats->tx_late_col;
 
+	net_stats->rx_packets = hw_stats->rx_ok + net_stats->rx_errors;
+	net_stats->tx_packets = hw_stats->tx_ok + net_stats->tx_errors;
+
 	return net_stats;
 }
 
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index 538211d6f7d9..287272dd69da 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -1678,33 +1678,42 @@ static void atl1_inc_smb(struct atl1_adapter *adapter)
 	struct net_device *netdev = adapter->netdev;
 	struct stats_msg_block *smb = adapter->smb.smb;
 
+	u64 new_rx_errors = smb->rx_frag +
+			    smb->rx_fcs_err +
+			    smb->rx_len_err +
+			    smb->rx_sz_ov +
+			    smb->rx_rxf_ov +
+			    smb->rx_rrd_ov +
+			    smb->rx_align_err;
+	u64 new_tx_errors = smb->tx_late_col +
+			    smb->tx_abort_col +
+			    smb->tx_underrun +
+			    smb->tx_trunc;
+
 	/* Fill out the OS statistics structure */
-	adapter->soft_stats.rx_packets += smb->rx_ok;
-	adapter->soft_stats.tx_packets += smb->tx_ok;
+	adapter->soft_stats.rx_packets += smb->rx_ok + new_rx_errors;
+	adapter->soft_stats.tx_packets += smb->tx_ok + new_tx_errors;
 	adapter->soft_stats.rx_bytes += smb->rx_byte_cnt;
 	adapter->soft_stats.tx_bytes += smb->tx_byte_cnt;
 	adapter->soft_stats.multicast += smb->rx_mcast;
-	adapter->soft_stats.collisions += (smb->tx_1_col + smb->tx_2_col * 2 +
-		smb->tx_late_col + smb->tx_abort_col * adapter->hw.max_retry);
+	adapter->soft_stats.collisions += smb->tx_1_col +
+					  smb->tx_2_col +
+					  smb->tx_late_col +
+					  smb->tx_abort_col;
 
 	/* Rx Errors */
-	adapter->soft_stats.rx_errors += (smb->rx_frag + smb->rx_fcs_err +
-		smb->rx_len_err + smb->rx_sz_ov + smb->rx_rxf_ov +
-		smb->rx_rrd_ov + smb->rx_align_err);
+	adapter->soft_stats.rx_errors += new_rx_errors;
 	adapter->soft_stats.rx_fifo_errors += smb->rx_rxf_ov;
 	adapter->soft_stats.rx_length_errors += smb->rx_len_err;
 	adapter->soft_stats.rx_crc_errors += smb->rx_fcs_err;
 	adapter->soft_stats.rx_frame_errors += smb->rx_align_err;
-	adapter->soft_stats.rx_missed_errors += (smb->rx_rrd_ov +
-		smb->rx_rxf_ov);
 
 	adapter->soft_stats.rx_pause += smb->rx_pause;
 	adapter->soft_stats.rx_rrd_ov += smb->rx_rrd_ov;
 	adapter->soft_stats.rx_trunc += smb->rx_sz_ov;
 
 	/* Tx Errors */
-	adapter->soft_stats.tx_errors += (smb->tx_late_col +
-		smb->tx_abort_col + smb->tx_underrun + smb->tx_trunc);
+	adapter->soft_stats.tx_errors += new_tx_errors;
 	adapter->soft_stats.tx_fifo_errors += smb->tx_underrun;
 	adapter->soft_stats.tx_aborted_errors += smb->tx_abort_col;
 	adapter->soft_stats.tx_window_errors += smb->tx_late_col;
@@ -1718,23 +1727,18 @@ static void atl1_inc_smb(struct atl1_adapter *adapter)
 	adapter->soft_stats.tx_trunc += smb->tx_trunc;
 	adapter->soft_stats.tx_pause += smb->tx_pause;
 
-	netdev->stats.rx_packets = adapter->soft_stats.rx_packets;
-	netdev->stats.tx_packets = adapter->soft_stats.tx_packets;
 	netdev->stats.rx_bytes = adapter->soft_stats.rx_bytes;
 	netdev->stats.tx_bytes = adapter->soft_stats.tx_bytes;
 	netdev->stats.multicast = adapter->soft_stats.multicast;
 	netdev->stats.collisions = adapter->soft_stats.collisions;
 	netdev->stats.rx_errors = adapter->soft_stats.rx_errors;
-	netdev->stats.rx_over_errors =
-		adapter->soft_stats.rx_missed_errors;
 	netdev->stats.rx_length_errors =
 		adapter->soft_stats.rx_length_errors;
 	netdev->stats.rx_crc_errors = adapter->soft_stats.rx_crc_errors;
 	netdev->stats.rx_frame_errors =
 		adapter->soft_stats.rx_frame_errors;
 	netdev->stats.rx_fifo_errors = adapter->soft_stats.rx_fifo_errors;
-	netdev->stats.rx_missed_errors =
-		adapter->soft_stats.rx_missed_errors;
+	netdev->stats.rx_dropped = adapter->soft_stats.rx_rrd_ov;
 	netdev->stats.tx_errors = adapter->soft_stats.tx_errors;
 	netdev->stats.tx_fifo_errors = adapter->soft_stats.tx_fifo_errors;
 	netdev->stats.tx_aborted_errors =
@@ -1743,6 +1747,9 @@ static void atl1_inc_smb(struct atl1_adapter *adapter)
 		adapter->soft_stats.tx_window_errors;
 	netdev->stats.tx_carrier_errors =
 		adapter->soft_stats.tx_carrier_errors;
+
+	netdev->stats.rx_packets = adapter->soft_stats.rx_packets;
+	netdev->stats.tx_packets = adapter->soft_stats.tx_packets;
 }
 
 static void atl1_update_mailbox(struct atl1_adapter *adapter)
@@ -1872,7 +1879,7 @@ static u16 atl1_alloc_rx_buffers(struct atl1_adapter *adapter)
 						adapter->rx_buffer_len);
 		if (unlikely(!skb)) {
 			/* Better luck next round */
-			adapter->netdev->stats.rx_dropped++;
+			adapter->soft_stats.rx_dropped++;
 			break;
 		}
 
@@ -3122,7 +3129,8 @@ static void atl1_remove(struct pci_dev *pdev)
 	 * from the BIOS during POST.  If we've been messing with the MAC
 	 * address, we need to save the permanent one.
 	 */
-	if (memcmp(adapter->hw.mac_addr, adapter->hw.perm_mac_addr, ETH_ALEN)) {
+	if (!ether_addr_equal_unaligned(adapter->hw.mac_addr,
+					adapter->hw.perm_mac_addr)) {
 		memcpy(adapter->hw.mac_addr, adapter->hw.perm_mac_addr,
 			ETH_ALEN);
 		atl1_set_mac_addr(&adapter->hw);
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.h b/drivers/net/ethernet/atheros/atlx/atl1.h
index 3bf79a56220d..34a58cd846a0 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.h
+++ b/drivers/net/ethernet/atheros/atlx/atl1.h
@@ -666,6 +666,7 @@ struct atl1_sft_stats {
 	u64 rx_errors;
 	u64 rx_length_errors;
 	u64 rx_crc_errors;
+	u64 rx_dropped;
 	u64 rx_frame_errors;
 	u64 rx_fifo_errors;
 	u64 rx_missed_errors;
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index 2fa5b86f139d..3f97d9fd0a71 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -23,6 +23,7 @@ config B44
 	depends on SSB_POSSIBLE && HAS_DMA
 	select SSB
 	select MII
+	select PHYLIB
 	---help---
 	  If you have a network (Ethernet) controller of this type, say Y
 	  or M and read the Ethernet-HOWTO, available from
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 90e54d5488dc..1f7b5aa114fa 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -6,6 +6,7 @@
  * Copyright (C) 2006 Felix Fietkau (nbd@openwrt.org)
  * Copyright (C) 2006 Broadcom Corporation.
  * Copyright (C) 2007 Michael Buesch <m@bues.ch>
+ * Copyright (C) 2013 Hauke Mehrtens <hauke@hauke-m.de>
  *
  * Distribute under GPL.
  */
@@ -29,6 +30,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/ssb/ssb.h>
 #include <linux/slab.h>
+#include <linux/phy.h>
 
 #include <asm/uaccess.h>
 #include <asm/io.h>
@@ -284,7 +286,7 @@ static int __b44_writephy(struct b44 *bp, int phy_addr, int reg, u32 val)
 
 static inline int b44_readphy(struct b44 *bp, int reg, u32 *val)
 {
-	if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
+	if (bp->flags & B44_FLAG_EXTERNAL_PHY)
 		return 0;
 
 	return __b44_readphy(bp, bp->phy_addr, reg, val);
@@ -292,14 +294,14 @@ static inline int b44_readphy(struct b44 *bp, int reg, u32 *val)
 
 static inline int b44_writephy(struct b44 *bp, int reg, u32 val)
 {
-	if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
+	if (bp->flags & B44_FLAG_EXTERNAL_PHY)
 		return 0;
 
 	return __b44_writephy(bp, bp->phy_addr, reg, val);
 }
 
 /* miilib interface */
-static int b44_mii_read(struct net_device *dev, int phy_id, int location)
+static int b44_mdio_read_mii(struct net_device *dev, int phy_id, int location)
 {
 	u32 val;
 	struct b44 *bp = netdev_priv(dev);
@@ -309,19 +311,36 @@ static int b44_mii_read(struct net_device *dev, int phy_id, int location)
 	return val;
 }
 
-static void b44_mii_write(struct net_device *dev, int phy_id, int location,
-			 int val)
+static void b44_mdio_write_mii(struct net_device *dev, int phy_id, int location,
+			       int val)
 {
 	struct b44 *bp = netdev_priv(dev);
 	__b44_writephy(bp, phy_id, location, val);
 }
 
+static int b44_mdio_read_phylib(struct mii_bus *bus, int phy_id, int location)
+{
+	u32 val;
+	struct b44 *bp = bus->priv;
+	int rc = __b44_readphy(bp, phy_id, location, &val);
+	if (rc)
+		return 0xffffffff;
+	return val;
+}
+
+static int b44_mdio_write_phylib(struct mii_bus *bus, int phy_id, int location,
+				 u16 val)
+{
+	struct b44 *bp = bus->priv;
+	return __b44_writephy(bp, phy_id, location, val);
+}
+
 static int b44_phy_reset(struct b44 *bp)
 {
 	u32 val;
 	int err;
 
-	if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
+	if (bp->flags & B44_FLAG_EXTERNAL_PHY)
 		return 0;
 	err = b44_writephy(bp, MII_BMCR, BMCR_RESET);
 	if (err)
@@ -423,7 +442,7 @@ static int b44_setup_phy(struct b44 *bp)
 
 	b44_wap54g10_workaround(bp);
 
-	if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
+	if (bp->flags & B44_FLAG_EXTERNAL_PHY)
 		return 0;
 	if ((err = b44_readphy(bp, B44_MII_ALEDCTRL, &val)) != 0)
 		goto out;
@@ -521,12 +540,14 @@ static void b44_check_phy(struct b44 *bp)
 {
 	u32 bmsr, aux;
 
-	if (bp->phy_addr == B44_PHY_ADDR_NO_PHY) {
+	if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
 		bp->flags |= B44_FLAG_100_BASE_T;
-		bp->flags |= B44_FLAG_FULL_DUPLEX;
 		if (!netif_carrier_ok(bp->dev)) {
 			u32 val = br32(bp, B44_TX_CTRL);
-			val |= TX_CTRL_DUPLEX;
+			if (bp->flags & B44_FLAG_FULL_DUPLEX)
+				val |= TX_CTRL_DUPLEX;
+			else
+				val &= ~TX_CTRL_DUPLEX;
 			bw32(bp, B44_TX_CTRL, val);
 			netif_carrier_on(bp->dev);
 			b44_link_report(bp);
@@ -1315,7 +1336,7 @@ static void b44_chip_reset(struct b44 *bp, int reset_kind)
 	if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) {
 		bw32(bp, B44_ENET_CTRL, ENET_CTRL_EPSEL);
 		br32(bp, B44_ENET_CTRL);
-		bp->flags &= ~B44_FLAG_INTERNAL_PHY;
+		bp->flags |= B44_FLAG_EXTERNAL_PHY;
 	} else {
 		u32 val = br32(bp, B44_DEVCTRL);
 
@@ -1324,7 +1345,7 @@ static void b44_chip_reset(struct b44 *bp, int reset_kind)
 			br32(bp, B44_DEVCTRL);
 			udelay(100);
 		}
-		bp->flags |= B44_FLAG_INTERNAL_PHY;
+		bp->flags &= ~B44_FLAG_EXTERNAL_PHY;
 	}
 }
 
@@ -1339,7 +1360,10 @@ static void b44_halt(struct b44 *bp)
 	bw32(bp, B44_MAC_CTRL, MAC_CTRL_PHY_PDOWN);
 	/* now reset the chip, but without enabling the MAC&PHY
 	 * part of it. This has to be done _after_ we shut down the PHY */
-	b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL);
+	if (bp->flags & B44_FLAG_EXTERNAL_PHY)
+		b44_chip_reset(bp, B44_CHIP_RESET_FULL);
+	else
+		b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL);
 }
 
 /* bp->lock is held. */
@@ -1805,6 +1829,11 @@ static int b44_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 {
 	struct b44 *bp = netdev_priv(dev);
 
+	if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
+		BUG_ON(!bp->phydev);
+		return phy_ethtool_gset(bp->phydev, cmd);
+	}
+
 	cmd->supported = (SUPPORTED_Autoneg);
 	cmd->supported |= (SUPPORTED_100baseT_Half |
 			  SUPPORTED_100baseT_Full |
@@ -1828,8 +1857,8 @@ static int b44_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 		DUPLEX_FULL : DUPLEX_HALF;
 	cmd->port = 0;
 	cmd->phy_address = bp->phy_addr;
-	cmd->transceiver = (bp->flags & B44_FLAG_INTERNAL_PHY) ?
-		XCVR_INTERNAL : XCVR_EXTERNAL;
+	cmd->transceiver = (bp->flags & B44_FLAG_EXTERNAL_PHY) ?
+		XCVR_EXTERNAL : XCVR_INTERNAL;
 	cmd->autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ?
 		AUTONEG_DISABLE : AUTONEG_ENABLE;
 	if (cmd->autoneg == AUTONEG_ENABLE)
@@ -1846,7 +1875,23 @@ static int b44_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 static int b44_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 {
 	struct b44 *bp = netdev_priv(dev);
-	u32 speed = ethtool_cmd_speed(cmd);
+	u32 speed;
+	int ret;
+
+	if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
+		BUG_ON(!bp->phydev);
+		spin_lock_irq(&bp->lock);
+		if (netif_running(dev))
+			b44_setup_phy(bp);
+
+		ret = phy_ethtool_sset(bp->phydev, cmd);
+
+		spin_unlock_irq(&bp->lock);
+
+		return ret;
+	}
+
+	speed = ethtool_cmd_speed(cmd);
 
 	/* We do not support gigabit. */
 	if (cmd->autoneg == AUTONEG_ENABLE) {
@@ -2076,7 +2121,6 @@ static const struct ethtool_ops b44_ethtool_ops = {
 
 static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 {
-	struct mii_ioctl_data *data = if_mii(ifr);
 	struct b44 *bp = netdev_priv(dev);
 	int err = -EINVAL;
 
@@ -2084,7 +2128,12 @@ static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 		goto out;
 
 	spin_lock_irq(&bp->lock);
-	err = generic_mii_ioctl(&bp->mii_if, data, cmd, NULL);
+	if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
+		BUG_ON(!bp->phydev);
+		err = phy_mii_ioctl(bp->phydev, ifr, cmd);
+	} else {
+		err = generic_mii_ioctl(&bp->mii_if, if_mii(ifr), cmd, NULL);
+	}
 	spin_unlock_irq(&bp->lock);
 out:
 	return err;
@@ -2146,6 +2195,141 @@ static const struct net_device_ops b44_netdev_ops = {
 #endif
 };
 
+static void b44_adjust_link(struct net_device *dev)
+{
+	struct b44 *bp = netdev_priv(dev);
+	struct phy_device *phydev = bp->phydev;
+	bool status_changed = 0;
+
+	BUG_ON(!phydev);
+
+	if (bp->old_link != phydev->link) {
+		status_changed = 1;
+		bp->old_link = phydev->link;
+	}
+
+	/* reflect duplex change */
+	if (phydev->link) {
+		if ((phydev->duplex == DUPLEX_HALF) &&
+		    (bp->flags & B44_FLAG_FULL_DUPLEX)) {
+			status_changed = 1;
+			bp->flags &= ~B44_FLAG_FULL_DUPLEX;
+		} else if ((phydev->duplex == DUPLEX_FULL) &&
+			   !(bp->flags & B44_FLAG_FULL_DUPLEX)) {
+			status_changed = 1;
+			bp->flags |= B44_FLAG_FULL_DUPLEX;
+		}
+	}
+
+	if (status_changed) {
+		b44_check_phy(bp);
+		phy_print_status(phydev);
+	}
+}
+
+static int b44_register_phy_one(struct b44 *bp)
+{
+	struct mii_bus *mii_bus;
+	struct ssb_device *sdev = bp->sdev;
+	struct phy_device *phydev;
+	char bus_id[MII_BUS_ID_SIZE + 3];
+	struct ssb_sprom *sprom = &sdev->bus->sprom;
+	int err;
+
+	mii_bus = mdiobus_alloc();
+	if (!mii_bus) {
+		dev_err(sdev->dev, "mdiobus_alloc() failed\n");
+		err = -ENOMEM;
+		goto err_out;
+	}
+
+	mii_bus->priv = bp;
+	mii_bus->read = b44_mdio_read_phylib;
+	mii_bus->write = b44_mdio_write_phylib;
+	mii_bus->name = "b44_eth_mii";
+	mii_bus->parent = sdev->dev;
+	mii_bus->phy_mask = ~(1 << bp->phy_addr);
+	snprintf(mii_bus->id, MII_BUS_ID_SIZE, "%x", instance);
+	mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
+	if (!mii_bus->irq) {
+		dev_err(sdev->dev, "mii_bus irq allocation failed\n");
+		err = -ENOMEM;
+		goto err_out_mdiobus;
+	}
+
+	memset(mii_bus->irq, PHY_POLL, sizeof(int) * PHY_MAX_ADDR);
+
+	bp->mii_bus = mii_bus;
+
+	err = mdiobus_register(mii_bus);
+	if (err) {
+		dev_err(sdev->dev, "failed to register MII bus\n");
+		goto err_out_mdiobus_irq;
+	}
+
+	if (!bp->mii_bus->phy_map[bp->phy_addr] &&
+	    (sprom->boardflags_lo & (B44_BOARDFLAG_ROBO | B44_BOARDFLAG_ADM))) {
+
+		dev_info(sdev->dev,
+			 "could not find PHY at %i, use fixed one\n",
+			 bp->phy_addr);
+
+		bp->phy_addr = 0;
+		snprintf(bus_id, sizeof(bus_id), PHY_ID_FMT, "fixed-0",
+			 bp->phy_addr);
+	} else {
+		snprintf(bus_id, sizeof(bus_id), PHY_ID_FMT, mii_bus->id,
+			 bp->phy_addr);
+	}
+
+	phydev = phy_connect(bp->dev, bus_id, &b44_adjust_link,
+			     PHY_INTERFACE_MODE_MII);
+	if (IS_ERR(phydev)) {
+		dev_err(sdev->dev, "could not attach PHY at %i\n",
+			bp->phy_addr);
+		err = PTR_ERR(phydev);
+		goto err_out_mdiobus_unregister;
+	}
+
+	/* mask with MAC supported features */
+	phydev->supported &= (SUPPORTED_100baseT_Half |
+			      SUPPORTED_100baseT_Full |
+			      SUPPORTED_Autoneg |
+			      SUPPORTED_MII);
+	phydev->advertising = phydev->supported;
+
+	bp->phydev = phydev;
+	bp->old_link = 0;
+	bp->phy_addr = phydev->addr;
+
+	dev_info(sdev->dev, "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
+		 phydev->drv->name, dev_name(&phydev->dev));
+
+	return 0;
+
+err_out_mdiobus_unregister:
+	mdiobus_unregister(mii_bus);
+
+err_out_mdiobus_irq:
+	kfree(mii_bus->irq);
+
+err_out_mdiobus:
+	mdiobus_free(mii_bus);
+
+err_out:
+	return err;
+}
+
+static void b44_unregister_phy_one(struct b44 *bp)
+{
+	struct mii_bus *mii_bus = bp->mii_bus;
+
+	phy_disconnect(bp->phydev);
+	mdiobus_unregister(mii_bus);
+	kfree(mii_bus->irq);
+	mdiobus_free(mii_bus);
+}
+
 static int b44_init_one(struct ssb_device *sdev,
 			const struct ssb_device_id *ent)
 {
@@ -2206,9 +2390,15 @@ static int b44_init_one(struct ssb_device *sdev,
 		goto err_out_powerdown;
 	}
 
+	if (bp->phy_addr == B44_PHY_ADDR_NO_PHY) {
+		dev_err(sdev->dev, "No PHY present on this MAC, aborting\n");
+		err = -ENODEV;
+		goto err_out_powerdown;
+	}
+
 	bp->mii_if.dev = dev;
-	bp->mii_if.mdio_read = b44_mii_read;
-	bp->mii_if.mdio_write = b44_mii_write;
+	bp->mii_if.mdio_read = b44_mdio_read_mii;
+	bp->mii_if.mdio_write = b44_mdio_write_mii;
 	bp->mii_if.phy_id = bp->phy_addr;
 	bp->mii_if.phy_id_mask = 0x1f;
 	bp->mii_if.reg_num_mask = 0x1f;
@@ -2236,13 +2426,26 @@ static int b44_init_one(struct ssb_device *sdev,
 	b44_chip_reset(bp, B44_CHIP_RESET_FULL);
 
 	/* do a phy reset to test if there is an active phy */
-	if (b44_phy_reset(bp) < 0)
-		bp->phy_addr = B44_PHY_ADDR_NO_PHY;
+	err = b44_phy_reset(bp);
+	if (err < 0) {
+		dev_err(sdev->dev, "phy reset failed\n");
+		goto err_out_unregister_netdev;
+	}
+
+	if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
+		err = b44_register_phy_one(bp);
+		if (err) {
+			dev_err(sdev->dev, "Cannot register PHY, aborting\n");
+			goto err_out_unregister_netdev;
+		}
+	}
 
 	netdev_info(dev, "%s %pM\n", DRV_DESCRIPTION, dev->dev_addr);
 
 	return 0;
 
+err_out_unregister_netdev:
+	unregister_netdev(dev);
 err_out_powerdown:
 	ssb_bus_may_powerdown(sdev->bus);
 
@@ -2256,8 +2459,11 @@ out:
 static void b44_remove_one(struct ssb_device *sdev)
 {
 	struct net_device *dev = ssb_get_drvdata(sdev);
+	struct b44 *bp = netdev_priv(dev);
 
 	unregister_netdev(dev);
+	if (bp->flags & B44_FLAG_EXTERNAL_PHY)
+		b44_unregister_phy_one(bp);
 	ssb_device_disable(sdev, 0);
 	ssb_bus_may_powerdown(sdev->bus);
 	free_netdev(dev);
diff --git a/drivers/net/ethernet/broadcom/b44.h b/drivers/net/ethernet/broadcom/b44.h
index 8993d72f0420..3e9c3fc7591b 100644
--- a/drivers/net/ethernet/broadcom/b44.h
+++ b/drivers/net/ethernet/broadcom/b44.h
@@ -280,9 +280,10 @@ struct ring_info {
 	dma_addr_t	mapping;
 };
 
-#define B44_MCAST_TABLE_SIZE	32
-#define B44_PHY_ADDR_NO_PHY	30
-#define B44_MDC_RATIO		5000000
+#define B44_MCAST_TABLE_SIZE		32
+#define B44_PHY_ADDR_NO_LOCAL_PHY	30 /* no local phy regs */
+#define B44_PHY_ADDR_NO_PHY		31 /* no phy present at all */
+#define B44_MDC_RATIO			5000000
 
 #define	B44_STAT_REG_DECLARE		\
 	_B44(tx_good_octets)		\
@@ -344,6 +345,9 @@ B44_STAT_REG_DECLARE
 	struct u64_stats_sync	syncp;
 };
 
+#define	B44_BOARDFLAG_ROBO		0x0010  /* Board has robo switch */
+#define	B44_BOARDFLAG_ADM		0x0080  /* Board has ADMtek switch */
+
 struct ssb_device;
 
 struct b44 {
@@ -376,7 +380,7 @@ struct b44 {
 #define B44_FLAG_ADV_10FULL	0x02000000
 #define B44_FLAG_ADV_100HALF	0x04000000
 #define B44_FLAG_ADV_100FULL	0x08000000
-#define B44_FLAG_INTERNAL_PHY	0x10000000
+#define B44_FLAG_EXTERNAL_PHY	0x10000000
 #define B44_FLAG_RX_RING_HACK	0x20000000
 #define B44_FLAG_TX_RING_HACK	0x40000000
 #define B44_FLAG_WOL_ENABLE	0x80000000
@@ -396,6 +400,9 @@ struct b44 {
 	u32			tx_pending;
 	u8			phy_addr;
 	u8			force_copybreak;
+	struct phy_device	*phydev;
+	struct mii_bus		*mii_bus;
+	int			old_link;
 	struct mii_if_info	mii_if;
 };
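
[Editorial aside, not part of the patch] The b44 conversion above follows the usual phylib bring-up sequence: allocate and register an MDIO bus whose read/write ops wrap the MAC's PHY accessors, then phy_connect() to a device on that bus with an adjust_link callback that pushes the negotiated link state back into the MAC. A condensed sketch of the attach step, with hypothetical foo_* names standing in for the driver's own private struct (kernel-headers-only; it uses the same phy_connect() signature as this patch):

#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/netdevice.h>
#include <linux/phy.h>

struct foo_priv {
	struct mii_bus *mii_bus;	/* registered with mdiobus_register() */
	struct phy_device *phydev;
	int old_link;
};

static void foo_adjust_link(struct net_device *dev)
{
	struct foo_priv *priv = netdev_priv(dev);
	struct phy_device *phydev = priv->phydev;

	if (phydev->link != priv->old_link) {
		priv->old_link = phydev->link;
		/* reprogram MAC speed/duplex here, then log the change */
		phy_print_status(phydev);
	}
}

static int foo_phy_attach(struct net_device *dev, struct foo_priv *priv)
{
	struct phy_device *phydev;
	char bus_id[MII_BUS_ID_SIZE + 3];

	snprintf(bus_id, sizeof(bus_id), PHY_ID_FMT,
		 priv->mii_bus->id, 0);	/* assumes the PHY sits at address 0 */
	phydev = phy_connect(dev, bus_id, foo_adjust_link,
			     PHY_INTERFACE_MODE_MII);
	if (IS_ERR(phydev))
		return PTR_ERR(phydev);

	priv->phydev = phydev;
	return 0;
}

Once attached, phy_start() and phy_stop() in the driver's open/close paths drive the phylib state machine — the bgmac hunks further down add exactly those two calls.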
 
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index e2aa09ce6af7..0297a79a38e1 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -96,6 +96,19 @@ static void bgmac_dma_tx_enable(struct bgmac *bgmac,
 	u32 ctl;
 
 	ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL);
+	if (bgmac->core->id.rev >= 4) {
+		ctl &= ~BGMAC_DMA_TX_BL_MASK;
+		ctl |= BGMAC_DMA_TX_BL_128 << BGMAC_DMA_TX_BL_SHIFT;
+
+		ctl &= ~BGMAC_DMA_TX_MR_MASK;
+		ctl |= BGMAC_DMA_TX_MR_2 << BGMAC_DMA_TX_MR_SHIFT;
+
+		ctl &= ~BGMAC_DMA_TX_PC_MASK;
+		ctl |= BGMAC_DMA_TX_PC_16 << BGMAC_DMA_TX_PC_SHIFT;
+
+		ctl &= ~BGMAC_DMA_TX_PT_MASK;
+		ctl |= BGMAC_DMA_TX_PT_8 << BGMAC_DMA_TX_PT_SHIFT;
+	}
 	ctl |= BGMAC_DMA_TX_ENABLE;
 	ctl |= BGMAC_DMA_TX_PARITY_DISABLE;
 	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL, ctl);
@@ -240,6 +253,16 @@ static void bgmac_dma_rx_enable(struct bgmac *bgmac,
 	u32 ctl;
 
 	ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL);
+	if (bgmac->core->id.rev >= 4) {
+		ctl &= ~BGMAC_DMA_RX_BL_MASK;
+		ctl |= BGMAC_DMA_RX_BL_128 << BGMAC_DMA_RX_BL_SHIFT;
+
+		ctl &= ~BGMAC_DMA_RX_PC_MASK;
+		ctl |= BGMAC_DMA_RX_PC_8 << BGMAC_DMA_RX_PC_SHIFT;
+
+		ctl &= ~BGMAC_DMA_RX_PT_MASK;
+		ctl |= BGMAC_DMA_RX_PT_1 << BGMAC_DMA_RX_PT_SHIFT;
+	}
 	ctl &= BGMAC_DMA_RX_ADDREXT_MASK;
 	ctl |= BGMAC_DMA_RX_ENABLE;
 	ctl |= BGMAC_DMA_RX_PARITY_DISABLE;
@@ -682,70 +705,6 @@ static int bgmac_phy_write(struct bgmac *bgmac, u8 phyaddr, u8 reg, u16 value)
 	return 0;
 }
 
-/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyforce */
-static void bgmac_phy_force(struct bgmac *bgmac)
-{
-	u16 ctl;
-	u16 mask = ~(BGMAC_PHY_CTL_SPEED | BGMAC_PHY_CTL_SPEED_MSB |
-		     BGMAC_PHY_CTL_ANENAB | BGMAC_PHY_CTL_DUPLEX);
-
-	if (bgmac->phyaddr == BGMAC_PHY_NOREGS)
-		return;
-
-	if (bgmac->autoneg)
-		return;
-
-	ctl = bgmac_phy_read(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL);
-	ctl &= mask;
-	if (bgmac->full_duplex)
-		ctl |= BGMAC_PHY_CTL_DUPLEX;
-	if (bgmac->speed == BGMAC_SPEED_100)
-		ctl |= BGMAC_PHY_CTL_SPEED_100;
-	else if (bgmac->speed == BGMAC_SPEED_1000)
-		ctl |= BGMAC_PHY_CTL_SPEED_1000;
-	bgmac_phy_write(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL, ctl);
-}
-
-/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyadvertise */
-static void bgmac_phy_advertise(struct bgmac *bgmac)
-{
-	u16 adv;
-
-	if (bgmac->phyaddr == BGMAC_PHY_NOREGS)
-		return;
-
-	if (!bgmac->autoneg)
-		return;
-
-	/* Adv selected 10/100 speeds */
-	adv = bgmac_phy_read(bgmac, bgmac->phyaddr, BGMAC_PHY_ADV);
-	adv &= ~(BGMAC_PHY_ADV_10HALF | BGMAC_PHY_ADV_10FULL |
-		 BGMAC_PHY_ADV_100HALF | BGMAC_PHY_ADV_100FULL);
-	if (!bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_10)
-		adv |= BGMAC_PHY_ADV_10HALF;
-	if (!bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_100)
-		adv |= BGMAC_PHY_ADV_100HALF;
-	if (bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_10)
-		adv |= BGMAC_PHY_ADV_10FULL;
-	if (bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_100)
-		adv |= BGMAC_PHY_ADV_100FULL;
-	bgmac_phy_write(bgmac, bgmac->phyaddr, BGMAC_PHY_ADV, adv);
-
-	/* Adv selected 1000 speeds */
-	adv = bgmac_phy_read(bgmac, bgmac->phyaddr, BGMAC_PHY_ADV2);
-	adv &= ~(BGMAC_PHY_ADV2_1000HALF | BGMAC_PHY_ADV2_1000FULL);
-	if (!bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_1000)
-		adv |= BGMAC_PHY_ADV2_1000HALF;
-	if (bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_1000)
-		adv |= BGMAC_PHY_ADV2_1000FULL;
-	bgmac_phy_write(bgmac, bgmac->phyaddr, BGMAC_PHY_ADV2, adv);
-
-	/* Restart */
-	bgmac_phy_write(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL,
-			bgmac_phy_read(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL) |
-			BGMAC_PHY_CTL_RESTART);
-}
-
 /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyinit */
 static void bgmac_phy_init(struct bgmac *bgmac)
 {
@@ -789,11 +748,9 @@ static void bgmac_phy_reset(struct bgmac *bgmac)
 	if (bgmac->phyaddr == BGMAC_PHY_NOREGS)
 		return;
 
-	bgmac_phy_write(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL,
-			BGMAC_PHY_CTL_RESET);
+	bgmac_phy_write(bgmac, bgmac->phyaddr, MII_BMCR, BMCR_RESET);
 	udelay(100);
-	if (bgmac_phy_read(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL) &
-	    BGMAC_PHY_CTL_RESET)
+	if (bgmac_phy_read(bgmac, bgmac->phyaddr, MII_BMCR) & BMCR_RESET)
 		bgmac_err(bgmac, "PHY reset failed\n");
 	bgmac_phy_init(bgmac);
 }
@@ -811,13 +768,13 @@ static void bgmac_cmdcfg_maskset(struct bgmac *bgmac, u32 mask, u32 set,
 	u32 cmdcfg = bgmac_read(bgmac, BGMAC_CMDCFG);
 	u32 new_val = (cmdcfg & mask) | set;
 
-	bgmac_set(bgmac, BGMAC_CMDCFG, BGMAC_CMDCFG_SR);
+	bgmac_set(bgmac, BGMAC_CMDCFG, BGMAC_CMDCFG_SR(bgmac->core->id.rev));
 	udelay(2);
 
 	if (new_val != cmdcfg || force)
 		bgmac_write(bgmac, BGMAC_CMDCFG, new_val);
 
-	bgmac_mask(bgmac, BGMAC_CMDCFG, ~BGMAC_CMDCFG_SR);
+	bgmac_mask(bgmac, BGMAC_CMDCFG, ~BGMAC_CMDCFG_SR(bgmac->core->id.rev));
 	udelay(2);
 }
 
@@ -876,31 +833,56 @@ static void bgmac_clear_mib(struct bgmac *bgmac)
 }
 
 /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_speed */
-static void bgmac_speed(struct bgmac *bgmac, int speed)
+static void bgmac_mac_speed(struct bgmac *bgmac)
 {
 	u32 mask = ~(BGMAC_CMDCFG_ES_MASK | BGMAC_CMDCFG_HD);
 	u32 set = 0;
 
-	if (speed & BGMAC_SPEED_10)
+	switch (bgmac->mac_speed) {
+	case SPEED_10:
 		set |= BGMAC_CMDCFG_ES_10;
-	if (speed & BGMAC_SPEED_100)
+		break;
+	case SPEED_100:
 		set |= BGMAC_CMDCFG_ES_100;
-	if (speed & BGMAC_SPEED_1000)
+		break;
+	case SPEED_1000:
 		set |= BGMAC_CMDCFG_ES_1000;
-	if (!bgmac->full_duplex)
+		break;
+	case SPEED_2500:
+		set |= BGMAC_CMDCFG_ES_2500;
+		break;
+	default:
+		bgmac_err(bgmac, "Unsupported speed: %d\n", bgmac->mac_speed);
+	}
+
+	if (bgmac->mac_duplex == DUPLEX_HALF)
 		set |= BGMAC_CMDCFG_HD;
+
 	bgmac_cmdcfg_maskset(bgmac, mask, set, true);
 }
 
 static void bgmac_miiconfig(struct bgmac *bgmac)
 {
-	u8 imode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) & BGMAC_DS_MM_MASK) >>
-			BGMAC_DS_MM_SHIFT;
-	if (imode == 0 || imode == 1) {
-		if (bgmac->autoneg)
-			bgmac_speed(bgmac, BGMAC_SPEED_100);
-		else
-			bgmac_speed(bgmac, bgmac->speed);
+	struct bcma_device *core = bgmac->core;
+	struct bcma_chipinfo *ci = &core->bus->chipinfo;
+	u8 imode;
+
+	if (ci->id == BCMA_CHIP_ID_BCM4707 ||
+	    ci->id == BCMA_CHIP_ID_BCM53018) {
+		bcma_awrite32(core, BCMA_IOCTL,
+			      bcma_aread32(core, BCMA_IOCTL) | 0x40 |
+			      BGMAC_BCMA_IOCTL_SW_CLKEN);
+		bgmac->mac_speed = SPEED_2500;
+		bgmac->mac_duplex = DUPLEX_FULL;
+		bgmac_mac_speed(bgmac);
+	} else {
+		imode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) &
+			BGMAC_DS_MM_MASK) >> BGMAC_DS_MM_SHIFT;
+		if (imode == 0 || imode == 1) {
+			bgmac->mac_speed = SPEED_100;
+			bgmac->mac_duplex = DUPLEX_FULL;
+			bgmac_mac_speed(bgmac);
+		}
 	}
 }
 
@@ -910,7 +892,7 @@ static void bgmac_chip_reset(struct bgmac *bgmac)
 	struct bcma_device *core = bgmac->core;
 	struct bcma_bus *bus = core->bus;
 	struct bcma_chipinfo *ci = &bus->chipinfo;
-	u32 flags = 0;
+	u32 flags;
 	u32 iost;
 	int i;
 
@@ -933,26 +915,36 @@ static void bgmac_chip_reset(struct bgmac *bgmac)
 	}
 
 	iost = bcma_aread32(core, BCMA_IOST);
-	if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == 10) ||
+	if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == BCMA_PKG_ID_BCM47186) ||
 	    (ci->id == BCMA_CHIP_ID_BCM4749 && ci->pkg == 10) ||
-	    (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg == 9))
+	    (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg == BCMA_PKG_ID_BCM47188))
 		iost &= ~BGMAC_BCMA_IOST_ATTACHED;
 
-	if (iost & BGMAC_BCMA_IOST_ATTACHED) {
-		flags = BGMAC_BCMA_IOCTL_SW_CLKEN;
-		if (!bgmac->has_robosw)
-			flags |= BGMAC_BCMA_IOCTL_SW_RESET;
+	/* 3GMAC: for BCM4707, only do core reset at bgmac_probe() */
+	if (ci->id != BCMA_CHIP_ID_BCM4707) {
+		flags = 0;
+		if (iost & BGMAC_BCMA_IOST_ATTACHED) {
+			flags = BGMAC_BCMA_IOCTL_SW_CLKEN;
+			if (!bgmac->has_robosw)
+				flags |= BGMAC_BCMA_IOCTL_SW_RESET;
+		}
+		bcma_core_enable(core, flags);
 	}
 
-	bcma_core_enable(core, flags);
-
-	if (core->id.rev > 2) {
-		bgmac_set(bgmac, BCMA_CLKCTLST, 1 << 8);
-		bgmac_wait_value(bgmac->core, BCMA_CLKCTLST, 1 << 24, 1 << 24,
+	/* Request Misc PLL for corerev > 2 */
+	if (core->id.rev > 2 &&
+	    ci->id != BCMA_CHIP_ID_BCM4707 &&
+	    ci->id != BCMA_CHIP_ID_BCM53018) {
+		bgmac_set(bgmac, BCMA_CLKCTLST,
+			  BGMAC_BCMA_CLKCTLST_MISC_PLL_REQ);
+		bgmac_wait_value(bgmac->core, BCMA_CLKCTLST,
+				 BGMAC_BCMA_CLKCTLST_MISC_PLL_ST,
+				 BGMAC_BCMA_CLKCTLST_MISC_PLL_ST,
 				 1000);
 	}
 
-	if (ci->id == BCMA_CHIP_ID_BCM5357 || ci->id == BCMA_CHIP_ID_BCM4749 ||
+	if (ci->id == BCMA_CHIP_ID_BCM5357 ||
+	    ci->id == BCMA_CHIP_ID_BCM4749 ||
 	    ci->id == BCMA_CHIP_ID_BCM53572) {
 		struct bcma_drv_cc *cc = &bgmac->core->bus->drv_cc;
 		u8 et_swtype = 0;
@@ -967,10 +959,11 @@ static void bgmac_chip_reset(struct bgmac *bgmac)
 			et_swtype &= 0x0f;
 			et_swtype <<= 4;
 			sw_type = et_swtype;
-		} else if (ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == 9) {
+		} else if (ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == BCMA_PKG_ID_BCM5358) {
 			sw_type = BGMAC_CHIPCTL_1_SW_TYPE_EPHYRMII;
-		} else if ((ci->id != BCMA_CHIP_ID_BCM53572 && ci->pkg == 10) ||
-			   (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg == 9)) {
+		} else if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == BCMA_PKG_ID_BCM47186) ||
+			   (ci->id == BCMA_CHIP_ID_BCM4749 && ci->pkg == 10) ||
+			   (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg == BCMA_PKG_ID_BCM47188)) {
 			sw_type = BGMAC_CHIPCTL_1_IF_TYPE_RGMII |
 				  BGMAC_CHIPCTL_1_SW_TYPE_RGMII;
 		}
@@ -1007,8 +1000,10 @@ static void bgmac_chip_reset(struct bgmac *bgmac)
 			     BGMAC_CMDCFG_PROM |
 			     BGMAC_CMDCFG_NLC |
 			     BGMAC_CMDCFG_CFE |
-			     BGMAC_CMDCFG_SR,
+			     BGMAC_CMDCFG_SR(core->id.rev),
 			     false);
+	bgmac->mac_speed = SPEED_UNKNOWN;
+	bgmac->mac_duplex = DUPLEX_UNKNOWN;
 
 	bgmac_clear_mib(bgmac);
 	if (core->id.id == BCMA_CORE_4706_MAC_GBIT)
@@ -1048,7 +1043,7 @@ static void bgmac_enable(struct bgmac *bgmac)
 
 	cmdcfg = bgmac_read(bgmac, BGMAC_CMDCFG);
 	bgmac_cmdcfg_maskset(bgmac, ~(BGMAC_CMDCFG_TE | BGMAC_CMDCFG_RE),
-			     BGMAC_CMDCFG_SR, true);
+			     BGMAC_CMDCFG_SR(bgmac->core->id.rev), true);
 	udelay(2);
 	cmdcfg |= BGMAC_CMDCFG_TE | BGMAC_CMDCFG_RE;
 	bgmac_write(bgmac, BGMAC_CMDCFG, cmdcfg);
@@ -1077,12 +1072,16 @@ static void bgmac_enable(struct bgmac *bgmac)
 		break;
 	}
 
-	rxq_ctl = bgmac_read(bgmac, BGMAC_RXQ_CTL);
-	rxq_ctl &= ~BGMAC_RXQ_CTL_MDP_MASK;
-	bp_clk = bcma_pmu_get_bus_clock(&bgmac->core->bus->drv_cc) / 1000000;
-	mdp = (bp_clk * 128 / 1000) - 3;
-	rxq_ctl |= (mdp << BGMAC_RXQ_CTL_MDP_SHIFT);
-	bgmac_write(bgmac, BGMAC_RXQ_CTL, rxq_ctl);
+	if (ci->id != BCMA_CHIP_ID_BCM4707 &&
+	    ci->id != BCMA_CHIP_ID_BCM53018) {
+		rxq_ctl = bgmac_read(bgmac, BGMAC_RXQ_CTL);
+		rxq_ctl &= ~BGMAC_RXQ_CTL_MDP_MASK;
+		bp_clk = bcma_pmu_get_bus_clock(&bgmac->core->bus->drv_cc) /
+				1000000;
+		mdp = (bp_clk * 128 / 1000) - 3;
+		rxq_ctl |= (mdp << BGMAC_RXQ_CTL_MDP_SHIFT);
+		bgmac_write(bgmac, BGMAC_RXQ_CTL, rxq_ctl);
+	}
 }
 
 /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipinit */
@@ -1108,13 +1107,6 @@ static void bgmac_chip_init(struct bgmac *bgmac, bool full_init)
 
 	bgmac_write(bgmac, BGMAC_RXMAX_LENGTH, 32 + ETHER_MAX_LEN);
 
-	if (!bgmac->autoneg) {
-		bgmac_speed(bgmac, bgmac->speed);
-		bgmac_phy_force(bgmac);
-	} else if (bgmac->speed) { /* if there is anything to adv */
-		bgmac_phy_advertise(bgmac);
-	}
-
 	if (full_init) {
 		bgmac_dma_init(bgmac);
 		if (1) /* FIXME: is there any case we don't want IRQs? */
@@ -1204,6 +1196,8 @@ static int bgmac_open(struct net_device *net_dev)
 	}
 	napi_enable(&bgmac->napi);
 
+	phy_start(bgmac->phy_dev);
+
 	netif_carrier_on(net_dev);
 
 err_out:
@@ -1216,6 +1210,8 @@ static int bgmac_stop(struct net_device *net_dev)
 
 	netif_carrier_off(net_dev);
 
+	phy_stop(bgmac->phy_dev);
+
 	napi_disable(&bgmac->napi);
 	bgmac_chip_intrs_off(bgmac);
 	free_irq(bgmac->core->irq, net_dev);
@@ -1252,27 +1248,11 @@ static int bgmac_set_mac_address(struct net_device *net_dev, void *addr)
 static int bgmac_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
 {
 	struct bgmac *bgmac = netdev_priv(net_dev);
-	struct mii_ioctl_data *data = if_mii(ifr);
-
-	switch (cmd) {
-	case SIOCGMIIPHY:
-		data->phy_id = bgmac->phyaddr;
-		/* fallthru */
-	case SIOCGMIIREG:
-		if (!netif_running(net_dev))
-			return -EAGAIN;
-		data->val_out = bgmac_phy_read(bgmac, data->phy_id,
-					       data->reg_num & 0x1f);
-		return 0;
-	case SIOCSMIIREG:
-		if (!netif_running(net_dev))
-			return -EAGAIN;
-		bgmac_phy_write(bgmac, data->phy_id, data->reg_num & 0x1f,
-				data->val_in);
-		return 0;
-	default:
-		return -EOPNOTSUPP;
-	}
+
+	if (!netif_running(net_dev))
+		return -EINVAL;
+
+	return phy_mii_ioctl(bgmac->phy_dev, ifr, cmd);
 }
 
 static const struct net_device_ops bgmac_netdev_ops = {
@@ -1294,61 +1274,16 @@ static int bgmac_get_settings(struct net_device *net_dev,
 {
 	struct bgmac *bgmac = netdev_priv(net_dev);
 
-	cmd->supported = SUPPORTED_10baseT_Half |
-			 SUPPORTED_10baseT_Full |
-			 SUPPORTED_100baseT_Half |
-			 SUPPORTED_100baseT_Full |
-			 SUPPORTED_1000baseT_Half |
-			 SUPPORTED_1000baseT_Full |
-			 SUPPORTED_Autoneg;
-
-	if (bgmac->autoneg) {
-		WARN_ON(cmd->advertising);
-		if (bgmac->full_duplex) {
-			if (bgmac->speed & BGMAC_SPEED_10)
-				cmd->advertising |= ADVERTISED_10baseT_Full;
-			if (bgmac->speed & BGMAC_SPEED_100)
-				cmd->advertising |= ADVERTISED_100baseT_Full;
-			if (bgmac->speed & BGMAC_SPEED_1000)
-				cmd->advertising |= ADVERTISED_1000baseT_Full;
-		} else {
-			if (bgmac->speed & BGMAC_SPEED_10)
-				cmd->advertising |= ADVERTISED_10baseT_Half;
-			if (bgmac->speed & BGMAC_SPEED_100)
-				cmd->advertising |= ADVERTISED_100baseT_Half;
-			if (bgmac->speed & BGMAC_SPEED_1000)
-				cmd->advertising |= ADVERTISED_1000baseT_Half;
-		}
-	} else {
-		switch (bgmac->speed) {
-		case BGMAC_SPEED_10:
-			ethtool_cmd_speed_set(cmd, SPEED_10);
-			break;
-		case BGMAC_SPEED_100:
-			ethtool_cmd_speed_set(cmd, SPEED_100);
-			break;
-		case BGMAC_SPEED_1000:
-			ethtool_cmd_speed_set(cmd, SPEED_1000);
-			break;
-		}
-	}
-
-	cmd->duplex = bgmac->full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
-
-	cmd->autoneg = bgmac->autoneg;
-
-	return 0;
+	return phy_ethtool_gset(bgmac->phy_dev, cmd);
 }
 
-#if 0
 static int bgmac_set_settings(struct net_device *net_dev,
 			      struct ethtool_cmd *cmd)
 {
 	struct bgmac *bgmac = netdev_priv(net_dev);
 
-	return -1;
+	return phy_ethtool_sset(bgmac->phy_dev, cmd);
 }
-#endif
 
 static void bgmac_get_drvinfo(struct net_device *net_dev,
 			      struct ethtool_drvinfo *info)
@@ -1359,6 +1294,7 @@ static void bgmac_get_drvinfo(struct net_device *net_dev,
 
 static const struct ethtool_ops bgmac_ethtool_ops = {
 	.get_settings		= bgmac_get_settings,
+	.set_settings		= bgmac_set_settings,
 	.get_drvinfo		= bgmac_get_drvinfo,
 };
 
@@ -1377,9 +1313,35 @@ static int bgmac_mii_write(struct mii_bus *bus, int mii_id, int regnum,
 	return bgmac_phy_write(bus->priv, mii_id, regnum, value);
 }
 
+static void bgmac_adjust_link(struct net_device *net_dev)
+{
+	struct bgmac *bgmac = netdev_priv(net_dev);
+	struct phy_device *phy_dev = bgmac->phy_dev;
+	bool update = false;
+
+	if (phy_dev->link) {
+		if (phy_dev->speed != bgmac->mac_speed) {
+			bgmac->mac_speed = phy_dev->speed;
+			update = true;
+		}
+
+		if (phy_dev->duplex != bgmac->mac_duplex) {
+			bgmac->mac_duplex = phy_dev->duplex;
+			update = true;
+		}
+	}
+
+	if (update) {
+		bgmac_mac_speed(bgmac);
+		phy_print_status(phy_dev);
+	}
+}
+
 static int bgmac_mii_register(struct bgmac *bgmac)
 {
 	struct mii_bus *mii_bus;
+	struct phy_device *phy_dev;
+	char bus_id[MII_BUS_ID_SIZE + 3];
 	int i, err = 0;
 
 	mii_bus = mdiobus_alloc();
@@ -1411,8 +1373,22 @@ static int bgmac_mii_register(struct bgmac *bgmac)
 
 	bgmac->mii_bus = mii_bus;
 
+	/* Connect to the PHY */
+	snprintf(bus_id, sizeof(bus_id), PHY_ID_FMT, mii_bus->id,
+		 bgmac->phyaddr);
+	phy_dev = phy_connect(bgmac->net_dev, bus_id, &bgmac_adjust_link,
+			      PHY_INTERFACE_MODE_MII);
+	if (IS_ERR(phy_dev)) {
+		bgmac_err(bgmac, "PHY connection failed\n");
+		err = PTR_ERR(phy_dev);
+		goto err_unregister_bus;
+	}
+	bgmac->phy_dev = phy_dev;
+
 	return err;
 
+err_unregister_bus:
+	mdiobus_unregister(mii_bus);
 err_free_irq:
 	kfree(mii_bus->irq);
 err_free_bus:
@@ -1467,9 +1443,6 @@ static int bgmac_probe(struct bcma_device *core)
 	bcma_set_drvdata(core, bgmac);
 
 	/* Defaults */
-	bgmac->autoneg = true;
-	bgmac->full_duplex = true;
-	bgmac->speed = BGMAC_SPEED_10 | BGMAC_SPEED_100 | BGMAC_SPEED_1000;
 	memcpy(bgmac->net_dev->dev_addr, mac, ETH_ALEN);
 
 	/* On BCM4706 we need common core to access PHY */
@@ -1500,6 +1473,27 @@ static int bgmac_probe(struct bcma_device *core)
 
 	bgmac_chip_reset(bgmac);
 
+	/* For Northstar, we have to take all GMAC cores out of reset */
+	if (core->id.id == BCMA_CHIP_ID_BCM4707 ||
+	    core->id.id == BCMA_CHIP_ID_BCM53018) {
+		struct bcma_device *ns_core;
+		int ns_gmac;
+
+		/* Northstar has 4 GMAC cores */
+		for (ns_gmac = 0; ns_gmac < 4; ns_gmac++) {
+			/* As a Northstar requirement, we have to reset all
+			 * GMACs before accessing any of them.
+			 * bgmac_chip_reset() calls bcma_core_enable() only for
+			 * this core, so the other three are reset here.
+			 */
+			ns_core = bcma_find_core_unit(core->bus,
+						      BCMA_CORE_MAC_GBIT,
+						      ns_gmac);
+			if (ns_core && !bcma_core_is_enabled(ns_core))
+				bcma_core_enable(ns_core, 0);
+		}
+	}
+
 	err = bgmac_dma_alloc(bgmac);
 	if (err) {
 		bgmac_err(bgmac, "Unable to alloc memory for DMA\n");
@@ -1524,14 +1518,12 @@ static int bgmac_probe(struct bcma_device *core)
 	err = bgmac_mii_register(bgmac);
 	if (err) {
 		bgmac_err(bgmac, "Cannot register MDIO\n");
-		err = -ENOTSUPP;
 		goto err_dma_free;
 	}
 
 	err = register_netdev(bgmac->net_dev);
 	if (err) {
 		bgmac_err(bgmac, "Cannot register net device\n");
-		err = -ENOTSUPP;
 		goto err_mii_unregister;
 	}
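
/* Illustrative sketch, not part of the patch above: how the phylib pieces
 * introduced in bgmac.c fit together. The foo_* names are placeholders,
 * not driver symbols; the calls are the ones the hunks above use.
 */
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/phy.h>

static struct phy_device *foo_connect_phy(struct net_device *net_dev,
					  struct mii_bus *bus, int phyaddr,
					  void (*adjust_link)(struct net_device *))
{
	char bus_id[MII_BUS_ID_SIZE + 3];

	/* Build the "<bus>:<addr>" id and attach with a link-change
	 * callback, as bgmac_mii_register() now does. The returned
	 * phy_device is what the driver stores (bgmac->phy_dev) and later
	 * hands to phy_start()/phy_stop() in ndo_open/ndo_stop and to
	 * phy_mii_ioctl() in ndo_do_ioctl.
	 */
	snprintf(bus_id, sizeof(bus_id), PHY_ID_FMT, bus->id, phyaddr);
	return phy_connect(net_dev, bus_id, adjust_link,
			   PHY_INTERFACE_MODE_MII);
}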
 
diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h
index 66c8afbdc8c7..89fa5bc69c51 100644
--- a/drivers/net/ethernet/broadcom/bgmac.h
+++ b/drivers/net/ethernet/broadcom/bgmac.h
@@ -95,7 +95,11 @@
 #define  BGMAC_RXQ_CTL_MDP_SHIFT		24
 #define BGMAC_GPIO_SELECT			0x194
 #define BGMAC_GPIO_OUTPUT_EN			0x198
-/* For 0x1e0 see BCMA_CLKCTLST */
+
+/* For 0x1e0 see BCMA_CLKCTLST. Below are BGMAC-specific bits */
+#define  BGMAC_BCMA_CLKCTLST_MISC_PLL_REQ	0x00000100
+#define  BGMAC_BCMA_CLKCTLST_MISC_PLL_ST	0x01000000
+
 #define BGMAC_HW_WAR				0x1e4
 #define BGMAC_PWR_CTL				0x1e8
 #define BGMAC_DMA_BASE0				0x200		/* Tx and Rx controller */
@@ -185,6 +189,7 @@
 #define   BGMAC_CMDCFG_ES_10			0x00000000
 #define   BGMAC_CMDCFG_ES_100			0x00000004
 #define   BGMAC_CMDCFG_ES_1000			0x00000008
+#define   BGMAC_CMDCFG_ES_2500			0x0000000C
 #define  BGMAC_CMDCFG_PROM			0x00000010	/* Set to activate promiscuous mode */
 #define  BGMAC_CMDCFG_PAD_EN			0x00000020
 #define  BGMAC_CMDCFG_CF			0x00000040
@@ -193,7 +198,9 @@
 #define  BGMAC_CMDCFG_TAI			0x00000200
 #define  BGMAC_CMDCFG_HD			0x00000400	/* Set if in half duplex mode */
 #define  BGMAC_CMDCFG_HD_SHIFT			10
-#define  BGMAC_CMDCFG_SR			0x00000800	/* Set to reset mode */
+#define  BGMAC_CMDCFG_SR_REV0			0x00000800	/* Set to reset mode, for core revs other than 4 */
+#define  BGMAC_CMDCFG_SR_REV4			0x00002000	/* Set to reset mode, only for core rev 4 */
+#define  BGMAC_CMDCFG_SR(rev)  (((rev) == 4) ? BGMAC_CMDCFG_SR_REV4 : BGMAC_CMDCFG_SR_REV0)
 #define  BGMAC_CMDCFG_ML			0x00008000	/* Set to activate mac loopback mode */
 #define  BGMAC_CMDCFG_AE			0x00400000
 #define  BGMAC_CMDCFG_CFE			0x00800000
@@ -216,27 +223,6 @@
 #define BGMAC_RX_STATUS				0xb38
 #define BGMAC_TX_STATUS				0xb3c
 
-#define BGMAC_PHY_CTL				0x00
-#define  BGMAC_PHY_CTL_SPEED_MSB		0x0040
-#define  BGMAC_PHY_CTL_DUPLEX			0x0100		/* duplex mode */
-#define  BGMAC_PHY_CTL_RESTART			0x0200		/* restart autonegotiation */
-#define  BGMAC_PHY_CTL_ANENAB			0x1000		/* enable autonegotiation */
-#define  BGMAC_PHY_CTL_SPEED			0x2000
-#define  BGMAC_PHY_CTL_LOOP			0x4000		/* loopback */
-#define  BGMAC_PHY_CTL_RESET			0x8000		/* reset */
-/* Helpers */
-#define  BGMAC_PHY_CTL_SPEED_10			0
-#define  BGMAC_PHY_CTL_SPEED_100		BGMAC_PHY_CTL_SPEED
-#define  BGMAC_PHY_CTL_SPEED_1000		BGMAC_PHY_CTL_SPEED_MSB
-#define BGMAC_PHY_ADV				0x04
-#define  BGMAC_PHY_ADV_10HALF			0x0020		/* advertise 10MBits/s half duplex */
-#define  BGMAC_PHY_ADV_10FULL			0x0040		/* advertise 10MBits/s full duplex */
-#define  BGMAC_PHY_ADV_100HALF			0x0080		/* advertise 100MBits/s half duplex */
-#define  BGMAC_PHY_ADV_100FULL			0x0100		/* advertise 100MBits/s full duplex */
-#define BGMAC_PHY_ADV2				0x09
-#define  BGMAC_PHY_ADV2_1000HALF		0x0100		/* advertise 1000MBits/s half duplex */
-#define  BGMAC_PHY_ADV2_1000FULL		0x0200		/* advertise 1000MBits/s full duplex */
-
 /* BCMA GMAC core specific IO Control (BCMA_IOCTL) flags */
 #define BGMAC_BCMA_IOCTL_SW_CLKEN		0x00000004	/* PHY Clock Enable */
 #define BGMAC_BCMA_IOCTL_SW_RESET		0x00000008	/* PHY Reset */
@@ -254,9 +240,34 @@
 #define  BGMAC_DMA_TX_SUSPEND			0x00000002
 #define  BGMAC_DMA_TX_LOOPBACK			0x00000004
 #define  BGMAC_DMA_TX_FLUSH			0x00000010
+#define  BGMAC_DMA_TX_MR_MASK			0x000000C0	/* Multiple outstanding reads */
+#define  BGMAC_DMA_TX_MR_SHIFT			6
+#define   BGMAC_DMA_TX_MR_1			0
+#define   BGMAC_DMA_TX_MR_2			1
 #define  BGMAC_DMA_TX_PARITY_DISABLE		0x00000800
 #define  BGMAC_DMA_TX_ADDREXT_MASK		0x00030000
 #define  BGMAC_DMA_TX_ADDREXT_SHIFT		16
+#define  BGMAC_DMA_TX_BL_MASK			0x001C0000	/* BurstLen bits */
+#define  BGMAC_DMA_TX_BL_SHIFT			18
+#define   BGMAC_DMA_TX_BL_16			0
+#define   BGMAC_DMA_TX_BL_32			1
+#define   BGMAC_DMA_TX_BL_64			2
+#define   BGMAC_DMA_TX_BL_128			3
+#define   BGMAC_DMA_TX_BL_256			4
+#define   BGMAC_DMA_TX_BL_512			5
+#define   BGMAC_DMA_TX_BL_1024			6
+#define  BGMAC_DMA_TX_PC_MASK			0x00E00000	/* Prefetch control */
+#define  BGMAC_DMA_TX_PC_SHIFT			21
+#define   BGMAC_DMA_TX_PC_0			0
+#define   BGMAC_DMA_TX_PC_4			1
+#define   BGMAC_DMA_TX_PC_8			2
+#define   BGMAC_DMA_TX_PC_16			3
+#define  BGMAC_DMA_TX_PT_MASK			0x03000000	/* Prefetch threshold */
+#define  BGMAC_DMA_TX_PT_SHIFT			24
+#define   BGMAC_DMA_TX_PT_1			0
+#define   BGMAC_DMA_TX_PT_2			1
+#define   BGMAC_DMA_TX_PT_4			2
+#define   BGMAC_DMA_TX_PT_8			3
 #define BGMAC_DMA_TX_INDEX			0x04
 #define BGMAC_DMA_TX_RINGLO			0x08
 #define BGMAC_DMA_TX_RINGHI			0x0C
@@ -284,8 +295,33 @@
 #define  BGMAC_DMA_RX_DIRECT_FIFO		0x00000100
 #define  BGMAC_DMA_RX_OVERFLOW_CONT		0x00000400
 #define  BGMAC_DMA_RX_PARITY_DISABLE		0x00000800
+#define  BGMAC_DMA_RX_MR_MASK			0x000000C0	/* Multiple outstanding reads */
+#define  BGMAC_DMA_RX_MR_SHIFT			6
+#define   BGMAC_DMA_RX_MR_1			0
+#define   BGMAC_DMA_RX_MR_2			1
 #define  BGMAC_DMA_RX_ADDREXT_MASK		0x00030000
 #define  BGMAC_DMA_RX_ADDREXT_SHIFT		16
+#define  BGMAC_DMA_RX_BL_MASK			0x001C0000	/* BurstLen bits */
+#define  BGMAC_DMA_RX_BL_SHIFT			18
+#define   BGMAC_DMA_RX_BL_16			0
+#define   BGMAC_DMA_RX_BL_32			1
+#define   BGMAC_DMA_RX_BL_64			2
+#define   BGMAC_DMA_RX_BL_128			3
+#define   BGMAC_DMA_RX_BL_256			4
+#define   BGMAC_DMA_RX_BL_512			5
+#define   BGMAC_DMA_RX_BL_1024			6
+#define  BGMAC_DMA_RX_PC_MASK			0x00E00000	/* Prefetch control */
+#define  BGMAC_DMA_RX_PC_SHIFT			21
+#define   BGMAC_DMA_RX_PC_0			0
+#define   BGMAC_DMA_RX_PC_4			1
+#define   BGMAC_DMA_RX_PC_8			2
+#define   BGMAC_DMA_RX_PC_16			3
+#define  BGMAC_DMA_RX_PT_MASK			0x03000000	/* Prefetch threshold */
+#define  BGMAC_DMA_RX_PT_SHIFT			24
+#define   BGMAC_DMA_RX_PT_1			0
+#define   BGMAC_DMA_RX_PT_2			1
+#define   BGMAC_DMA_RX_PT_4			2
+#define   BGMAC_DMA_RX_PT_8			3
 #define BGMAC_DMA_RX_INDEX			0x24
 #define BGMAC_DMA_RX_RINGLO			0x28
 #define BGMAC_DMA_RX_RINGHI			0x2C
@@ -342,10 +378,6 @@
 #define BGMAC_CHIPCTL_1_SW_TYPE_RGMII		0x000000C0
 #define BGMAC_CHIPCTL_1_RXC_DLL_BYPASS		0x00010000
 
-#define BGMAC_SPEED_10				0x0001
-#define BGMAC_SPEED_100				0x0002
-#define BGMAC_SPEED_1000			0x0004
-
 #define BGMAC_WEIGHT	64
 
 #define ETHER_MAX_LEN   1518
@@ -402,6 +434,7 @@ struct bgmac {
 	struct net_device *net_dev;
 	struct napi_struct napi;
 	struct mii_bus *mii_bus;
+	struct phy_device *phy_dev;
 
 	/* DMA */
 	struct bgmac_dma_ring tx_ring[BGMAC_MAX_TX_RINGS];
@@ -416,10 +449,9 @@ struct bgmac {
 	u32 int_mask;
 	u32 int_status;
 
-	/* Speed-related */
-	int speed;
-	bool autoneg;
-	bool full_duplex;
+	/* Current MAC state */
+	int mac_speed;
+	int mac_duplex;
 
 	u8 phyaddr;
 	bool has_robosw;
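
/* Illustrative sketch, not part of the patch above: the new BurstLen and
 * prefetch definitions are plain value/shift/mask triples. foo_dma_tx_ctl()
 * is a hypothetical helper showing how a control word would be composed
 * from them; it is not a function in the driver.
 */
#include <linux/types.h>

static u32 foo_dma_tx_ctl(u32 ctl)
{
	/* 128-byte bursts, prefetch up to 8 descriptors, threshold of 2 */
	ctl &= ~(BGMAC_DMA_TX_BL_MASK | BGMAC_DMA_TX_PC_MASK |
		 BGMAC_DMA_TX_PT_MASK);
	ctl |= BGMAC_DMA_TX_BL_128 << BGMAC_DMA_TX_BL_SHIFT;
	ctl |= BGMAC_DMA_TX_PC_8 << BGMAC_DMA_TX_PC_SHIFT;
	ctl |= BGMAC_DMA_TX_PT_2 << BGMAC_DMA_TX_PT_SHIFT;
	return ctl;
}
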
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index d9980ad00b4b..9d2dedadf2df 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -23,7 +23,6 @@
 #include <linux/vmalloc.h>
 #include <linux/interrupt.h>
 #include <linux/pci.h>
-#include <linux/init.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/skbuff.h>
@@ -58,8 +57,8 @@
 #include "bnx2_fw.h"
 
 #define DRV_MODULE_NAME		"bnx2"
-#define DRV_MODULE_VERSION	"2.2.4"
-#define DRV_MODULE_RELDATE	"Aug 05, 2013"
+#define DRV_MODULE_VERSION	"2.2.5"
+#define DRV_MODULE_RELDATE	"December 20, 2013"
 #define FW_MIPS_FILE_06		"bnx2/bnx2-mips-06-6.2.3.fw"
 #define FW_RV2P_FILE_06		"bnx2/bnx2-rv2p-06-6.0.15.fw"
 #define FW_MIPS_FILE_09		"bnx2/bnx2-mips-09-6.2.1b.fw"
@@ -1197,6 +1196,8 @@ bnx2_copper_linkup(struct bnx2 *bp)
 {
 	u32 bmcr;
 
+	bp->phy_flags &= ~BNX2_PHY_FLAG_MDIX;
+
 	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
 	if (bmcr & BMCR_ANENABLE) {
 		u32 local_adv, remote_adv, common;
@@ -1255,6 +1256,14 @@ bnx2_copper_linkup(struct bnx2 *bp)
 		}
 	}
 
+	if (bp->link_up) {
+		u32 ext_status;
+
+		bnx2_read_phy(bp, MII_BNX2_EXT_STATUS, &ext_status);
+		if (ext_status & EXT_STATUS_MDIX)
+			bp->phy_flags |= BNX2_PHY_FLAG_MDIX;
+	}
+
 	return 0;
 }
 
@@ -2048,29 +2057,27 @@ bnx2_setup_copper_phy(struct bnx2 *bp)
 __releases(&bp->phy_lock)
 __acquires(&bp->phy_lock)
 {
-	u32 bmcr;
+	u32 bmcr, adv_reg, new_adv = 0;
 	u32 new_bmcr;
 
 	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
 
+	bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
+	adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
+		    ADVERTISE_PAUSE_ASYM);
+
+	new_adv = ADVERTISE_CSMA | ethtool_adv_to_mii_adv_t(bp->advertising);
+
 	if (bp->autoneg & AUTONEG_SPEED) {
-		u32 adv_reg, adv1000_reg;
-		u32 new_adv = 0;
+		u32 adv1000_reg;
 		u32 new_adv1000 = 0;
 
-		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
-		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
-			ADVERTISE_PAUSE_ASYM);
+		new_adv |= bnx2_phy_get_pause_adv(bp);
 
 		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
 		adv1000_reg &= PHY_ALL_1000_SPEED;
 
-		new_adv = ethtool_adv_to_mii_adv_t(bp->advertising);
-		new_adv |= ADVERTISE_CSMA;
-		new_adv |= bnx2_phy_get_pause_adv(bp);
-
 		new_adv1000 |= ethtool_adv_to_mii_ctrl1000_t(bp->advertising);
-
 		if ((adv1000_reg != new_adv1000) ||
 			(adv_reg != new_adv) ||
 			((bmcr & BMCR_ANENABLE) == 0)) {
@@ -2090,6 +2097,10 @@ __acquires(&bp->phy_lock)
 		return 0;
 	}
 
+	/* advertise nothing when forcing speed */
+	if (adv_reg != new_adv)
+		bnx2_write_phy(bp, bp->mii_adv, new_adv);
+
 	new_bmcr = 0;
 	if (bp->req_line_speed == SPEED_100) {
 		new_bmcr |= BMCR_SPEED100;
@@ -2341,9 +2352,15 @@ bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
 	}
 
 	/* ethernet@wirespeed */
-	bnx2_write_phy(bp, 0x18, 0x7007);
-	bnx2_read_phy(bp, 0x18, &val);
-	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
+	bnx2_write_phy(bp, MII_BNX2_AUX_CTL, AUX_CTL_MISC_CTL);
+	bnx2_read_phy(bp, MII_BNX2_AUX_CTL, &val);
+	val |=  AUX_CTL_MISC_CTL_WR | AUX_CTL_MISC_CTL_WIRESPEED;
+
+	/* auto-mdix */
+	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
+		val |=  AUX_CTL_MISC_CTL_AUTOMDIX;
+
+	bnx2_write_phy(bp, MII_BNX2_AUX_CTL, val);
 	return 0;
 }
 
@@ -3234,7 +3251,8 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
 		if ((bp->dev->features & NETIF_F_RXHASH) &&
 		    ((status & L2_FHDR_STATUS_USE_RXHASH) ==
 		     L2_FHDR_STATUS_USE_RXHASH))
-			skb->rxhash = rx_hdr->l2_fhdr_hash;
+			skb_set_hash(skb, rx_hdr->l2_fhdr_hash,
+				     PKT_HASH_TYPE_L3);
 
 		skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);
 		napi_gro_receive(&bnapi->napi, skb);
@@ -6865,6 +6883,12 @@ bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 	if (netif_carrier_ok(dev)) {
 		ethtool_cmd_speed_set(cmd, bp->line_speed);
 		cmd->duplex = bp->duplex;
+		if (!(bp->phy_flags & BNX2_PHY_FLAG_SERDES)) {
+			if (bp->phy_flags & BNX2_PHY_FLAG_MDIX)
+				cmd->eth_tp_mdix = ETH_TP_MDI_X;
+			else
+				cmd->eth_tp_mdix = ETH_TP_MDI;
+		}
 	}
 	else {
 		ethtool_cmd_speed_set(cmd, -1);
diff --git a/drivers/net/ethernet/broadcom/bnx2.h b/drivers/net/ethernet/broadcom/bnx2.h
index 18cb2d23e56b..f1cf2c44e7ed 100644
--- a/drivers/net/ethernet/broadcom/bnx2.h
+++ b/drivers/net/ethernet/broadcom/bnx2.h
@@ -6471,6 +6471,15 @@ struct l2_fhdr {
 
 #define BCM5708S_TX_ACTL3			0x17
 
+#define MII_BNX2_EXT_STATUS			0x11
+#define EXT_STATUS_MDIX				 (1 << 13)
+
+#define MII_BNX2_AUX_CTL			0x18
+#define AUX_CTL_MISC_CTL			 0x7007
+#define AUX_CTL_MISC_CTL_WIRESPEED		  (1 << 4)
+#define AUX_CTL_MISC_CTL_AUTOMDIX		  (1 << 9)
+#define AUX_CTL_MISC_CTL_WR			  (1 << 15)
+
 #define MII_BNX2_DSP_RW_PORT			0x15
 #define MII_BNX2_DSP_ADDRESS			0x17
 #define MII_BNX2_DSP_EXPAND_REG			 0x0f00
@@ -6844,6 +6853,7 @@ struct bnx2 {
 #define BNX2_PHY_FLAG_REMOTE_PHY_CAP		0x00000800
 #define BNX2_PHY_FLAG_FORCED_DOWN		0x00001000
 #define BNX2_PHY_FLAG_NO_PARALLEL		0x00002000
+#define BNX2_PHY_FLAG_MDIX			0x00004000
 
 	u32			mii_bmcr;
 	u32			mii_bmsr;
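
/* Illustrative sketch, not part of the patch above: the MII_BNX2_EXT_STATUS
 * bit added here is what bnx2_copper_linkup() latches into
 * BNX2_PHY_FLAG_MDIX and bnx2_get_settings() reports to ethtool.
 * foo_report_mdix() is a hypothetical helper showing that mapping.
 */
#include <linux/ethtool.h>
#include <linux/types.h>

static void foo_report_mdix(struct ethtool_cmd *cmd, u32 ext_status)
{
	/* EXT_STATUS_MDIX set means the PHY resolved to crossed pairs */
	if (ext_status & EXT_STATUS_MDIX)
		cmd->eth_tp_mdix = ETH_TP_MDI_X;
	else
		cmd->eth_tp_mdix = ETH_TP_MDI;
}
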
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index ec6119089b82..391f29ef6d2e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -472,7 +472,7 @@ struct bnx2x_agg_info {
 	u16			vlan_tag;
 	u16			len_on_bd;
 	u32			rxhash;
-	bool			l4_rxhash;
+	enum pkt_hash_types	rxhash_type;
 	u16			gro_size;
 	u16			full_page;
 };
@@ -1566,6 +1566,7 @@ struct bnx2x {
 #define NO_ISCSI_FLAG			(1 << 14)
 #define NO_FCOE_FLAG			(1 << 15)
 #define BC_SUPPORTS_PFC_STATS		(1 << 17)
+#define TX_SWITCHING			(1 << 18)
 #define BC_SUPPORTS_FCOE_FEATURES	(1 << 19)
 #define USING_SINGLE_MSIX_FLAG		(1 << 20)
 #define BC_SUPPORTS_DCBX_MSG_NON_PMF	(1 << 21)
@@ -1573,6 +1574,7 @@ struct bnx2x {
 #define INTERRUPTS_ENABLED_FLAG		(1 << 23)
 #define BC_SUPPORTS_RMMOD_CMD		(1 << 24)
 #define HAS_PHYS_PORT_ID		(1 << 25)
+#define AER_ENABLED			(1 << 26)
 
 #define BP_NOMCP(bp)			((bp)->flags & NO_MCP_FLAG)
 
@@ -2080,7 +2082,6 @@ int bnx2x_del_all_macs(struct bnx2x *bp,
 void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p);
 void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
 		    u8 vf_valid, int fw_sb_id, int igu_sb_id);
-u32 bnx2x_get_pretend_reg(struct bnx2x *bp);
 int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port);
 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port);
 int bnx2x_set_mult_gpio(struct bnx2x *bp, u8 pins, u32 mode);
@@ -2463,7 +2464,8 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
 
 #define GOOD_ME_REG(me_reg) (((me_reg) & ME_REG_VF_VALID) && \
 			    (!((me_reg) & ME_REG_VF_ERR)))
-int bnx2x_nic_load_analyze_req(struct bnx2x *bp, u32 load_code);
+int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err);
+
 /* Congestion management fairness mode */
 #define CMNG_FNS_NONE			0
 #define CMNG_FNS_MINMAX			1
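
/* Illustrative sketch, not part of the patch above: storing an explicit
 * pkt_hash_types value instead of the old l4_rxhash bool lets the rx path
 * hand the hash straight to skb_set_hash(). foo_deliver_hash() is a
 * hypothetical helper showing the pattern used in bnx2x_cmn.c below.
 */
#include <linux/skbuff.h>

static void foo_deliver_hash(struct sk_buff *skb, u32 hash, bool is_l4)
{
	/* TCP hash types cover the L4 ports; anything else from the
	 * Toeplitz engine is reported as an L3 hash.
	 */
	skb_set_hash(skb, hash, is_l4 ? PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
}
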
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index bf811565ee24..9d7419e0390b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -30,6 +30,43 @@
 #include "bnx2x_init.h"
 #include "bnx2x_sp.h"
 
+static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp);
+static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp);
+static int bnx2x_alloc_fp_mem(struct bnx2x *bp);
+static int bnx2x_poll(struct napi_struct *napi, int budget);
+
+static void bnx2x_add_all_napi_cnic(struct bnx2x *bp)
+{
+	int i;
+
+	/* Add NAPI objects */
+	for_each_rx_queue_cnic(bp, i) {
+		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
+			       bnx2x_poll, NAPI_POLL_WEIGHT);
+		napi_hash_add(&bnx2x_fp(bp, i, napi));
+	}
+}
+
+static void bnx2x_add_all_napi(struct bnx2x *bp)
+{
+	int i;
+
+	/* Add NAPI objects */
+	for_each_eth_queue(bp, i) {
+		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
+			       bnx2x_poll, NAPI_POLL_WEIGHT);
+		napi_hash_add(&bnx2x_fp(bp, i, napi));
+	}
+}
+
+static int bnx2x_calc_num_queues(struct bnx2x *bp)
+{
+	return  bnx2x_num_queues ?
+		 min_t(int, bnx2x_num_queues, BNX2X_MAX_QUEUES(bp)) :
+		 min_t(int, netif_get_num_default_rss_queues(),
+		       BNX2X_MAX_QUEUES(bp));
+}
+
 /**
  * bnx2x_move_fp - move content of the fastpath structure.
  *
@@ -145,7 +182,7 @@ static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
 	}
 }
 
-int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
+int bnx2x_load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
 
 /* free skb in the packet ring at pos idx
  * return idx of last bd freed
@@ -359,7 +396,7 @@ static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
  */
 static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
 			    const struct eth_fast_path_rx_cqe *cqe,
-			    bool *l4_rxhash)
+			    enum pkt_hash_types *rxhash_type)
 {
 	/* Get Toeplitz hash from CQE */
 	if ((bp->dev->features & NETIF_F_RXHASH) &&
@@ -367,11 +404,13 @@ static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
 		enum eth_rss_hash_type htype;
 
 		htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
-		*l4_rxhash = (htype == TCP_IPV4_HASH_TYPE) ||
-			     (htype == TCP_IPV6_HASH_TYPE);
+		*rxhash_type = ((htype == TCP_IPV4_HASH_TYPE) ||
+				(htype == TCP_IPV6_HASH_TYPE)) ?
+			       PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3;
+
 		return le32_to_cpu(cqe->rss_hash_result);
 	}
-	*l4_rxhash = false;
+	*rxhash_type = PKT_HASH_TYPE_NONE;
 	return 0;
 }
 
@@ -425,7 +464,7 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
 	tpa_info->tpa_state = BNX2X_TPA_START;
 	tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
 	tpa_info->placement_offset = cqe->placement_offset;
-	tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->l4_rxhash);
+	tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->rxhash_type);
 	if (fp->mode == TPA_MODE_GRO) {
 		u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
 		tpa_info->full_page = SGE_PAGES / gro_size * gro_size;
@@ -733,8 +772,7 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 
 		skb_reserve(skb, pad + NET_SKB_PAD);
 		skb_put(skb, len);
-		skb->rxhash = tpa_info->rxhash;
-		skb->l4_rxhash = tpa_info->l4_rxhash;
+		skb_set_hash(skb, tpa_info->rxhash, tpa_info->rxhash_type);
 
 		skb->protocol = eth_type_trans(skb, bp->dev);
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -817,7 +855,7 @@ void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
 }
 
-int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
+static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 {
 	struct bnx2x *bp = fp->bp;
 	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
@@ -851,7 +889,8 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 		enum eth_rx_cqe_type cqe_fp_type;
 		u16 len, pad, queue;
 		u8 *data;
-		bool l4_rxhash;
+		u32 rxhash;
+		enum pkt_hash_types rxhash_type;
 
 #ifdef BNX2X_STOP_ON_ERROR
 		if (unlikely(bp->panic))
@@ -992,8 +1031,8 @@ reuse_rx:
 		skb->protocol = eth_type_trans(skb, bp->dev);
 
 		/* Set Toeplitz hash for a none-LRO skb */
-		skb->rxhash = bnx2x_get_rxhash(bp, cqe_fp, &l4_rxhash);
-		skb->l4_rxhash = l4_rxhash;
+		rxhash = bnx2x_get_rxhash(bp, cqe_fp, &rxhash_type);
+		skb_set_hash(skb, rxhash, rxhash_type);
 
 		skb_checksum_none_assert(skb);
 
@@ -1486,7 +1525,7 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp)
 	}
 }
 
-void bnx2x_free_skbs_cnic(struct bnx2x *bp)
+static void bnx2x_free_skbs_cnic(struct bnx2x *bp)
 {
 	bnx2x_free_tx_skbs_cnic(bp);
 	bnx2x_free_rx_skbs_cnic(bp);
@@ -2265,7 +2304,7 @@ static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code)
  * virtualized environments a pf from another VM may have already
  * initialized the device including loading FW
  */
-int bnx2x_nic_load_analyze_req(struct bnx2x *bp, u32 load_code)
+int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err)
 {
 	/* is another pf loaded on this engine? */
 	if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
@@ -2284,8 +2323,12 @@ int bnx2x_nic_load_analyze_req(struct bnx2x *bp, u32 load_code)
 
 		/* abort nic load if version mismatch */
 		if (my_fw != loaded_fw) {
-			BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. Aborting\n",
-				  loaded_fw, my_fw);
+			if (print_err)
+				BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. Aborting\n",
+					  loaded_fw, my_fw);
+			else
+				BNX2X_DEV_INFO("bnx2x with FW %x was already loaded which mismatches my %x FW, possibly due to MF UNDI\n",
+					       loaded_fw, my_fw);
 			return -EBUSY;
 		}
 	}
@@ -2298,16 +2341,16 @@ static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port)
 	int path = BP_PATH(bp);
 
 	DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d]      %d, %d, %d\n",
-	   path, load_count[path][0], load_count[path][1],
-	   load_count[path][2]);
-	load_count[path][0]++;
-	load_count[path][1 + port]++;
+	   path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
+	   bnx2x_load_count[path][2]);
+	bnx2x_load_count[path][0]++;
+	bnx2x_load_count[path][1 + port]++;
 	DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d]  %d, %d, %d\n",
-	   path, load_count[path][0], load_count[path][1],
-	   load_count[path][2]);
-	if (load_count[path][0] == 1)
+	   path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
+	   bnx2x_load_count[path][2]);
+	if (bnx2x_load_count[path][0] == 1)
 		return FW_MSG_CODE_DRV_LOAD_COMMON;
-	else if (load_count[path][1 + port] == 1)
+	else if (bnx2x_load_count[path][1 + port] == 1)
 		return FW_MSG_CODE_DRV_LOAD_PORT;
 	else
 		return FW_MSG_CODE_DRV_LOAD_FUNCTION;
@@ -2600,7 +2643,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 				LOAD_ERROR_EXIT(bp, load_error1);
 
 			/* what did mcp say? */
-			rc = bnx2x_nic_load_analyze_req(bp, load_code);
+			rc = bnx2x_compare_fw_ver(bp, load_code, true);
 			if (rc) {
 				bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
 				LOAD_ERROR_EXIT(bp, load_error2);
@@ -3065,7 +3108,7 @@ int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
 /*
  * net_device service functions
  */
-int bnx2x_poll(struct napi_struct *napi, int budget)
+static int bnx2x_poll(struct napi_struct *napi, int budget)
 {
 	int work_done = 0;
 	u8 cos;
@@ -4192,7 +4235,7 @@ static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
 	/* end of fastpath */
 }
 
-void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
+static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
 {
 	int i;
 	for_each_cnic_queue(bp, i)
@@ -4406,7 +4449,7 @@ alloc_mem_err:
 	return 0;
 }
 
-int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
+static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
 {
 	if (!NO_FCOE(bp))
 		/* FCoE */
@@ -4419,7 +4462,7 @@ int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
 	return 0;
 }
 
-int bnx2x_alloc_fp_mem(struct bnx2x *bp)
+static int bnx2x_alloc_fp_mem(struct bnx2x *bp)
 {
 	int i;
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index 41f3ca5ad972..17d1689aec6b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -26,10 +26,8 @@
 #include "bnx2x_sriov.h"
 
 /* This is used as a replacement for an MCP if it's not present */
-extern int load_count[2][3]; /* per-path: 0-common, 1-port0, 2-port1 */
-
-extern int num_queues;
-extern int int_mode;
+extern int bnx2x_load_count[2][3]; /* per-path: 0-common, 1-port0, 2-port1 */
+extern int bnx2x_num_queues;
 
 /************************ Macros ********************************/
 #define BNX2X_PCI_FREE(x, y, size) \
@@ -417,35 +415,8 @@ int bnx2x_set_eth_mac(struct bnx2x *bp, bool set);
  * If bp->state is OPEN, should be called with
  * netif_addr_lock_bh()
  */
-void bnx2x_set_rx_mode(struct net_device *dev);
 void bnx2x_set_rx_mode_inner(struct bnx2x *bp);
 
-/**
- * bnx2x_set_storm_rx_mode - configure MAC filtering rules in a FW.
- *
- * @bp:		driver handle
- *
- * If bp->state is OPEN, should be called with
- * netif_addr_lock_bh().
- */
-int bnx2x_set_storm_rx_mode(struct bnx2x *bp);
-
-/**
- * bnx2x_set_q_rx_mode - configures rx_mode for a single queue.
- *
- * @bp:			driver handle
- * @cl_id:		client id
- * @rx_mode_flags:	rx mode configuration
- * @rx_accept_flags:	rx accept configuration
- * @tx_accept_flags:	tx accept configuration (tx switch)
- * @ramrod_flags:	ramrod configuration
- */
-int bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
-			unsigned long rx_mode_flags,
-			unsigned long rx_accept_flags,
-			unsigned long tx_accept_flags,
-			unsigned long ramrod_flags);
-
 /* Parity errors related */
 void bnx2x_set_pf_load(struct bnx2x *bp);
 bool bnx2x_clear_pf_load(struct bnx2x *bp);
@@ -565,9 +536,6 @@ int bnx2x_reload_if_running(struct net_device *dev);
 
 int bnx2x_change_mac_addr(struct net_device *dev, void *p);
 
-/* NAPI poll Rx part */
-int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget);
-
 /* NAPI poll Tx part */
 int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata);
 
@@ -578,13 +546,9 @@ int bnx2x_resume(struct pci_dev *pdev);
 /* Release IRQ vectors */
 void bnx2x_free_irq(struct bnx2x *bp);
 
-void bnx2x_free_fp_mem_cnic(struct bnx2x *bp);
 void bnx2x_free_fp_mem(struct bnx2x *bp);
-int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp);
-int bnx2x_alloc_fp_mem(struct bnx2x *bp);
 void bnx2x_init_rx_rings(struct bnx2x *bp);
 void bnx2x_init_rx_rings_cnic(struct bnx2x *bp);
-void bnx2x_free_skbs_cnic(struct bnx2x *bp);
 void bnx2x_free_skbs(struct bnx2x *bp);
 void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw);
 void bnx2x_netif_start(struct bnx2x *bp);
@@ -608,15 +572,6 @@ int bnx2x_enable_msix(struct bnx2x *bp);
 int bnx2x_enable_msi(struct bnx2x *bp);
 
 /**
- * bnx2x_poll - NAPI callback
- *
- * @napi:	napi structure
- * @budget:
- *
- */
-int bnx2x_poll(struct napi_struct *napi, int budget);
-
-/**
  * bnx2x_low_latency_recv - LL callback
  *
  * @napi:	napi structure
@@ -862,30 +817,6 @@ static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
 	sge->addr_lo = 0;
 }
 
-static inline void bnx2x_add_all_napi_cnic(struct bnx2x *bp)
-{
-	int i;
-
-	/* Add NAPI objects */
-	for_each_rx_queue_cnic(bp, i) {
-		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
-			       bnx2x_poll, NAPI_POLL_WEIGHT);
-		napi_hash_add(&bnx2x_fp(bp, i, napi));
-	}
-}
-
-static inline void bnx2x_add_all_napi(struct bnx2x *bp)
-{
-	int i;
-
-	/* Add NAPI objects */
-	for_each_eth_queue(bp, i) {
-		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
-			       bnx2x_poll, NAPI_POLL_WEIGHT);
-		napi_hash_add(&bnx2x_fp(bp, i, napi));
-	}
-}
-
 static inline void bnx2x_del_all_napi_cnic(struct bnx2x *bp)
 {
 	int i;
@@ -919,14 +850,6 @@ static inline void bnx2x_disable_msi(struct bnx2x *bp)
 	}
 }
 
-static inline int bnx2x_calc_num_queues(struct bnx2x *bp)
-{
-	return  num_queues ?
-		 min_t(int, num_queues, BNX2X_MAX_QUEUES(bp)) :
-		 min_t(int, netif_get_num_default_rss_queues(),
-		       BNX2X_MAX_QUEUES(bp));
-}
-
 static inline void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
 {
 	int i, j;
@@ -1173,8 +1096,6 @@ static inline u8 bnx2x_fp_qzone_id(struct bnx2x_fastpath *fp)
 		return fp->cl_id;
 }
 
-u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp);
-
 static inline void bnx2x_init_txdata(struct bnx2x *bp,
 				     struct bnx2x_fp_txdata *txdata, u32 cid,
 				     int txq_index, __le16 *tx_cons_sb,
@@ -1207,47 +1128,6 @@ static inline u8 bnx2x_cnic_igu_sb_id(struct bnx2x *bp)
 	return bp->igu_base_sb;
 }
 
-static inline void bnx2x_init_fcoe_fp(struct bnx2x *bp)
-{
-	struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
-	unsigned long q_type = 0;
-
-	bnx2x_fcoe(bp, rx_queue) = BNX2X_NUM_ETH_QUEUES(bp);
-	bnx2x_fcoe(bp, cl_id) = bnx2x_cnic_eth_cl_id(bp,
-						     BNX2X_FCOE_ETH_CL_ID_IDX);
-	bnx2x_fcoe(bp, cid) = BNX2X_FCOE_ETH_CID(bp);
-	bnx2x_fcoe(bp, fw_sb_id) = DEF_SB_ID;
-	bnx2x_fcoe(bp, igu_sb_id) = bp->igu_dsb_id;
-	bnx2x_fcoe(bp, rx_cons_sb) = BNX2X_FCOE_L2_RX_INDEX;
-	bnx2x_init_txdata(bp, bnx2x_fcoe(bp, txdata_ptr[0]),
-			  fp->cid, FCOE_TXQ_IDX(bp), BNX2X_FCOE_L2_TX_INDEX,
-			  fp);
-
-	DP(NETIF_MSG_IFUP, "created fcoe tx data (fp index %d)\n", fp->index);
-
-	/* qZone id equals to FW (per path) client id */
-	bnx2x_fcoe(bp, cl_qzone_id) = bnx2x_fp_qzone_id(fp);
-	/* init shortcut */
-	bnx2x_fcoe(bp, ustorm_rx_prods_offset) =
-		bnx2x_rx_ustorm_prods_offset(fp);
-
-	/* Configure Queue State object */
-	__set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
-	__set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
-
-	/* No multi-CoS for FCoE L2 client */
-	BUG_ON(fp->max_cos != 1);
-
-	bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id,
-			     &fp->cid, 1, BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
-			     bnx2x_sp_mapping(bp, q_rdata), q_type);
-
-	DP(NETIF_MSG_IFUP,
-	   "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d igu_sb %d\n",
-	   fp->index, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
-	   fp->igu_sb_id);
-}
-
 static inline int bnx2x_clean_tx_queue(struct bnx2x *bp,
 				       struct bnx2x_fp_txdata *txdata)
 {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 32d0f1435fb4..92a467ff4104 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -1639,6 +1639,12 @@ static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
 
 		memcpy(&val, data_buf, 4);
 
+		/* Note that unlike bnx2x_nvram_read_dword(), this will not
+		 * convert val with be32_to_cpu(), so the data flips if the
+		 * eeprom is read and then written back. This is left as-is
+		 * because tools relying on this behaviour would break if it
+		 * were changed.
+		 */
 		rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
 
 		/* advance to the next dword */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 11fc79585491..9b6b3d7304b6 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -205,6 +205,11 @@ typedef int (*read_sfp_module_eeprom_func_p)(struct bnx2x_phy *phy,
 		(_bank + (_addr & 0xf)), \
 		_val)
 
+static int bnx2x_check_half_open_conn(struct link_params *params,
+				      struct link_vars *vars, u8 notify);
+static int bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
+				      struct link_params *params);
+
 static u32 bnx2x_bits_en(struct bnx2x *bp, u32 reg, u32 bits)
 {
 	u32 val = REG_RD(bp, reg);
@@ -1399,57 +1404,6 @@ static void bnx2x_update_pfc_xmac(struct link_params *params,
 	udelay(30);
 }
 
-
-static void bnx2x_emac_get_pfc_stat(struct link_params *params,
-				    u32 pfc_frames_sent[2],
-				    u32 pfc_frames_received[2])
-{
-	/* Read pfc statistic */
-	struct bnx2x *bp = params->bp;
-	u32 emac_base = params->port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
-	u32 val_xon = 0;
-	u32 val_xoff = 0;
-
-	DP(NETIF_MSG_LINK, "pfc statistic read from EMAC\n");
-
-	/* PFC received frames */
-	val_xoff = REG_RD(bp, emac_base +
-				EMAC_REG_RX_PFC_STATS_XOFF_RCVD);
-	val_xoff &= EMAC_REG_RX_PFC_STATS_XOFF_RCVD_COUNT;
-	val_xon = REG_RD(bp, emac_base + EMAC_REG_RX_PFC_STATS_XON_RCVD);
-	val_xon &= EMAC_REG_RX_PFC_STATS_XON_RCVD_COUNT;
-
-	pfc_frames_received[0] = val_xon + val_xoff;
-
-	/* PFC received sent */
-	val_xoff = REG_RD(bp, emac_base +
-				EMAC_REG_RX_PFC_STATS_XOFF_SENT);
-	val_xoff &= EMAC_REG_RX_PFC_STATS_XOFF_SENT_COUNT;
-	val_xon = REG_RD(bp, emac_base + EMAC_REG_RX_PFC_STATS_XON_SENT);
-	val_xon &= EMAC_REG_RX_PFC_STATS_XON_SENT_COUNT;
-
-	pfc_frames_sent[0] = val_xon + val_xoff;
-}
-
-/* Read pfc statistic*/
-void bnx2x_pfc_statistic(struct link_params *params, struct link_vars *vars,
-			 u32 pfc_frames_sent[2],
-			 u32 pfc_frames_received[2])
-{
-	/* Read pfc statistic */
-	struct bnx2x *bp = params->bp;
-
-	DP(NETIF_MSG_LINK, "pfc statistic\n");
-
-	if (!vars->link_up)
-		return;
-
-	if (vars->mac_type == MAC_TYPE_EMAC) {
-		DP(NETIF_MSG_LINK, "About to read PFC stats from EMAC\n");
-		bnx2x_emac_get_pfc_stat(params, pfc_frames_sent,
-					pfc_frames_received);
-	}
-}
 /******************************************************************/
 /*			MAC/PBF section				  */
 /******************************************************************/
@@ -8648,8 +8602,8 @@ static void bnx2x_set_limiting_mode(struct link_params *params,
 	}
 }
 
-int bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
-			       struct link_params *params)
+static int bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
+				      struct link_params *params)
 {
 	struct bnx2x *bp = params->bp;
 	u16 edc_mode;
@@ -13413,9 +13367,9 @@ static u8 bnx2x_analyze_link_error(struct link_params *params,
 *	a fault, for example, due to break in the TX side of fiber.
 *
 ******************************************************************************/
-int bnx2x_check_half_open_conn(struct link_params *params,
-				struct link_vars *vars,
-				u8 notify)
+static int bnx2x_check_half_open_conn(struct link_params *params,
+				      struct link_vars *vars,
+				      u8 notify)
 {
 	struct bnx2x *bp = params->bp;
 	u32 lss_status = 0;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
index 4df45234fdc0..389f5f8cb0a3 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
@@ -533,19 +533,11 @@ int bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos);
 int bnx2x_ets_e3b0_config(const struct link_params *params,
 			 const struct link_vars *vars,
 			 struct bnx2x_ets_params *ets_params);
-/* Read pfc statistic*/
-void bnx2x_pfc_statistic(struct link_params *params, struct link_vars *vars,
-						 u32 pfc_frames_sent[2],
-						 u32 pfc_frames_received[2]);
+
 void bnx2x_init_mod_abs_int(struct bnx2x *bp, struct link_vars *vars,
 			    u32 chip_id, u32 shmem_base, u32 shmem2_base,
 			    u8 port);
 
-int bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
-			       struct link_params *params);
-
 void bnx2x_period_func(struct link_params *params, struct link_vars *vars);
 
-int bnx2x_check_half_open_conn(struct link_params *params,
-			       struct link_vars *vars, u8 notify);
 #endif /* BNX2X_LINK_H */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 0067b975873f..e118a3ec62bc 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -27,6 +27,7 @@
 #include <linux/slab.h>
 #include <linux/interrupt.h>
 #include <linux/pci.h>
+#include <linux/aer.h>
 #include <linux/init.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
@@ -93,8 +94,8 @@ MODULE_FIRMWARE(FW_FILE_NAME_E1);
 MODULE_FIRMWARE(FW_FILE_NAME_E1H);
 MODULE_FIRMWARE(FW_FILE_NAME_E2);
 
-int num_queues;
-module_param(num_queues, int, 0);
+int bnx2x_num_queues;
+module_param_named(num_queues, bnx2x_num_queues, int, 0);
 MODULE_PARM_DESC(num_queues,
 		 " Set number of queues (default is as a number of CPUs)");
 
@@ -102,7 +103,7 @@ static int disable_tpa;
 module_param(disable_tpa, int, 0);
 MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
 
-int int_mode;
+static int int_mode;
 module_param(int_mode, int, 0);
 MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
 				"(1 INT#x; 2 MSI)");
@@ -278,6 +279,12 @@ MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
 #define BNX2X_PREV_WAIT_NEEDED 1
 static DEFINE_SEMAPHORE(bnx2x_prev_sem);
 static LIST_HEAD(bnx2x_prev_list);
+
+/* Forward declarations */
+static struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev);
+static u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp);
+static int bnx2x_set_storm_rx_mode(struct bnx2x *bp);
+
 /****************************************************************************
 * General service functions
 ****************************************************************************/
@@ -3000,6 +3007,9 @@ static unsigned long bnx2x_get_common_flags(struct bnx2x *bp,
 	if (zero_stats)
 		__set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags);
 
+	if (bp->flags & TX_SWITCHING)
+		__set_bit(BNX2X_Q_FLG_TX_SWITCH, &flags);
+
 	__set_bit(BNX2X_Q_FLG_PCSUM_ON_PKT, &flags);
 	__set_bit(BNX2X_Q_FLG_TUN_INC_INNER_IP_ID, &flags);
 
@@ -3297,6 +3307,10 @@ static void bnx2x_drv_info_ether_stat(struct bnx2x *bp)
 
 	ether_stat->txq_size = bp->tx_ring_size;
 	ether_stat->rxq_size = bp->rx_ring_size;
+
+#ifdef CONFIG_BNX2X_SRIOV
+	ether_stat->vf_cnt = IS_SRIOV(bp) ? bp->vfdb->sriov.nr_virtfn : 0;
+#endif
 }
 
 static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp)
@@ -5852,11 +5866,11 @@ static void bnx2x_init_eq_ring(struct bnx2x *bp)
 }
 
 /* called with netif_addr_lock_bh() */
-int bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
-			unsigned long rx_mode_flags,
-			unsigned long rx_accept_flags,
-			unsigned long tx_accept_flags,
-			unsigned long ramrod_flags)
+static int bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
+			       unsigned long rx_mode_flags,
+			       unsigned long rx_accept_flags,
+			       unsigned long tx_accept_flags,
+			       unsigned long ramrod_flags)
 {
 	struct bnx2x_rx_mode_ramrod_params ramrod_param;
 	int rc;
@@ -5964,7 +5978,7 @@ static int bnx2x_fill_accept_flags(struct bnx2x *bp, u32 rx_mode,
 }
 
 /* called with netif_addr_lock_bh() */
-int bnx2x_set_storm_rx_mode(struct bnx2x *bp)
+static int bnx2x_set_storm_rx_mode(struct bnx2x *bp)
 {
 	unsigned long rx_mode_flags = 0, ramrod_flags = 0;
 	unsigned long rx_accept_flags = 0, tx_accept_flags = 0;
@@ -6160,6 +6174,47 @@ static void bnx2x_init_tx_rings(struct bnx2x *bp)
 			bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[cos]);
 }
 
+static void bnx2x_init_fcoe_fp(struct bnx2x *bp)
+{
+	struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
+	unsigned long q_type = 0;
+
+	bnx2x_fcoe(bp, rx_queue) = BNX2X_NUM_ETH_QUEUES(bp);
+	bnx2x_fcoe(bp, cl_id) = bnx2x_cnic_eth_cl_id(bp,
+						     BNX2X_FCOE_ETH_CL_ID_IDX);
+	bnx2x_fcoe(bp, cid) = BNX2X_FCOE_ETH_CID(bp);
+	bnx2x_fcoe(bp, fw_sb_id) = DEF_SB_ID;
+	bnx2x_fcoe(bp, igu_sb_id) = bp->igu_dsb_id;
+	bnx2x_fcoe(bp, rx_cons_sb) = BNX2X_FCOE_L2_RX_INDEX;
+	bnx2x_init_txdata(bp, bnx2x_fcoe(bp, txdata_ptr[0]),
+			  fp->cid, FCOE_TXQ_IDX(bp), BNX2X_FCOE_L2_TX_INDEX,
+			  fp);
+
+	DP(NETIF_MSG_IFUP, "created fcoe tx data (fp index %d)\n", fp->index);
+
+	/* qZone id equals to FW (per path) client id */
+	bnx2x_fcoe(bp, cl_qzone_id) = bnx2x_fp_qzone_id(fp);
+	/* init shortcut */
+	bnx2x_fcoe(bp, ustorm_rx_prods_offset) =
+		bnx2x_rx_ustorm_prods_offset(fp);
+
+	/* Configure Queue State object */
+	__set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
+	__set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
+
+	/* No multi-CoS for FCoE L2 client */
+	BUG_ON(fp->max_cos != 1);
+
+	bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id,
+			     &fp->cid, 1, BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
+			     bnx2x_sp_mapping(bp, q_rdata), q_type);
+
+	DP(NETIF_MSG_IFUP,
+	   "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d igu_sb %d\n",
+	   fp->index, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
+	   fp->igu_sb_id);
+}
+
 void bnx2x_nic_init_cnic(struct bnx2x *bp)
 {
 	if (!NO_FCOE(bp))
@@ -8732,16 +8787,16 @@ u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode)
 		int path = BP_PATH(bp);
 
 		DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d]      %d, %d, %d\n",
-		   path, load_count[path][0], load_count[path][1],
-		   load_count[path][2]);
-		load_count[path][0]--;
-		load_count[path][1 + port]--;
+		   path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
+		   bnx2x_load_count[path][2]);
+		bnx2x_load_count[path][0]--;
+		bnx2x_load_count[path][1 + port]--;
 		DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d]  %d, %d, %d\n",
-		   path, load_count[path][0], load_count[path][1],
-		   load_count[path][2]);
-		if (load_count[path][0] == 0)
+		   path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
+		   bnx2x_load_count[path][2]);
+		if (bnx2x_load_count[path][0] == 0)
 			reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
-		else if (load_count[path][1 + port] == 0)
+		else if (bnx2x_load_count[path][1 + port] == 0)
 			reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
 		else
 			reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
@@ -9767,7 +9822,7 @@ period_task_exit:
  * Init service functions
  */
 
-u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
+static u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
 {
 	u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0;
 	u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base;
@@ -9854,6 +9909,64 @@ static void bnx2x_prev_unload_close_mac(struct bnx2x *bp,
 #define BNX2X_PREV_UNDI_BD(val)		((val) >> 16 & 0xffff)
 #define BNX2X_PREV_UNDI_PROD(rcq, bd)	((bd) << 16 | (rcq))
 
+#define BCM_5710_UNDI_FW_MF_MAJOR	(0x07)
+#define BCM_5710_UNDI_FW_MF_MINOR	(0x08)
+#define BCM_5710_UNDI_FW_MF_VERS	(0x05)
+#define BNX2X_PREV_UNDI_MF_PORT(p)	(0x1a150c + ((p) << 4))
+#define BNX2X_PREV_UNDI_MF_FUNC(f)	(0x1a184c + ((f) << 4))
+static bool bnx2x_prev_unload_undi_fw_supports_mf(struct bnx2x *bp)
+{
+	u8 major, minor, version;
+	u32 fw;
+
+	/* Must check that FW is loaded */
+	if (!(REG_RD(bp, MISC_REG_RESET_REG_1) &
+	     MISC_REGISTERS_RESET_REG_1_RST_XSEM)) {
+		BNX2X_DEV_INFO("XSEM is reset - UNDI MF FW is not loaded\n");
+		return false;
+	}
+
+	/* Read Currently loaded FW version */
+	fw = REG_RD(bp, XSEM_REG_PRAM);
+	major = fw & 0xff;
+	minor = (fw >> 0x8) & 0xff;
+	version = (fw >> 0x10) & 0xff;
+	BNX2X_DEV_INFO("Loaded FW: 0x%08x: Major 0x%02x Minor 0x%02x Version 0x%02x\n",
+		       fw, major, minor, version);
+
+	if (major > BCM_5710_UNDI_FW_MF_MAJOR)
+		return true;
+
+	if ((major == BCM_5710_UNDI_FW_MF_MAJOR) &&
+	    (minor > BCM_5710_UNDI_FW_MF_MINOR))
+		return true;
+
+	if ((major == BCM_5710_UNDI_FW_MF_MAJOR) &&
+	    (minor == BCM_5710_UNDI_FW_MF_MINOR) &&
+	    (version >= BCM_5710_UNDI_FW_MF_VERS))
+		return true;
+
+	return false;
+}
+
+static void bnx2x_prev_unload_undi_mf(struct bnx2x *bp)
+{
+	int i;
+
+	/* Due to legacy (FW) code, the first function on each engine has a
+	 * different offset macro from the rest of the functions.
+	 * Setting this for all 8 functions is harmless regardless of whether
+	 * this is actually a multi-function device.
+	 */
+	for (i = 0; i < 2; i++)
+		REG_WR(bp, BNX2X_PREV_UNDI_MF_PORT(i), 1);
+
+	for (i = 2; i < 8; i++)
+		REG_WR(bp, BNX2X_PREV_UNDI_MF_FUNC(i - 2), 1);
+
+	BNX2X_DEV_INFO("UNDI FW (MF) set to discard\n");
+}
+
 static void bnx2x_prev_unload_undi_inc(struct bnx2x *bp, u8 port, u8 inc)
 {
 	u16 rcq, bd;
@@ -10054,7 +10167,7 @@ static int bnx2x_prev_unload_uncommon(struct bnx2x *bp)
 	 * the one required, then FLR will be sufficient to clean any residue
 	 * left by previous driver
 	 */
-	rc = bnx2x_nic_load_analyze_req(bp, FW_MSG_CODE_DRV_LOAD_FUNCTION);
+	rc = bnx2x_compare_fw_ver(bp, FW_MSG_CODE_DRV_LOAD_FUNCTION, false);
 
 	if (!rc) {
 		/* fw version is good */
@@ -10142,10 +10255,17 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
 			else
 				timer_count--;
 
-			/* If UNDI resides in memory, manually increment it */
-			if (prev_undi)
+			/* New UNDI FW supports MF and contains better
+			 * cleaning methods - might be redundant but harmless.
+			 */
+			if (bnx2x_prev_unload_undi_fw_supports_mf(bp)) {
+				bnx2x_prev_unload_undi_mf(bp);
+			} else if (prev_undi) {
+				/* If UNDI resides in memory,
+				 * manually increment it
+				 */
 				bnx2x_prev_unload_undi_inc(bp, BP_PORT(bp), 1);
-
+			}
 			udelay(10);
 		}
 
@@ -10265,8 +10385,8 @@ static int bnx2x_prev_unload(struct bnx2x *bp)
 	} while (--time_counter);
 
 	if (!time_counter || rc) {
-		BNX2X_ERR("Failed unloading previous driver, aborting\n");
-		rc = -EBUSY;
+		BNX2X_DEV_INFO("Unloading previous driver did not occur, possibly due to MF UNDI\n");
+		rc = -EPROBE_DEFER;
 	}
 
 	/* Mark function if its port was used to boot from SAN */
@@ -11636,7 +11756,11 @@ static int bnx2x_init_bp(struct bnx2x *bp)
 							DRV_MSG_SEQ_NUMBER_MASK;
 		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
 
-		bnx2x_prev_unload(bp);
+		rc = bnx2x_prev_unload(bp);
+		if (rc) {
+			bnx2x_free_mem_bp(bp);
+			return rc;
+		}
 	}
 
 	if (CHIP_REV_IS_FPGA(bp))
@@ -11931,7 +12055,7 @@ static int bnx2x_set_mc_list(struct bnx2x *bp)
 }
 
 /* If bp->state is OPEN, should be called with netif_addr_lock_bh() */
-void bnx2x_set_rx_mode(struct net_device *dev)
+static void bnx2x_set_rx_mode(struct net_device *dev)
 {
 	struct bnx2x *bp = netdev_priv(dev);
 
@@ -12156,6 +12280,14 @@ static int bnx2x_set_coherency_mask(struct bnx2x *bp)
 	return 0;
 }
 
+static void bnx2x_disable_pcie_error_reporting(struct bnx2x *bp)
+{
+	if (bp->flags & AER_ENABLED) {
+		pci_disable_pcie_error_reporting(bp->pdev);
+		bp->flags &= ~AER_ENABLED;
+	}
+}
+
 static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
 			  struct net_device *dev, unsigned long board_type)
 {
@@ -12262,6 +12394,14 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
 	/* clean indirect addresses */
 	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
 			       PCICFG_VENDOR_ID_OFFSET);
+
+	/* AER (Advanced Error Reporting) configuration */
+	rc = pci_enable_pcie_error_reporting(pdev);
+	if (!rc)
+		bp->flags |= AER_ENABLED;
+	else
+		BNX2X_DEV_INFO("Failed to configure PCIe AER [%d]\n", rc);
+
 	/*
 	 * Clean the following indirect addresses for all functions since it
 	 * is not used by the driver.
@@ -12693,8 +12833,6 @@ static int set_is_vf(int chip_id)
 	}
 }
 
-struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev);
-
 static int bnx2x_init_one(struct pci_dev *pdev,
 				    const struct pci_device_id *ent)
 {
@@ -12869,6 +13007,8 @@ static int bnx2x_init_one(struct pci_dev *pdev,
 	return 0;
 
 init_one_exit:
+	bnx2x_disable_pcie_error_reporting(bp);
+
 	if (bp->regview)
 		iounmap(bp->regview);
 
@@ -12942,6 +13082,7 @@ static void __bnx2x_remove(struct pci_dev *pdev,
 		pci_set_power_state(pdev, PCI_D3hot);
 	}
 
+	bnx2x_disable_pcie_error_reporting(bp);
 	if (remove_netdev) {
 		if (bp->regview)
 			iounmap(bp->regview);
@@ -13120,6 +13261,14 @@ static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
 
 	rtnl_unlock();
 
+	/* If AER is enabled, perform cleanup of the PCIe error registers */
+	if (bp->flags & AER_ENABLED) {
+		if (pci_cleanup_aer_uncorrect_error_status(pdev))
+			BNX2X_ERR("pci_cleanup_aer_uncorrect_error_status failed\n");
+		else
+			DP(NETIF_MSG_HW, "pci_cleanup_aer_uncorrect_error_status succeeded\n");
+	}
+
 	return PCI_ERS_RESULT_RECOVERED;
 }
 
@@ -13758,7 +13907,7 @@ static int bnx2x_unregister_cnic(struct net_device *dev)
 	return 0;
 }
 
-struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
+static struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
 {
 	struct bnx2x *bp = netdev_priv(dev);
 	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
@@ -13808,7 +13957,7 @@ struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
 	return cp;
 }
 
-u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp)
+static u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp)
 {
 	struct bnx2x *bp = fp->bp;
 	u32 offset = BAR_USTRORM_INTMEM;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index 14ffb6e56e59..2beb5430b876 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -5932,6 +5932,7 @@
 #define MISC_REGISTERS_RESET_REG_1_RST_NIG			 (0x1<<7)
 #define MISC_REGISTERS_RESET_REG_1_RST_PXP			 (0x1<<26)
 #define MISC_REGISTERS_RESET_REG_1_RST_PXPV			 (0x1<<27)
+#define MISC_REGISTERS_RESET_REG_1_RST_XSEM			 (0x1<<22)
 #define MISC_REGISTERS_RESET_REG_1_SET				 0x584
 #define MISC_REGISTERS_RESET_REG_2_CLEAR			 0x598
 #define MISC_REGISTERS_RESET_REG_2_MSTAT0			 (0x1<<24)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 18438a504d57..0fb6ff2ac8e3 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -355,23 +355,6 @@ static bool bnx2x_get_credit_vlan(struct bnx2x_vlan_mac_obj *o)
 
 	return vp->get(vp, 1);
 }
-
-static bool bnx2x_get_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
-{
-	struct bnx2x_credit_pool_obj *mp = o->macs_pool;
-	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
-
-	if (!mp->get(mp, 1))
-		return false;
-
-	if (!vp->get(vp, 1)) {
-		mp->put(mp, 1);
-		return false;
-	}
-
-	return true;
-}
-
 static bool bnx2x_put_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int offset)
 {
 	struct bnx2x_credit_pool_obj *mp = o->macs_pool;
@@ -400,22 +383,6 @@ static bool bnx2x_put_credit_vlan(struct bnx2x_vlan_mac_obj *o)
 	return vp->put(vp, 1);
 }
 
-static bool bnx2x_put_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
-{
-	struct bnx2x_credit_pool_obj *mp = o->macs_pool;
-	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
-
-	if (!mp->put(mp, 1))
-		return false;
-
-	if (!vp->put(vp, 1)) {
-		mp->get(mp, 1);
-		return false;
-	}
-
-	return true;
-}
-
 /**
  * __bnx2x_vlan_mac_h_write_trylock - try getting the vlan mac writer lock
  *
@@ -507,22 +474,6 @@ static void __bnx2x_vlan_mac_h_write_unlock(struct bnx2x *bp,
 	}
 }
 
-/**
- * bnx2x_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock
- *
- * @bp:			device handle
- * @o:			vlan_mac object
- *
- * @details Notice if a pending execution exists, it would perform it -
- *          possibly releasing and reclaiming the execution queue lock.
- */
-void bnx2x_vlan_mac_h_write_unlock(struct bnx2x *bp,
-				   struct bnx2x_vlan_mac_obj *o)
-{
-	spin_lock_bh(&o->exe_queue.lock);
-	__bnx2x_vlan_mac_h_write_unlock(bp, o);
-	spin_unlock_bh(&o->exe_queue.lock);
-}
 
 /**
  * __bnx2x_vlan_mac_h_read_lock - lock the vlan mac head list reader lock
@@ -663,7 +614,7 @@ static int bnx2x_check_mac_add(struct bnx2x *bp,
 
 	/* Check if a requested MAC already exists */
 	list_for_each_entry(pos, &o->head, link)
-		if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN) &&
+		if (ether_addr_equal(data->mac.mac, pos->u.mac.mac) &&
 		    (data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
 			return -EEXIST;
 
@@ -685,26 +636,6 @@ static int bnx2x_check_vlan_add(struct bnx2x *bp,
 	return 0;
 }
 
-static int bnx2x_check_vlan_mac_add(struct bnx2x *bp,
-				    struct bnx2x_vlan_mac_obj *o,
-				   union bnx2x_classification_ramrod_data *data)
-{
-	struct bnx2x_vlan_mac_registry_elem *pos;
-
-	DP(BNX2X_MSG_SP, "Checking VLAN_MAC (%pM, %d) for ADD command\n",
-	   data->vlan_mac.mac, data->vlan_mac.vlan);
-
-	list_for_each_entry(pos, &o->head, link)
-		if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
-		    (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
-				  ETH_ALEN)) &&
-		    (data->vlan_mac.is_inner_mac ==
-		     pos->u.vlan_mac.is_inner_mac))
-			return -EEXIST;
-
-	return 0;
-}
-
 /* check_del() callbacks */
 static struct bnx2x_vlan_mac_registry_elem *
 	bnx2x_check_mac_del(struct bnx2x *bp,
@@ -716,7 +647,7 @@ static struct bnx2x_vlan_mac_registry_elem *
 	DP(BNX2X_MSG_SP, "Checking MAC %pM for DEL command\n", data->mac.mac);
 
 	list_for_each_entry(pos, &o->head, link)
-		if ((!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN)) &&
+		if (ether_addr_equal(data->mac.mac, pos->u.mac.mac) &&
 		    (data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
 			return pos;
 
@@ -739,27 +670,6 @@ static struct bnx2x_vlan_mac_registry_elem *
 	return NULL;
 }
 
-static struct bnx2x_vlan_mac_registry_elem *
-	bnx2x_check_vlan_mac_del(struct bnx2x *bp,
-				 struct bnx2x_vlan_mac_obj *o,
-				 union bnx2x_classification_ramrod_data *data)
-{
-	struct bnx2x_vlan_mac_registry_elem *pos;
-
-	DP(BNX2X_MSG_SP, "Checking VLAN_MAC (%pM, %d) for DEL command\n",
-	   data->vlan_mac.mac, data->vlan_mac.vlan);
-
-	list_for_each_entry(pos, &o->head, link)
-		if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
-		    (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
-			     ETH_ALEN)) &&
-		    (data->vlan_mac.is_inner_mac ==
-		     pos->u.vlan_mac.is_inner_mac))
-			return pos;
-
-	return NULL;
-}
-
 /* check_move() callback */
 static bool bnx2x_check_move(struct bnx2x *bp,
 			     struct bnx2x_vlan_mac_obj *src_o,
@@ -811,8 +721,8 @@ static inline u8 bnx2x_vlan_mac_get_rx_tx_flag(struct bnx2x_vlan_mac_obj *o)
 	return rx_tx_flag;
 }
 
-void bnx2x_set_mac_in_nig(struct bnx2x *bp,
-			  bool add, unsigned char *dev_addr, int index)
+static void bnx2x_set_mac_in_nig(struct bnx2x *bp,
+				 bool add, unsigned char *dev_addr, int index)
 {
 	u32 wb_data[2];
 	u32 reg_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM :
@@ -1126,97 +1036,6 @@ static void bnx2x_set_one_vlan_e2(struct bnx2x *bp,
 					rule_cnt);
 }
 
-static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp,
-				      struct bnx2x_vlan_mac_obj *o,
-				      struct bnx2x_exeq_elem *elem,
-				      int rule_idx, int cam_offset)
-{
-	struct bnx2x_raw_obj *raw = &o->raw;
-	struct eth_classify_rules_ramrod_data *data =
-		(struct eth_classify_rules_ramrod_data *)(raw->rdata);
-	int rule_cnt = rule_idx + 1;
-	union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
-	enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
-	bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
-	u16 vlan = elem->cmd_data.vlan_mac.u.vlan_mac.vlan;
-	u8 *mac = elem->cmd_data.vlan_mac.u.vlan_mac.mac;
-
-	/* Reset the ramrod data buffer for the first rule */
-	if (rule_idx == 0)
-		memset(data, 0, sizeof(*data));
-
-	/* Set a rule header */
-	bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_PAIR,
-				      &rule_entry->pair.header);
-
-	/* Set VLAN and MAC themselves */
-	rule_entry->pair.vlan = cpu_to_le16(vlan);
-	bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
-			      &rule_entry->pair.mac_mid,
-			      &rule_entry->pair.mac_lsb, mac);
-	rule_entry->pair.inner_mac =
-		cpu_to_le16(elem->cmd_data.vlan_mac.u.vlan_mac.is_inner_mac);
-	/* MOVE: Add a rule that will add this MAC to the target Queue */
-	if (cmd == BNX2X_VLAN_MAC_MOVE) {
-		rule_entry++;
-		rule_cnt++;
-
-		/* Setup ramrod data */
-		bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
-					elem->cmd_data.vlan_mac.target_obj,
-					      true, CLASSIFY_RULE_OPCODE_PAIR,
-					      &rule_entry->pair.header);
-
-		/* Set a VLAN itself */
-		rule_entry->pair.vlan = cpu_to_le16(vlan);
-		bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
-				      &rule_entry->pair.mac_mid,
-				      &rule_entry->pair.mac_lsb, mac);
-		rule_entry->pair.inner_mac =
-			cpu_to_le16(elem->cmd_data.vlan_mac.u.
-						vlan_mac.is_inner_mac);
-	}
-
-	/* Set the ramrod data header */
-	/* TODO: take this to the higher level in order to prevent multiple
-		 writing */
-	bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
-					rule_cnt);
-}
-
-/**
- * bnx2x_set_one_vlan_mac_e1h -
- *
- * @bp:		device handle
- * @o:		bnx2x_vlan_mac_obj
- * @elem:	bnx2x_exeq_elem
- * @rule_idx:	rule_idx
- * @cam_offset:	cam_offset
- */
-static void bnx2x_set_one_vlan_mac_e1h(struct bnx2x *bp,
-				       struct bnx2x_vlan_mac_obj *o,
-				       struct bnx2x_exeq_elem *elem,
-				       int rule_idx, int cam_offset)
-{
-	struct bnx2x_raw_obj *raw = &o->raw;
-	struct mac_configuration_cmd *config =
-		(struct mac_configuration_cmd *)(raw->rdata);
-	/* 57710 and 57711 do not support MOVE command,
-	 * so it's either ADD or DEL
-	 */
-	bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
-		true : false;
-
-	/* Reset the ramrod data buffer */
-	memset(config, 0, sizeof(*config));
-
-	bnx2x_vlan_mac_set_rdata_e1x(bp, o, BNX2X_FILTER_VLAN_MAC_PENDING,
-				     cam_offset, add,
-				     elem->cmd_data.vlan_mac.u.vlan_mac.mac,
-				     elem->cmd_data.vlan_mac.u.vlan_mac.vlan,
-				     ETH_VLAN_FILTER_CLASSIFY, config);
-}
-
 /**
  * bnx2x_vlan_mac_restore - reconfigure next MAC/VLAN/VLAN-MAC element
  *
@@ -1316,24 +1135,6 @@ static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan(
 	return NULL;
 }
 
-static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan_mac(
-	struct bnx2x_exe_queue_obj *o,
-	struct bnx2x_exeq_elem *elem)
-{
-	struct bnx2x_exeq_elem *pos;
-	struct bnx2x_vlan_mac_ramrod_data *data =
-		&elem->cmd_data.vlan_mac.u.vlan_mac;
-
-	/* Check pending for execution commands */
-	list_for_each_entry(pos, &o->exe_queue, link)
-		if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan_mac, data,
-			      sizeof(*data)) &&
-		    (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
-			return pos;
-
-	return NULL;
-}
-
 /**
  * bnx2x_validate_vlan_mac_add - check if an ADD command can be executed
  *
@@ -2241,69 +2042,6 @@ void bnx2x_init_vlan_obj(struct bnx2x *bp,
 	}
 }
 
-void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
-			     struct bnx2x_vlan_mac_obj *vlan_mac_obj,
-			     u8 cl_id, u32 cid, u8 func_id, void *rdata,
-			     dma_addr_t rdata_mapping, int state,
-			     unsigned long *pstate, bnx2x_obj_type type,
-			     struct bnx2x_credit_pool_obj *macs_pool,
-			     struct bnx2x_credit_pool_obj *vlans_pool)
-{
-	union bnx2x_qable_obj *qable_obj =
-		(union bnx2x_qable_obj *)vlan_mac_obj;
-
-	bnx2x_init_vlan_mac_common(vlan_mac_obj, cl_id, cid, func_id, rdata,
-				   rdata_mapping, state, pstate, type,
-				   macs_pool, vlans_pool);
-
-	/* CAM pool handling */
-	vlan_mac_obj->get_credit = bnx2x_get_credit_vlan_mac;
-	vlan_mac_obj->put_credit = bnx2x_put_credit_vlan_mac;
-	/* CAM offset is relevant for 57710 and 57711 chips only which have a
-	 * single CAM for both MACs and VLAN-MAC pairs. So the offset
-	 * will be taken from MACs' pool object only.
-	 */
-	vlan_mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
-	vlan_mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;
-
-	if (CHIP_IS_E1(bp)) {
-		BNX2X_ERR("Do not support chips others than E2\n");
-		BUG();
-	} else if (CHIP_IS_E1H(bp)) {
-		vlan_mac_obj->set_one_rule      = bnx2x_set_one_vlan_mac_e1h;
-		vlan_mac_obj->check_del         = bnx2x_check_vlan_mac_del;
-		vlan_mac_obj->check_add         = bnx2x_check_vlan_mac_add;
-		vlan_mac_obj->check_move        = bnx2x_check_move_always_err;
-		vlan_mac_obj->ramrod_cmd        = RAMROD_CMD_ID_ETH_SET_MAC;
-
-		/* Exe Queue */
-		bnx2x_exe_queue_init(bp,
-				     &vlan_mac_obj->exe_queue, 1, qable_obj,
-				     bnx2x_validate_vlan_mac,
-				     bnx2x_remove_vlan_mac,
-				     bnx2x_optimize_vlan_mac,
-				     bnx2x_execute_vlan_mac,
-				     bnx2x_exeq_get_vlan_mac);
-	} else {
-		vlan_mac_obj->set_one_rule      = bnx2x_set_one_vlan_mac_e2;
-		vlan_mac_obj->check_del         = bnx2x_check_vlan_mac_del;
-		vlan_mac_obj->check_add         = bnx2x_check_vlan_mac_add;
-		vlan_mac_obj->check_move        = bnx2x_check_move;
-		vlan_mac_obj->ramrod_cmd        =
-			RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
-
-		/* Exe Queue */
-		bnx2x_exe_queue_init(bp,
-				     &vlan_mac_obj->exe_queue,
-				     CLASSIFY_RULES_COUNT,
-				     qable_obj, bnx2x_validate_vlan_mac,
-				     bnx2x_remove_vlan_mac,
-				     bnx2x_optimize_vlan_mac,
-				     bnx2x_execute_vlan_mac,
-				     bnx2x_exeq_get_vlan_mac);
-	}
-}
-
 /* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
 static inline void __storm_memset_mac_filters(struct bnx2x *bp,
 			struct tstorm_eth_mac_filter_config *mac_filters,
@@ -4990,6 +4728,13 @@ static void bnx2x_q_fill_update_data(struct bnx2x *bp,
 		test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, &params->update_flags);
 	data->silent_vlan_value = cpu_to_le16(params->silent_removal_value);
 	data->silent_vlan_mask = cpu_to_le16(params->silent_removal_mask);
+
+	/* tx switching */
+	data->tx_switching_flg =
+		test_bit(BNX2X_Q_UPDATE_TX_SWITCHING, &params->update_flags);
+	data->tx_switching_change_flg =
+		test_bit(BNX2X_Q_UPDATE_TX_SWITCHING_CHNG,
+			 &params->update_flags);
 }
 
 static inline int bnx2x_q_send_update(struct bnx2x *bp,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
index 6a53c15c85a3..00d7f214a40a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
@@ -448,9 +448,6 @@ enum {
 	BNX2X_LLH_CAM_MAX_PF_LINE = NIG_REG_LLH1_FUNC_MEM_SIZE / 2
 };
 
-void bnx2x_set_mac_in_nig(struct bnx2x *bp,
-			  bool add, unsigned char *dev_addr, int index);
-
 /** RX_MODE verbs:DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
 
 /* RX_MODE ramrod special flags: set in rx_mode_flags field in
@@ -770,7 +767,9 @@ enum {
 	BNX2X_Q_UPDATE_DEF_VLAN_EN,
 	BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
 	BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
-	BNX2X_Q_UPDATE_SILENT_VLAN_REM
+	BNX2X_Q_UPDATE_SILENT_VLAN_REM,
+	BNX2X_Q_UPDATE_TX_SWITCHING_CHNG,
+	BNX2X_Q_UPDATE_TX_SWITCHING
 };
 
 /* Allowed Queue states */
@@ -1307,22 +1306,12 @@ void bnx2x_init_vlan_obj(struct bnx2x *bp,
 			 unsigned long *pstate, bnx2x_obj_type type,
 			 struct bnx2x_credit_pool_obj *vlans_pool);
 
-void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
-			     struct bnx2x_vlan_mac_obj *vlan_mac_obj,
-			     u8 cl_id, u32 cid, u8 func_id, void *rdata,
-			     dma_addr_t rdata_mapping, int state,
-			     unsigned long *pstate, bnx2x_obj_type type,
-			     struct bnx2x_credit_pool_obj *macs_pool,
-			     struct bnx2x_credit_pool_obj *vlans_pool);
-
 int bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp,
 					struct bnx2x_vlan_mac_obj *o);
 void bnx2x_vlan_mac_h_read_unlock(struct bnx2x *bp,
 				  struct bnx2x_vlan_mac_obj *o);
 int bnx2x_vlan_mac_h_write_lock(struct bnx2x *bp,
 				struct bnx2x_vlan_mac_obj *o);
-void bnx2x_vlan_mac_h_write_unlock(struct bnx2x *bp,
-					  struct bnx2x_vlan_mac_obj *o);
 int bnx2x_config_vlan_mac(struct bnx2x *bp,
 			   struct bnx2x_vlan_mac_ramrod_params *p);
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index e7845e5be1c7..aec5ef2ed7ce 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -166,6 +166,7 @@ enum bnx2x_vfop_qteardown_state {
 	   BNX2X_VFOP_QTEARDOWN_RXMODE,
 	   BNX2X_VFOP_QTEARDOWN_CLR_VLAN,
 	   BNX2X_VFOP_QTEARDOWN_CLR_MAC,
+	   BNX2X_VFOP_QTEARDOWN_CLR_MCAST,
 	   BNX2X_VFOP_QTEARDOWN_QDTOR,
 	   BNX2X_VFOP_QTEARDOWN_DONE
 };
@@ -617,7 +618,7 @@ static void bnx2x_vfop_vlan_mac(struct bnx2x *bp, struct bnx2x_virtf *vf)
 					   &vlan_mac->user_req.vlan_mac_flags,
 					   &vlan_mac->ramrod_flags);
 
-		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
+		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
 
 	case BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE:
 		/* next state */
@@ -628,7 +629,7 @@ static void bnx2x_vfop_vlan_mac(struct bnx2x *bp, struct bnx2x_virtf *vf)
 		if (vfop->rc == -EEXIST)
 			vfop->rc = 0;
 
-		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
+		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
 
 	case BNX2X_VFOP_VLAN_MAC_CHK_DONE:
 		vfop->rc = !!obj->raw.check_pending(&obj->raw);
@@ -645,7 +646,7 @@ static void bnx2x_vfop_vlan_mac(struct bnx2x *bp, struct bnx2x_virtf *vf)
 
 		set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags);
 		vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
-		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
+		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
 
 	case BNX2X_VFOP_VLAN_CONFIG_LIST:
 		/* next state */
@@ -657,7 +658,7 @@ static void bnx2x_vfop_vlan_mac(struct bnx2x *bp, struct bnx2x_virtf *vf)
 			set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags);
 			vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
 		}
-		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
+		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
 
 	default:
 		bnx2x_vfop_default(state);
@@ -798,10 +799,10 @@ int bnx2x_vfop_mac_list_cmd(struct bnx2x *bp,
 	return -ENOMEM;
 }
 
-int bnx2x_vfop_vlan_set_cmd(struct bnx2x *bp,
-			    struct bnx2x_virtf *vf,
-			    struct bnx2x_vfop_cmd *cmd,
-			    int qid, u16 vid, bool add)
+static int bnx2x_vfop_vlan_set_cmd(struct bnx2x *bp,
+				   struct bnx2x_virtf *vf,
+				   struct bnx2x_vfop_cmd *cmd,
+				   int qid, u16 vid, bool add)
 {
 	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
 	int rc;
@@ -1023,25 +1024,35 @@ static void bnx2x_vfop_qflr(struct bnx2x *bp, struct bnx2x_virtf *vf)
 	case BNX2X_VFOP_QFLR_CLR_VLAN:
 		/* vlan-clear-all: driver-only, don't consume credit */
 		vfop->state = BNX2X_VFOP_QFLR_CLR_MAC;
-		if (!validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj)))
-			vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd, qid,
-							      true);
-		if (vfop->rc)
-			goto op_err;
-		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
+
+		if (!validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj))) {
+			/* the vlan_mac vfop will re-schedule us */
+			vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd,
+							      qid, true);
+			if (vfop->rc)
+				goto op_err;
+			return;
+
+		} else {
+			/* need to reschedule ourselves */
+			bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
+		}
 
 	case BNX2X_VFOP_QFLR_CLR_MAC:
 		/* mac-clear-all: driver only consume credit */
 		vfop->state = BNX2X_VFOP_QFLR_TERMINATE;
-		if (!validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, mac_obj)))
-			vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd, qid,
-							     true);
-		DP(BNX2X_MSG_IOV,
-		   "VF[%d] vfop->rc after bnx2x_vfop_mac_delall_cmd was %d",
-		   vf->abs_vfid, vfop->rc);
-		if (vfop->rc)
-			goto op_err;
-		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
+		if (!validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, mac_obj))) {
+			/* the vlan_mac vfop will re-schedule us */
+			vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd,
+							     qid, true);
+			if (vfop->rc)
+				goto op_err;
+			return;
+
+		} else {
+			/* need to reschedule ourselves */
+			bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
+		}
 
 	case BNX2X_VFOP_QFLR_TERMINATE:
 		qstate = &vfop->op_p->qctor.qstate;
@@ -1112,7 +1123,10 @@ static void bnx2x_vfop_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf)
 	switch (state) {
 	case BNX2X_VFOP_MCAST_DEL:
 		/* clear existing mcasts */
-		vfop->state = BNX2X_VFOP_MCAST_ADD;
+		vfop->state = (args->mc_num) ? BNX2X_VFOP_MCAST_ADD
+					     : BNX2X_VFOP_MCAST_CHK_DONE;
+		mcast->mcast_list_len = vf->mcast_list_len;
+		vf->mcast_list_len = args->mc_num;
 		vfop->rc = bnx2x_config_mcast(bp, mcast, BNX2X_MCAST_CMD_DEL);
 		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
 
@@ -1120,17 +1134,17 @@ static void bnx2x_vfop_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf)
 		if (raw->check_pending(raw))
 			goto op_pending;
 
-		if (args->mc_num) {
-			/* update mcast list on the ramrod params */
-			INIT_LIST_HEAD(&mcast->mcast_list);
-			for (i = 0; i < args->mc_num; i++)
-				list_add_tail(&(args->mc[i].link),
-					      &mcast->mcast_list);
-			/* add new mcasts */
-			vfop->state = BNX2X_VFOP_MCAST_CHK_DONE;
-			vfop->rc = bnx2x_config_mcast(bp, mcast,
-						      BNX2X_MCAST_CMD_ADD);
-		}
+		/* update mcast list on the ramrod params */
+		INIT_LIST_HEAD(&mcast->mcast_list);
+		for (i = 0; i < args->mc_num; i++)
+			list_add_tail(&(args->mc[i].link),
+				      &mcast->mcast_list);
+		mcast->mcast_list_len = args->mc_num;
+
+		/* add new mcasts */
+		vfop->state = BNX2X_VFOP_MCAST_CHK_DONE;
+		vfop->rc = bnx2x_config_mcast(bp, mcast,
+					      BNX2X_MCAST_CMD_ADD);
 		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
 
 	case BNX2X_VFOP_MCAST_CHK_DONE:
@@ -1312,12 +1326,19 @@ static void bnx2x_vfop_qdown(struct bnx2x *bp, struct bnx2x_virtf *vf)
 
 	case BNX2X_VFOP_QTEARDOWN_CLR_MAC:
 		/* mac-clear-all: consume credit */
-		vfop->state = BNX2X_VFOP_QTEARDOWN_QDTOR;
+		vfop->state = BNX2X_VFOP_QTEARDOWN_CLR_MCAST;
 		vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd, qid, false);
 		if (vfop->rc)
 			goto op_err;
 		return;
 
+	case BNX2X_VFOP_QTEARDOWN_CLR_MCAST:
+		vfop->state = BNX2X_VFOP_QTEARDOWN_QDTOR;
+		vfop->rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, NULL, 0, false);
+		if (vfop->rc)
+			goto op_err;
+		return;
+
 	case BNX2X_VFOP_QTEARDOWN_QDTOR:
 		/* run the queue destruction flow */
 		DP(BNX2X_MSG_IOV, "case: BNX2X_VFOP_QTEARDOWN_QDTOR\n");
@@ -2197,6 +2218,7 @@ int bnx2x_iov_nic_init(struct bnx2x *bp)
 		 *  It needs to be initialized here so that it can be safely
 		 *  handled by a subsequent FLR flow.
 		 */
+		vf->mcast_list_len = 0;
 		bnx2x_init_mcast_obj(bp, &vf->mcast_obj, 0xFF,
 				     0xFF, 0xFF, 0xFF,
 				     bnx2x_vf_sp(bp, vf, mcast_rdata),
@@ -2372,8 +2394,9 @@ int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem)
 		goto get_vf;
 	case EVENT_RING_OPCODE_MALICIOUS_VF:
 		abs_vfid = elem->message.data.malicious_vf_event.vf_id;
-		DP(BNX2X_MSG_IOV, "Got VF MALICIOUS notification abs_vfid=%d err_id=0x%x\n",
-		   abs_vfid, elem->message.data.malicious_vf_event.err_id);
+		BNX2X_ERR("Got VF MALICIOUS notification abs_vfid=%d err_id=0x%x\n",
+			  abs_vfid,
+			  elem->message.data.malicious_vf_event.err_id);
 		goto get_vf;
 	default:
 		return 1;
@@ -2425,15 +2448,9 @@ get_vf:
 		bnx2x_vf_handle_filters_eqe(bp, vf);
 		break;
 	case EVENT_RING_OPCODE_VF_FLR:
-		DP(BNX2X_MSG_IOV, "got VF [%d] FLR notification\n",
-		   vf->abs_vfid);
-		/* Do nothing for now */
-		break;
 	case EVENT_RING_OPCODE_MALICIOUS_VF:
-		DP(BNX2X_MSG_IOV, "Got VF MALICIOUS notification abs_vfid=%d error id %x\n",
-		   abs_vfid, elem->message.data.malicious_vf_event.err_id);
 		/* Do nothing for now */
-		break;
+		return 0;
 	}
 	/* SRIOV: reschedule any 'in_progress' operations */
 	bnx2x_iov_sp_event(bp, cid, false);
@@ -2857,13 +2874,9 @@ static void bnx2x_vfop_close(struct bnx2x *bp, struct bnx2x_virtf *vf)
 				goto op_err;
 			return;
 		}
-
-		/* remove multicasts */
 		vfop->state = BNX2X_VFOP_CLOSE_HW;
-		vfop->rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, NULL, 0, false);
-		if (vfop->rc)
-			goto op_err;
-		return;
+		vfop->rc = 0;
+		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
 
 	case BNX2X_VFOP_CLOSE_HW:
 
@@ -2897,6 +2910,9 @@ op_done:
 
 	DP(BNX2X_MSG_IOV, "set state to acquired\n");
 	bnx2x_vfop_end(bp, vf, vfop);
+op_pending:
+	/* Not supported at the moment; exists only for the macros */
+	return;
 }
 
 int bnx2x_vfop_close_cmd(struct bnx2x *bp,
@@ -3119,6 +3135,60 @@ void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
 	   vf->abs_vfid, vf->op_current);
 }
 
+static int bnx2x_set_pf_tx_switching(struct bnx2x *bp, bool enable)
+{
+	struct bnx2x_queue_state_params q_params;
+	u32 prev_flags;
+	int i, rc;
+
+	/* Verify changes are needed and record current Tx switching state */
+	prev_flags = bp->flags;
+	if (enable)
+		bp->flags |= TX_SWITCHING;
+	else
+		bp->flags &= ~TX_SWITCHING;
+	if (prev_flags == bp->flags)
+		return 0;
+
+	/* Verify state enables the sending of queue ramrods */
+	if ((bp->state != BNX2X_STATE_OPEN) ||
+	    (bnx2x_get_q_logical_state(bp,
+				      &bnx2x_sp_obj(bp, &bp->fp[0]).q_obj) !=
+	     BNX2X_Q_LOGICAL_STATE_ACTIVE))
+		return 0;
+
+	/* send q. update ramrod to configure Tx switching */
+	memset(&q_params, 0, sizeof(q_params));
+	__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
+	q_params.cmd = BNX2X_Q_CMD_UPDATE;
+	__set_bit(BNX2X_Q_UPDATE_TX_SWITCHING_CHNG,
+		  &q_params.params.update.update_flags);
+	if (enable)
+		__set_bit(BNX2X_Q_UPDATE_TX_SWITCHING,
+			  &q_params.params.update.update_flags);
+	else
+		__clear_bit(BNX2X_Q_UPDATE_TX_SWITCHING,
+			    &q_params.params.update.update_flags);
+
+	/* send the ramrod on all the queues of the PF */
+	for_each_eth_queue(bp, i) {
+		struct bnx2x_fastpath *fp = &bp->fp[i];
+
+		/* Set the appropriate Queue object */
+		q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
+
+		/* Update the Queue state */
+		rc = bnx2x_queue_state_change(bp, &q_params);
+		if (rc) {
+			BNX2X_ERR("Failed to configure Tx switching\n");
+			return rc;
+		}
+	}
+
+	DP(BNX2X_MSG_IOV, "%s Tx Switching\n", enable ? "Enabled" : "Disabled");
+	return 0;
+}
+
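
The queue-update ramrod above follows the driver's usual two-bit convention: the *_CHNG flag tells the firmware that a field is being modified, while the companion flag carries the new value (see the tx_switching flags added to bnx2x_q_fill_update_data earlier in this patch). A minimal standalone sketch of that encoding, with invented flag positions and not driver code:

	#include <stdbool.h>
	#include <stdio.h>

	#define EX_TX_SWITCHING		(1u << 0)	/* new value */
	#define EX_TX_SWITCHING_CHNG	(1u << 1)	/* "field is being updated" marker */

	static unsigned int encode_tx_switching(bool enable)
	{
		unsigned int flags = EX_TX_SWITCHING_CHNG;

		if (enable)
			flags |= EX_TX_SWITCHING;
		return flags;
	}

	int main(void)
	{
		printf("enable : 0x%x\n", encode_tx_switching(true));	/* 0x3 */
		printf("disable: 0x%x\n", encode_tx_switching(false));	/* 0x2 */
		return 0;
	}
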
 int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param)
 {
 	struct bnx2x *bp = netdev_priv(pci_get_drvdata(dev));
@@ -3146,12 +3216,14 @@ int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param)
 
 	bp->requested_nr_virtfn = num_vfs_param;
 	if (num_vfs_param == 0) {
+		bnx2x_set_pf_tx_switching(bp, false);
 		pci_disable_sriov(dev);
 		return 0;
 	} else {
 		return bnx2x_enable_sriov(bp);
 	}
 }
+
 #define IGU_ENTRY_SIZE 4
 
 int bnx2x_enable_sriov(struct bnx2x *bp)
@@ -3229,6 +3301,11 @@ int bnx2x_enable_sriov(struct bnx2x *bp)
 	 */
 	DP(BNX2X_MSG_IOV, "about to call enable sriov\n");
 	bnx2x_disable_sriov(bp);
+
+	rc = bnx2x_set_pf_tx_switching(bp, true);
+	if (rc)
+		return rc;
+
 	rc = pci_enable_sriov(bp->pdev, req_vfs);
 	if (rc) {
 		BNX2X_ERR("pci_enable_sriov failed with %d\n", rc);
@@ -3639,7 +3716,7 @@ enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp)
 
 	/* the mac address in bulletin board is valid and is new */
 	if (bulletin.valid_bitmap & 1 << MAC_ADDR_VALID &&
-	    memcmp(bulletin.mac, bp->old_bulletin.mac, ETH_ALEN)) {
+	    !ether_addr_equal(bulletin.mac, bp->old_bulletin.mac)) {
 		/* update new mac to net device */
 		memcpy(bp->dev->dev_addr, bulletin.mac, ETH_ALEN);
 	}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
index 8c213fa52174..d9fcca1b5a9d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -269,6 +269,7 @@ struct bnx2x_virtf {
 	int leading_rss;
 
 	/* MCAST object */
+	int mcast_list_len;
 	struct bnx2x_mcast_obj		mcast_obj;
 
 	/* RSS configuration object */
@@ -664,11 +665,6 @@ int bnx2x_vfop_mac_list_cmd(struct bnx2x *bp,
 			    struct bnx2x_vfop_filters *macs,
 			    int qid, bool drv_only);
 
-int bnx2x_vfop_vlan_set_cmd(struct bnx2x *bp,
-			    struct bnx2x_virtf *vf,
-			    struct bnx2x_vfop_cmd *cmd,
-			    int qid, u16 vid, bool add);
-
 int bnx2x_vfop_vlan_list_cmd(struct bnx2x *bp,
 			     struct bnx2x_virtf *vf,
 			     struct bnx2x_vfop_cmd *cmd,
@@ -726,13 +722,6 @@ void bnx2x_vf_enable_access(struct bnx2x *bp, u8 abs_vfid);
 /* Handles an FLR (or VF_DISABLE) notification form the MCP */
 void bnx2x_vf_handle_flr_event(struct bnx2x *bp);
 
-void bnx2x_add_tlv(struct bnx2x *bp, void *tlvs_list, u16 offset, u16 type,
-		   u16 length);
-void bnx2x_vfpf_prep(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv,
-		     u16 type, u16 length);
-void bnx2x_vfpf_finalize(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv);
-void bnx2x_dp_tlv_list(struct bnx2x *bp, void *tlvs_list);
-
 bool bnx2x_tlv_supported(u16 tlvtype);
 
 u32 bnx2x_crc_vf_bulletin(struct bnx2x *bp,
@@ -749,7 +738,6 @@ int bnx2x_vfpf_init(struct bnx2x *bp);
 void bnx2x_vfpf_close_vf(struct bnx2x *bp);
 int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 		       bool is_leading);
-int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx);
 int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, u8 vf_qid, bool set);
 int bnx2x_vfpf_config_rss(struct bnx2x *bp,
 			  struct bnx2x_config_rss_params *params);
@@ -813,7 +801,6 @@ static inline int bnx2x_vfpf_release(struct bnx2x *bp) {return 0; }
 static inline int bnx2x_vfpf_init(struct bnx2x *bp) {return 0; }
 static inline void bnx2x_vfpf_close_vf(struct bnx2x *bp) {}
 static inline int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp, bool is_leading) {return 0; }
-static inline int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx) {return 0; }
 static inline int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr,
 					u8 vf_qid, bool set) {return 0; }
 static inline int bnx2x_vfpf_config_rss(struct bnx2x *bp,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index 0756d7dabdd5..3fa6c2a2a5a9 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -21,9 +21,11 @@
 #include "bnx2x_cmn.h"
 #include <linux/crc32.h>
 
+static int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx);
+
 /* place a given tlv on the tlv buffer at a given offset */
-void bnx2x_add_tlv(struct bnx2x *bp, void *tlvs_list, u16 offset, u16 type,
-		   u16 length)
+static void bnx2x_add_tlv(struct bnx2x *bp, void *tlvs_list,
+			  u16 offset, u16 type, u16 length)
 {
 	struct channel_tlv *tl =
 		(struct channel_tlv *)(tlvs_list + offset);
@@ -33,8 +35,8 @@ void bnx2x_add_tlv(struct bnx2x *bp, void *tlvs_list, u16 offset, u16 type,
 }
 
 /* Clear the mailbox and init the header of the first tlv */
-void bnx2x_vfpf_prep(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv,
-		     u16 type, u16 length)
+static void bnx2x_vfpf_prep(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv,
+			    u16 type, u16 length)
 {
 	mutex_lock(&bp->vf2pf_mutex);
 
@@ -52,7 +54,8 @@ void bnx2x_vfpf_prep(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv,
 }
 
 /* releases the mailbox */
-void bnx2x_vfpf_finalize(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv)
+static void bnx2x_vfpf_finalize(struct bnx2x *bp,
+				struct vfpf_first_tlv *first_tlv)
 {
 	DP(BNX2X_MSG_IOV, "done sending [%d] tlv over vf pf channel\n",
 	   first_tlv->tl.type);
@@ -85,7 +88,7 @@ static void *bnx2x_search_tlv_list(struct bnx2x *bp, void *tlvs_list,
 }
 
 /* list the types and lengths of the tlvs on the buffer */
-void bnx2x_dp_tlv_list(struct bnx2x *bp, void *tlvs_list)
+static void bnx2x_dp_tlv_list(struct bnx2x *bp, void *tlvs_list)
 {
 	int i = 1;
 	struct channel_tlv *tlv = (struct channel_tlv *)tlvs_list;
@@ -633,7 +636,7 @@ int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 	return rc;
 }
 
-int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx)
+static int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx)
 {
 	struct vfpf_q_op_tlv *req = &bp->vf2pf_mbox->req.q_op;
 	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
@@ -800,14 +803,18 @@ int bnx2x_vfpf_config_rss(struct bnx2x *bp,
 	}
 
 	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
-		BNX2X_ERR("failed to send rss message to PF over Vf PF channel %d\n",
-			  resp->hdr.status);
-		rc = -EINVAL;
+		/* Since older drivers don't support this feature (and the VF
+		 * has no way of knowing other than by this failure), don't
+		 * propagate an error in this case.
+		 */
+		DP(BNX2X_MSG_IOV,
+		   "Failed to send rss message to PF over VF-PF channel [%d]\n",
+		   resp->hdr.status);
 	}
 out:
 	bnx2x_vfpf_finalize(bp, &req->first_tlv);
 
-	return 0;
+	return rc;
 }
 
 int bnx2x_vfpf_set_mcast(struct net_device *dev)
@@ -1416,6 +1423,14 @@ static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
 				setup_q->rxq.cache_line_log;
 			rxq_params->sb_cq_index = setup_q->rxq.sb_index;
 
+			/* rx setup - multicast engine */
+			if (bnx2x_vfq_is_leading(q)) {
+				u8 mcast_id = FW_VF_HANDLE(vf->abs_vfid);
+
+				rxq_params->mcast_engine_id = mcast_id;
+				__set_bit(BNX2X_Q_FLG_MCAST, &setup_p->flags);
+			}
+
 			bnx2x_vfop_qctor_dump_rx(bp, vf, init_p, setup_p,
 						 q->index, q->sb_idx);
 		}
@@ -1706,7 +1721,7 @@ static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp,
 
 		/* ...and only the mac set by the ndo */
 		if (filters->n_mac_vlan_filters == 1 &&
-		    memcmp(filters->filters->mac, bulletin->mac, ETH_ALEN)) {
+		    !ether_addr_equal(filters->filters->mac, bulletin->mac)) {
 			BNX2X_ERR("VF[%d] requested the addition of a mac address not matching the one configured by set_vf_mac ndo\n",
 				  vf->abs_vfid);
 
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index f58a8b80302d..fcf9105a5476 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -5220,6 +5220,7 @@ static void cnic_init_rings(struct cnic_dev *dev)
 		cnic_ring_ctl(dev, cid, cli, 1);
 		*cid_ptr = cid >> 4;
 		*(cid_ptr + 1) = cid * bp->db_size;
+		*(cid_ptr + 2) = UIO_USE_TX_DOORBELL;
 	}
 }
 
diff --git a/drivers/net/ethernet/broadcom/cnic.h b/drivers/net/ethernet/broadcom/cnic.h
index 0121a5d55192..0d6b13f854d9 100644
--- a/drivers/net/ethernet/broadcom/cnic.h
+++ b/drivers/net/ethernet/broadcom/cnic.h
@@ -186,6 +186,8 @@ struct kcq_info {
 	u16		(*hw_idx)(u16);
 };
 
+#define UIO_USE_TX_DOORBELL 0x017855DB
+
 struct cnic_uio_dev {
 	struct uio_info		cnic_uinfo;
 	u32			uio_dev;
diff --git a/drivers/net/ethernet/broadcom/cnic_if.h b/drivers/net/ethernet/broadcom/cnic_if.h
index ebbfe25acaa6..8cf6b1926069 100644
--- a/drivers/net/ethernet/broadcom/cnic_if.h
+++ b/drivers/net/ethernet/broadcom/cnic_if.h
@@ -14,8 +14,8 @@
 
 #include "bnx2x/bnx2x_mfw_req.h"
 
-#define CNIC_MODULE_VERSION	"2.5.18"
-#define CNIC_MODULE_RELDATE	"Sept 01, 2013"
+#define CNIC_MODULE_VERSION	"2.5.19"
+#define CNIC_MODULE_RELDATE	"December 19, 2013"
 
 #define CNIC_ULP_RDMA		0
 #define CNIC_ULP_ISCSI		1
diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c
index c2777712da99..b61c14ed9b8d 100644
--- a/drivers/net/ethernet/broadcom/sb1250-mac.c
+++ b/drivers/net/ethernet/broadcom/sb1250-mac.c
@@ -13,8 +13,7 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
  *
  *
  * This driver is designed for the Broadcom SiByte SOC built-in
@@ -36,7 +35,6 @@
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/skbuff.h>
-#include <linux/init.h>
 #include <linux/bitops.h>
 #include <linux/err.h>
 #include <linux/ethtool.h>
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 15a66e4b1f57..e2ca03e23dc1 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -25,7 +25,6 @@
 #include <linux/slab.h>
 #include <linux/delay.h>
 #include <linux/in.h>
-#include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/ioport.h>
 #include <linux/pci.h>
@@ -37,6 +36,7 @@
 #include <linux/mii.h>
 #include <linux/phy.h>
 #include <linux/brcmphy.h>
+#include <linux/if.h>
 #include <linux/if_vlan.h>
 #include <linux/ip.h>
 #include <linux/tcp.h>
@@ -94,10 +94,10 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
 
 #define DRV_MODULE_NAME		"tg3"
 #define TG3_MAJ_NUM			3
-#define TG3_MIN_NUM			134
+#define TG3_MIN_NUM			136
 #define DRV_MODULE_VERSION	\
 	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
-#define DRV_MODULE_RELDATE	"Sep 16, 2013"
+#define DRV_MODULE_RELDATE	"Jan 03, 2014"
 
 #define RESET_KIND_SHUTDOWN	0
 #define RESET_KIND_INIT		1
@@ -208,6 +208,9 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
 
 #define TG3_RAW_IP_ALIGN 2
 
+#define TG3_MAX_UCAST_ADDR(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 3)
+#define TG3_UCAST_ADDR_IDX(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 1)
+
 #define TG3_FW_UPDATE_TIMEOUT_SEC	5
 #define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)
 
@@ -3948,32 +3951,41 @@ static int tg3_load_tso_firmware(struct tg3 *tp)
 	return 0;
 }
 
+/* tp->lock is held. */
+static void __tg3_set_one_mac_addr(struct tg3 *tp, u8 *mac_addr, int index)
+{
+	u32 addr_high, addr_low;
+
+	addr_high = ((mac_addr[0] << 8) | mac_addr[1]);
+	addr_low = ((mac_addr[2] << 24) | (mac_addr[3] << 16) |
+		    (mac_addr[4] <<  8) | mac_addr[5]);
+
+	if (index < 4) {
+		tw32(MAC_ADDR_0_HIGH + (index * 8), addr_high);
+		tw32(MAC_ADDR_0_LOW + (index * 8), addr_low);
+	} else {
+		index -= 4;
+		tw32(MAC_EXTADDR_0_HIGH + (index * 8), addr_high);
+		tw32(MAC_EXTADDR_0_LOW + (index * 8), addr_low);
+	}
+}
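
The new helper packs a 6-byte MAC address into the high/low register pair: the high word holds bytes 0-1 and the low word bytes 2-5, with the index selecting either one of the four standard MAC_ADDR slots or the extended bank. A small standalone sketch of just the packing (illustrative only; the register writes are omitted):

	#include <stdint.h>
	#include <stdio.h>

	static void pack_mac(const uint8_t *mac, uint32_t *high, uint32_t *low)
	{
		*high = (mac[0] << 8) | mac[1];
		*low  = (mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5];
	}

	int main(void)
	{
		const uint8_t mac[6] = { 0x00, 0x10, 0x18, 0xaa, 0xbb, 0xcc };
		uint32_t high, low;

		pack_mac(mac, &high, &low);
		/* prints high=0x0010 low=0x18aabbcc */
		printf("high=0x%04x low=0x%08x\n", (unsigned)high, (unsigned)low);
		return 0;
	}
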
 
 /* tp->lock is held. */
 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
 {
-	u32 addr_high, addr_low;
+	u32 addr_high;
 	int i;
 
-	addr_high = ((tp->dev->dev_addr[0] << 8) |
-		     tp->dev->dev_addr[1]);
-	addr_low = ((tp->dev->dev_addr[2] << 24) |
-		    (tp->dev->dev_addr[3] << 16) |
-		    (tp->dev->dev_addr[4] <<  8) |
-		    (tp->dev->dev_addr[5] <<  0));
 	for (i = 0; i < 4; i++) {
 		if (i == 1 && skip_mac_1)
 			continue;
-		tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
-		tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
+		__tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
 	}
 
 	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
 	    tg3_asic_rev(tp) == ASIC_REV_5704) {
-		for (i = 0; i < 12; i++) {
-			tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
-			tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
-		}
+		for (i = 4; i < 16; i++)
+			__tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
 	}
 
 	addr_high = (tp->dev->dev_addr[0] +
@@ -4403,9 +4415,12 @@ static void tg3_phy_copper_begin(struct tg3 *tp)
 			if (tg3_flag(tp, WOL_SPEED_100MB))
 				adv |= ADVERTISED_100baseT_Half |
 				       ADVERTISED_100baseT_Full;
-			if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK)
-				adv |= ADVERTISED_1000baseT_Half |
-				       ADVERTISED_1000baseT_Full;
+			if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) {
+				if (!(tp->phy_flags &
+				      TG3_PHYFLG_DISABLE_1G_HD_ADV))
+					adv |= ADVERTISED_1000baseT_Half;
+				adv |= ADVERTISED_1000baseT_Full;
+			}
 
 			fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
 		} else {
@@ -8925,6 +8940,49 @@ static void tg3_restore_pci_state(struct tg3 *tp)
 	}
 }
 
+static void tg3_override_clk(struct tg3 *tp)
+{
+	u32 val;
+
+	switch (tg3_asic_rev(tp)) {
+	case ASIC_REV_5717:
+		val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
+		tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
+		     TG3_CPMU_MAC_ORIDE_ENABLE);
+		break;
+
+	case ASIC_REV_5719:
+	case ASIC_REV_5720:
+		tw32(TG3_CPMU_CLCK_ORIDE, CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
+		break;
+
+	default:
+		return;
+	}
+}
+
+static void tg3_restore_clk(struct tg3 *tp)
+{
+	u32 val;
+
+	switch (tg3_asic_rev(tp)) {
+	case ASIC_REV_5717:
+		val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
+		tw32(TG3_CPMU_CLCK_ORIDE_ENABLE,
+		     val & ~TG3_CPMU_MAC_ORIDE_ENABLE);
+		break;
+
+	case ASIC_REV_5719:
+	case ASIC_REV_5720:
+		val = tr32(TG3_CPMU_CLCK_ORIDE);
+		tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
+		break;
+
+	default:
+		return;
+	}
+}
+
 /* tp->lock is held. */
 static int tg3_chip_reset(struct tg3 *tp)
 {
@@ -9013,6 +9071,13 @@ static int tg3_chip_reset(struct tg3 *tp)
 		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
 	}
 
+	/* Set the clock to the highest frequency to avoid timeouts. With link
+	 * aware mode, the clock speed could be slow and the bootcode might not
+	 * complete within the expected time. Override the clock so that the
+	 * bootcode can finish sooner, then restore it.
+	 */
+	tg3_override_clk(tp);
+
 	/* Manage gphy power for all CPMU absent PCIe devices. */
 	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
 		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
@@ -9151,10 +9216,7 @@ static int tg3_chip_reset(struct tg3 *tp)
 		tw32(0x7c00, val | (1 << 25));
 	}
 
-	if (tg3_asic_rev(tp) == ASIC_REV_5720) {
-		val = tr32(TG3_CPMU_CLCK_ORIDE);
-		tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
-	}
+	tg3_restore_clk(tp);
 
 	/* Reprobe ASF enable state.  */
 	tg3_flag_clear(tp, ENABLE_ASF);
@@ -9186,6 +9248,7 @@ static int tg3_chip_reset(struct tg3 *tp)
 
 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
+static void __tg3_set_rx_mode(struct net_device *);
 
 /* tp->lock is held. */
 static int tg3_halt(struct tg3 *tp, int kind, bool silent)
@@ -9246,6 +9309,7 @@ static int tg3_set_mac_addr(struct net_device *dev, void *p)
 	}
 	spin_lock_bh(&tp->lock);
 	__tg3_set_mac_addr(tp, skip_mac_1);
+	__tg3_set_rx_mode(dev);
 	spin_unlock_bh(&tp->lock);
 
 	return err;
@@ -9634,6 +9698,20 @@ static void __tg3_set_rx_mode(struct net_device *dev)
 		tw32(MAC_HASH_REG_3, mc_filter[3]);
 	}
 
+	if (netdev_uc_count(dev) > TG3_MAX_UCAST_ADDR(tp)) {
+		rx_mode |= RX_MODE_PROMISC;
+	} else if (!(dev->flags & IFF_PROMISC)) {
+		/* Add all entries to the MAC address filter list */
+		int i = 0;
+		struct netdev_hw_addr *ha;
+
+		netdev_for_each_uc_addr(ha, dev) {
+			__tg3_set_one_mac_addr(tp, ha->addr,
+					       i + TG3_UCAST_ADDR_IDX(tp));
+			i++;
+		}
+	}
+
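
With IFF_UNICAST_FLT set later in this patch, the core now hands the driver its secondary unicast addresses; the hunk above programs each one into a spare perfect-filter slot and falls back to promiscuous mode when they do not all fit (the ASF firmware appears to reserve one extra slot, hence TG3_MAX_UCAST_ADDR). A standalone sketch of the slot-budget decision, with the 2/3 free-slot counts taken from the macros added above:

	#include <stdbool.h>
	#include <stdio.h>

	static bool needs_promisc(int uc_count, bool asf_enabled)
	{
		int free_slots = asf_enabled ? 2 : 3;	/* mirrors TG3_MAX_UCAST_ADDR() */

		return uc_count > free_slots;
	}

	int main(void)
	{
		printf("%d\n", needs_promisc(3, true));		/* 1: fall back to promiscuous */
		printf("%d\n", needs_promisc(3, false));	/* 0: fits in the filter slots */
		return 0;
	}
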
 	if (rx_mode != tp->rx_mode) {
 		tp->rx_mode = rx_mode;
 		tw32_f(MAC_RX_MODE, rx_mode);
@@ -9966,6 +10044,7 @@ static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
 	if (tg3_asic_rev(tp) == ASIC_REV_5719)
 		val |= BUFMGR_MODE_NO_TX_UNDERRUN;
 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
+	    tg3_asic_rev(tp) == ASIC_REV_5762 ||
 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
 		val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
@@ -10751,6 +10830,7 @@ static void tg3_periodic_fetch_stats(struct tg3 *tp)
 
 	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
 	if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
+	    tg3_asic_rev(tp) != ASIC_REV_5762 &&
 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
 		TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
@@ -10879,6 +10959,13 @@ static void tg3_timer(unsigned long __opaque)
 		} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
 			   tg3_flag(tp, 5780_CLASS)) {
 			tg3_serdes_parallel_detect(tp);
+		} else if (tg3_flag(tp, POLL_CPMU_LINK)) {
+			u32 cpmu = tr32(TG3_CPMU_STATUS);
+			bool link_up = !((cpmu & TG3_CPMU_STATUS_LINK_MASK) ==
+					 TG3_CPMU_STATUS_LINK_MASK);
+
+			if (link_up != tp->link_up)
+				tg3_setup_phy(tp, false);
 		}
 
 		tp->timer_counter = tp->timer_multiplier;
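
When both APE and ASF firmware are present (POLL_CPMU_LINK is set later in this patch), the management firmware can change the link behind the driver's back, so the timer samples TG3_CPMU_STATUS instead of relying on link interrupts; the link is treated as down only when every bit in TG3_CPMU_STATUS_LINK_MASK is set. A trivial standalone sketch of that test, with the mask value copied from the header change below:

	#include <stdbool.h>
	#include <stdio.h>

	#define LINK_MASK	0x180000u	/* TG3_CPMU_STATUS_LINK_MASK */

	static bool cpmu_link_up(unsigned int cpmu_status)
	{
		return (cpmu_status & LINK_MASK) != LINK_MASK;
	}

	int main(void)
	{
		printf("%d\n", cpmu_link_up(0x180000));	/* 0: both bits set -> link down */
		printf("%d\n", cpmu_link_up(0x080000));	/* 1: link up */
		return 0;
	}
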
@@ -11746,8 +11833,6 @@ static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
 		get_stat64(&hw_stats->rx_frame_too_long_errors) +
 		get_stat64(&hw_stats->rx_undersize_packets);
 
-	stats->rx_over_errors = old_stats->rx_over_errors +
-		get_stat64(&hw_stats->rxbds_empty);
 	stats->rx_frame_errors = old_stats->rx_frame_errors +
 		get_stat64(&hw_stats->rx_align_errors);
 	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
@@ -13594,14 +13679,13 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
 
 }
 
-static int tg3_hwtstamp_ioctl(struct net_device *dev,
-			      struct ifreq *ifr, int cmd)
+static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
 {
 	struct tg3 *tp = netdev_priv(dev);
 	struct hwtstamp_config stmpconf;
 
 	if (!tg3_flag(tp, PTP_CAPABLE))
-		return -EINVAL;
+		return -EOPNOTSUPP;
 
 	if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
 		return -EFAULT;
@@ -13682,6 +13766,67 @@ static int tg3_hwtstamp_ioctl(struct net_device *dev,
 		-EFAULT : 0;
 }
 
+static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
+{
+	struct tg3 *tp = netdev_priv(dev);
+	struct hwtstamp_config stmpconf;
+
+	if (!tg3_flag(tp, PTP_CAPABLE))
+		return -EOPNOTSUPP;
+
+	stmpconf.flags = 0;
+	stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ?
+			    HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF);
+
+	switch (tp->rxptpctl) {
+	case 0:
+		stmpconf.rx_filter = HWTSTAMP_FILTER_NONE;
+		break;
+	case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS:
+		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
+		break;
+	case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
+		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
+		break;
+	case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ:
+		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
+		break;
+	case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
+		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+		break;
+	case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
+		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
+		break;
+	case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
+		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
+		break;
+	case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
+		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
+		break;
+	case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
+		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC;
+		break;
+	case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
+		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
+		break;
+	case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
+		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
+		break;
+	case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
+		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ;
+		break;
+	case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ:
+		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
+		break;
+	default:
+		WARN_ON_ONCE(1);
+		return -ERANGE;
+	}
+
+	return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
+		-EFAULT : 0;
+}
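
With tg3_hwtstamp_get() wired up to SIOCGHWTSTAMP below, userspace can now read back the current hardware timestamping configuration instead of only setting it. A minimal userspace query sketch (the interface name "eth0" is an assumption, and error handling is kept to a minimum):

	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <unistd.h>
	#include <linux/if.h>
	#include <linux/net_tstamp.h>
	#include <linux/sockios.h>

	int main(void)
	{
		struct hwtstamp_config cfg;
		struct ifreq ifr;
		int fd = socket(AF_INET, SOCK_DGRAM, 0);

		if (fd < 0)
			return 1;

		memset(&cfg, 0, sizeof(cfg));
		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
		ifr.ifr_data = (void *)&cfg;

		if (ioctl(fd, SIOCGHWTSTAMP, &ifr) == 0)
			printf("tx_type=%d rx_filter=%d\n", cfg.tx_type, cfg.rx_filter);

		close(fd);
		return 0;
	}
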
+
 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 {
 	struct mii_ioctl_data *data = if_mii(ifr);
@@ -13735,7 +13880,10 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 		return err;
 
 	case SIOCSHWTSTAMP:
-		return tg3_hwtstamp_ioctl(dev, ifr, cmd);
+		return tg3_hwtstamp_set(dev, ifr);
+
+	case SIOCGHWTSTAMP:
+		return tg3_hwtstamp_get(dev, ifr);
 
 	default:
 		/* do nothing */
@@ -14856,7 +15004,8 @@ static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
 	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
 	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
 		u32 nic_cfg, led_cfg;
-		u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
+		u32 cfg2 = 0, cfg4 = 0, cfg5 = 0;
+		u32 nic_phy_id, ver, eeprom_phy_id;
 		int eeprom_phy_serdes = 0;
 
 		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
@@ -14873,6 +15022,11 @@ static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
 		if (tg3_asic_rev(tp) == ASIC_REV_5785)
 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
 
+		if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
+		    tg3_asic_rev(tp) == ASIC_REV_5719 ||
+		    tg3_asic_rev(tp) == ASIC_REV_5720)
+			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_5, &cfg5);
+
 		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
 		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
 			eeprom_phy_serdes = 1;
@@ -15025,6 +15179,9 @@ static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
 			tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
 		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
 			tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
+
+		if (cfg5 & NIC_SRAM_DISABLE_1G_HALF_ADV)
+			tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV;
 	}
 done:
 	if (tg3_flag(tp, WOL_CAP))
@@ -15120,9 +15277,11 @@ static void tg3_phy_init_link_config(struct tg3 *tp)
 {
 	u32 adv = ADVERTISED_Autoneg;
 
-	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
-		adv |= ADVERTISED_1000baseT_Half |
-		       ADVERTISED_1000baseT_Full;
+	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
+		if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV))
+			adv |= ADVERTISED_1000baseT_Half;
+		adv |= ADVERTISED_1000baseT_Full;
+	}
 
 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
 		adv |= ADVERTISED_100baseT_Half |
@@ -16470,6 +16629,7 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
 
 	/* Set these bits to enable statistics workaround. */
 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
+	    tg3_asic_rev(tp) == ASIC_REV_5762 ||
 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
 		tp->coalesce_mode |= HOSTCC_MODE_ATTN;
@@ -16612,6 +16772,9 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
 	else
 		tg3_flag_clear(tp, POLL_SERDES);
 
+	if (tg3_flag(tp, ENABLE_APE) && tg3_flag(tp, ENABLE_ASF))
+		tg3_flag_set(tp, POLL_CPMU_LINK);
+
 	tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
 	tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
 	if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
@@ -17533,6 +17696,7 @@ static int tg3_init_one(struct pci_dev *pdev,
 		features |= NETIF_F_LOOPBACK;
 
 	dev->hw_features |= features;
+	dev->priv_flags |= IFF_UNICAST_FLT;
 
 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
 	    !tg3_flag(tp, TSO_CAPABLE) &&
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index 5c3835aa1e1b..ef472385bce4 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -1146,10 +1146,14 @@
 #define TG3_CPMU_CLCK_ORIDE		0x00003624
 #define  CPMU_CLCK_ORIDE_MAC_ORIDE_EN	 0x80000000
 
+#define TG3_CPMU_CLCK_ORIDE_ENABLE	0x00003628
+#define  TG3_CPMU_MAC_ORIDE_ENABLE	 (1 << 13)
+
 #define TG3_CPMU_STATUS			0x0000362c
 #define  TG3_CPMU_STATUS_FMSK_5717	 0x20000000
 #define  TG3_CPMU_STATUS_FMSK_5719	 0xc0000000
 #define  TG3_CPMU_STATUS_FSHFT_5719	 30
+#define  TG3_CPMU_STATUS_LINK_MASK	 0x180000
 
 #define TG3_CPMU_CLCK_STAT		0x00003630
 #define  CPMU_CLCK_STAT_MAC_CLCK_MASK	 0x001f0000
@@ -2204,7 +2208,7 @@
 
 #define NIC_SRAM_DATA_CFG_2		0x00000d38
 
-#define  NIC_SRAM_DATA_CFG_2_APD_EN	 0x00000400
+#define  NIC_SRAM_DATA_CFG_2_APD_EN	 0x00004000
 #define  SHASTA_EXT_LED_MODE_MASK	 0x00018000
 #define  SHASTA_EXT_LED_LEGACY		 0x00000000
 #define  SHASTA_EXT_LED_SHARED		 0x00008000
@@ -2226,6 +2230,9 @@
 #define  NIC_SRAM_CPMUSTAT_SIG		0x0000362c
 #define  NIC_SRAM_CPMUSTAT_SIG_MSK	0x0000ffff
 
+#define NIC_SRAM_DATA_CFG_5		0x00000e0c
+#define  NIC_SRAM_DISABLE_1G_HALF_ADV	0x00000002
+
 #define NIC_SRAM_RX_MINI_BUFFER_DESC	0x00001000
 
 #define NIC_SRAM_DMA_DESC_POOL_BASE	0x00002000
@@ -3014,6 +3021,7 @@ enum TG3_FLAGS {
 	TG3_FLAG_ENABLE_ASF,
 	TG3_FLAG_ASPM_WORKAROUND,
 	TG3_FLAG_POLL_SERDES,
+	TG3_FLAG_POLL_CPMU_LINK,
 	TG3_FLAG_MBOX_WRITE_REORDER,
 	TG3_FLAG_PCIX_TARGET_HWBUG,
 	TG3_FLAG_WOL_SPEED_100MB,
@@ -3325,6 +3333,7 @@ struct tg3 {
 #define TG3_PHYFLG_1G_ON_VAUX_OK	0x00080000
 #define TG3_PHYFLG_KEEP_LINK_ON_PWRDN	0x00100000
 #define TG3_PHYFLG_MDIX_STATE		0x00200000
+#define TG3_PHYFLG_DISABLE_1G_HD_ADV	0x00400000
 
 	u32				led_ctrl;
 	u32				phy_otp;
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
index 6f3cac060f29..1803c3959044 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
@@ -22,6 +22,14 @@
 
 /* IOC local definitions */
 
+#define bfa_ioc_state_disabled(__sm)			\
+	(((__sm) == BFI_IOC_UNINIT) ||			\
+	 ((__sm) == BFI_IOC_INITING) ||			\
+	 ((__sm) == BFI_IOC_HWINIT) ||			\
+	 ((__sm) == BFI_IOC_DISABLED) ||		\
+	 ((__sm) == BFI_IOC_FAIL) ||			\
+	 ((__sm) == BFI_IOC_CFG_DISABLED))
+
 /* Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details. */
 
 #define bfa_ioc_firmware_lock(__ioc)			\
@@ -42,6 +50,14 @@
 			((__ioc)->ioc_hwif->ioc_sync_ack(__ioc))
 #define bfa_ioc_sync_complete(__ioc)			\
 			((__ioc)->ioc_hwif->ioc_sync_complete(__ioc))
+#define bfa_ioc_set_cur_ioc_fwstate(__ioc, __fwstate)		\
+			((__ioc)->ioc_hwif->ioc_set_fwstate(__ioc, __fwstate))
+#define bfa_ioc_get_cur_ioc_fwstate(__ioc)		\
+			((__ioc)->ioc_hwif->ioc_get_fwstate(__ioc))
+#define bfa_ioc_set_alt_ioc_fwstate(__ioc, __fwstate)		\
+		((__ioc)->ioc_hwif->ioc_set_alt_fwstate(__ioc, __fwstate))
+#define bfa_ioc_get_alt_ioc_fwstate(__ioc)		\
+			((__ioc)->ioc_hwif->ioc_get_alt_fwstate(__ioc))
 
 #define bfa_ioc_mbox_cmd_pending(__ioc)		\
 			(!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
@@ -76,8 +92,8 @@ static void bfa_ioc_pf_disabled(struct bfa_ioc *ioc);
 static void bfa_ioc_pf_failed(struct bfa_ioc *ioc);
 static void bfa_ioc_pf_hwfailed(struct bfa_ioc *ioc);
 static void bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc);
-static void bfa_ioc_boot(struct bfa_ioc *ioc, enum bfi_fwboot_type boot_type,
-			 u32 boot_param);
+static enum bfa_status bfa_ioc_boot(struct bfa_ioc *ioc,
+			enum bfi_fwboot_type boot_type, u32 boot_param);
 static u32 bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr);
 static void bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc,
 						char *serial_num);
@@ -860,7 +876,7 @@ bfa_iocpf_sm_disabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
 		 */
 
 	case IOCPF_E_TIMEOUT:
-		writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
+		bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
 		break;
 
@@ -949,7 +965,7 @@ bfa_iocpf_sm_initfail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
 	case IOCPF_E_SEMLOCKED:
 		bfa_ioc_notify_fail(ioc);
 		bfa_ioc_sync_leave(ioc);
-		writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
+		bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
 		bfa_nw_ioc_hw_sem_release(ioc);
 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
 		break;
@@ -1031,7 +1047,7 @@ bfa_iocpf_sm_fail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
 		bfa_ioc_notify_fail(ioc);
 		if (!iocpf->auto_recover) {
 			bfa_ioc_sync_leave(ioc);
-			writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
+			bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
 			bfa_nw_ioc_hw_sem_release(ioc);
 			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
 		} else {
@@ -1162,7 +1178,7 @@ bfa_ioc_hw_sem_init(struct bfa_ioc *ioc)
 		r32 = readl(ioc->ioc_regs.ioc_init_sem_reg);
 	}
 
-	fwstate = readl(ioc->ioc_regs.ioc_fwstate);
+	fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc);
 	if (fwstate == BFI_IOC_UNINIT) {
 		writel(1, ioc->ioc_regs.ioc_init_sem_reg);
 		return;
@@ -1176,8 +1192,8 @@ bfa_ioc_hw_sem_init(struct bfa_ioc *ioc)
 	}
 
 	bfa_ioc_fwver_clear(ioc);
-	writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
-	writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
+	bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_UNINIT);
+	bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_UNINIT);
 
 	/*
 	 * Try to lock and then unlock the semaphore.
@@ -1309,22 +1325,504 @@ bfa_nw_ioc_fwver_get(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
 	}
 }
 
-/* Returns TRUE if same. */
+static bool
+bfa_ioc_fwver_md5_check(struct bfi_ioc_image_hdr *fwhdr_1,
+			struct bfi_ioc_image_hdr *fwhdr_2)
+{
+	int i;
+
+	for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
+		if (fwhdr_1->md5sum[i] != fwhdr_2->md5sum[i])
+			return false;
+	}
+
+	return true;
+}
+
+/* Returns TRUE if the major, minor and maintenance versions are the same.
+ * If the patch, phase and build numbers are also the same, the MD5
+ * checksums must match as well.
+ */
+static bool
+bfa_ioc_fw_ver_compatible(struct bfi_ioc_image_hdr *drv_fwhdr,
+			  struct bfi_ioc_image_hdr *fwhdr_to_cmp)
+{
+	if (drv_fwhdr->signature != fwhdr_to_cmp->signature)
+		return false;
+	if (drv_fwhdr->fwver.major != fwhdr_to_cmp->fwver.major)
+		return false;
+	if (drv_fwhdr->fwver.minor != fwhdr_to_cmp->fwver.minor)
+		return false;
+	if (drv_fwhdr->fwver.maint != fwhdr_to_cmp->fwver.maint)
+		return false;
+	if (drv_fwhdr->fwver.patch == fwhdr_to_cmp->fwver.patch &&
+	    drv_fwhdr->fwver.phase == fwhdr_to_cmp->fwver.phase &&
+	    drv_fwhdr->fwver.build == fwhdr_to_cmp->fwver.build)
+		return bfa_ioc_fwver_md5_check(drv_fwhdr, fwhdr_to_cmp);
+
+	return true;
+}
+
+static bool
+bfa_ioc_flash_fwver_valid(struct bfi_ioc_image_hdr *flash_fwhdr)
+{
+	if (flash_fwhdr->fwver.major == 0 || flash_fwhdr->fwver.major == 0xFF)
+		return false;
+
+	return true;
+}
+
+static bool
+fwhdr_is_ga(struct bfi_ioc_image_hdr *fwhdr)
+{
+	if (fwhdr->fwver.phase == 0 &&
+	    fwhdr->fwver.build == 0)
+		return false;
+
+	return true;
+}
+
+/* Compares the patch level of two compatible images and reports whether
+ * fwhdr_to_cmp is better, older or the same as base_fwhdr.
+ */
+static enum bfi_ioc_img_ver_cmp
+bfa_ioc_fw_ver_patch_cmp(struct bfi_ioc_image_hdr *base_fwhdr,
+			 struct bfi_ioc_image_hdr *fwhdr_to_cmp)
+{
+	if (bfa_ioc_fw_ver_compatible(base_fwhdr, fwhdr_to_cmp) == false)
+		return BFI_IOC_IMG_VER_INCOMP;
+
+	if (fwhdr_to_cmp->fwver.patch > base_fwhdr->fwver.patch)
+		return BFI_IOC_IMG_VER_BETTER;
+	else if (fwhdr_to_cmp->fwver.patch < base_fwhdr->fwver.patch)
+		return BFI_IOC_IMG_VER_OLD;
+
+	/* GA takes priority over internal builds of the same patch stream.
+	 * At this point the major, minor, maint and patch numbers are the same.
+	 */
+	if (fwhdr_is_ga(base_fwhdr) == true)
+		if (fwhdr_is_ga(fwhdr_to_cmp))
+			return BFI_IOC_IMG_VER_SAME;
+		else
+			return BFI_IOC_IMG_VER_OLD;
+	else
+		if (fwhdr_is_ga(fwhdr_to_cmp))
+			return BFI_IOC_IMG_VER_BETTER;
+
+	if (fwhdr_to_cmp->fwver.phase > base_fwhdr->fwver.phase)
+		return BFI_IOC_IMG_VER_BETTER;
+	else if (fwhdr_to_cmp->fwver.phase < base_fwhdr->fwver.phase)
+		return BFI_IOC_IMG_VER_OLD;
+
+	if (fwhdr_to_cmp->fwver.build > base_fwhdr->fwver.build)
+		return BFI_IOC_IMG_VER_BETTER;
+	else if (fwhdr_to_cmp->fwver.build < base_fwhdr->fwver.build)
+		return BFI_IOC_IMG_VER_OLD;
+
+	/* All version numbers are equal.
+	 * The MD5 check is done as part of the compatibility check.
+	 */
+	return BFI_IOC_IMG_VER_SAME;
+}
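
Taken together, the two helpers above rank firmware images: images must agree on major, minor and maint to be comparable at all; among comparable images a higher patch wins, then phase and build break ties, and a full match defers to the MD5 comparison. A simplified standalone sketch of that ordering (the GA special-casing in the driver is deliberately left out here):

	#include <stdio.h>

	enum img_cmp { IMG_OLDER = -1, IMG_SAME = 0, IMG_BETTER = 1 };

	struct fwver { int patch, phase, build; };

	static enum img_cmp ver_cmp(const struct fwver *base, const struct fwver *cand)
	{
		if (cand->patch != base->patch)
			return cand->patch > base->patch ? IMG_BETTER : IMG_OLDER;
		if (cand->phase != base->phase)
			return cand->phase > base->phase ? IMG_BETTER : IMG_OLDER;
		if (cand->build != base->build)
			return cand->build > base->build ? IMG_BETTER : IMG_OLDER;
		return IMG_SAME;	/* MD5 comparison decides elsewhere */
	}

	int main(void)
	{
		struct fwver running = { 2, 1, 10 }, flash = { 3, 0, 0 };

		printf("%d\n", ver_cmp(&running, &flash));	/* 1: flash image is newer */
		return 0;
	}
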
+
+/* register definitions */
+#define FLI_CMD_REG			0x0001d000
+#define FLI_WRDATA_REG			0x0001d00c
+#define FLI_RDDATA_REG			0x0001d010
+#define FLI_ADDR_REG			0x0001d004
+#define FLI_DEV_STATUS_REG		0x0001d014
+
+#define BFA_FLASH_FIFO_SIZE		128	/* fifo size */
+#define BFA_FLASH_CHECK_MAX		10000	/* max # of status check */
+#define BFA_FLASH_BLOCKING_OP_MAX	1000000	/* max # of blocking op check */
+#define BFA_FLASH_WIP_MASK		0x01	/* write in progress bit mask */
+
+#define NFC_STATE_RUNNING		0x20000001
+#define NFC_STATE_PAUSED		0x00004560
+#define NFC_VER_VALID			0x147
+
+enum bfa_flash_cmd {
+	BFA_FLASH_FAST_READ	= 0x0b,	/* fast read */
+	BFA_FLASH_WRITE_ENABLE	= 0x06,	/* write enable */
+	BFA_FLASH_SECTOR_ERASE	= 0xd8,	/* sector erase */
+	BFA_FLASH_WRITE		= 0x02,	/* write */
+	BFA_FLASH_READ_STATUS	= 0x05,	/* read status */
+};
+
+/* hardware error definition */
+enum bfa_flash_err {
+	BFA_FLASH_NOT_PRESENT	= -1,	/*!< flash not present */
+	BFA_FLASH_UNINIT	= -2,	/*!< flash not initialized */
+	BFA_FLASH_BAD		= -3,	/*!< flash bad */
+	BFA_FLASH_BUSY		= -4,	/*!< flash busy */
+	BFA_FLASH_ERR_CMD_ACT	= -5,	/*!< command active never cleared */
+	BFA_FLASH_ERR_FIFO_CNT	= -6,	/*!< fifo count never cleared */
+	BFA_FLASH_ERR_WIP	= -7,	/*!< write-in-progress never cleared */
+	BFA_FLASH_ERR_TIMEOUT	= -8,	/*!< fli timeout */
+	BFA_FLASH_ERR_LEN	= -9,	/*!< invalid length */
+};
+
+/* flash command register data structure */
+union bfa_flash_cmd_reg {
+	struct {
+#ifdef __BIG_ENDIAN
+		u32	act:1;
+		u32	rsv:1;
+		u32	write_cnt:9;
+		u32	read_cnt:9;
+		u32	addr_cnt:4;
+		u32	cmd:8;
+#else
+		u32	cmd:8;
+		u32	addr_cnt:4;
+		u32	read_cnt:9;
+		u32	write_cnt:9;
+		u32	rsv:1;
+		u32	act:1;
+#endif
+	} r;
+	u32	i;
+};
+
+/* flash device status register data structure */
+union bfa_flash_dev_status_reg {
+	struct {
+#ifdef __BIG_ENDIAN
+		u32	rsv:21;
+		u32	fifo_cnt:6;
+		u32	busy:1;
+		u32	init_status:1;
+		u32	present:1;
+		u32	bad:1;
+		u32	good:1;
+#else
+		u32	good:1;
+		u32	bad:1;
+		u32	present:1;
+		u32	init_status:1;
+		u32	busy:1;
+		u32	fifo_cnt:6;
+		u32	rsv:21;
+#endif
+	} r;
+	u32	i;
+};
+
+/* flash address register data structure */
+union bfa_flash_addr_reg {
+	struct {
+#ifdef __BIG_ENDIAN
+		u32	addr:24;
+		u32	dummy:8;
+#else
+		u32	dummy:8;
+		u32	addr:24;
+#endif
+	} r;
+	u32	i;
+};
+
+/* Flash raw private functions */
+static void
+bfa_flash_set_cmd(void __iomem *pci_bar, u8 wr_cnt,
+		  u8 rd_cnt, u8 ad_cnt, u8 op)
+{
+	union bfa_flash_cmd_reg cmd;
+
+	cmd.i = 0;
+	cmd.r.act = 1;
+	cmd.r.write_cnt = wr_cnt;
+	cmd.r.read_cnt = rd_cnt;
+	cmd.r.addr_cnt = ad_cnt;
+	cmd.r.cmd = op;
+	writel(cmd.i, (pci_bar + FLI_CMD_REG));
+}
+
+static void
+bfa_flash_set_addr(void __iomem *pci_bar, u32 address)
+{
+	union bfa_flash_addr_reg addr;
+
+	addr.r.addr = address & 0x00ffffff;
+	addr.r.dummy = 0;
+	writel(addr.i, (pci_bar + FLI_ADDR_REG));
+}
+
+static int
+bfa_flash_cmd_act_check(void __iomem *pci_bar)
+{
+	union bfa_flash_cmd_reg cmd;
+
+	cmd.i = readl(pci_bar + FLI_CMD_REG);
+
+	if (cmd.r.act)
+		return BFA_FLASH_ERR_CMD_ACT;
+
+	return 0;
+}
+
+/* Flush FLI data fifo. */
+static u32
+bfa_flash_fifo_flush(void __iomem *pci_bar)
+{
+	u32 i;
+	u32 t;
+	union bfa_flash_dev_status_reg dev_status;
+
+	dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG);
+
+	if (!dev_status.r.fifo_cnt)
+		return 0;
+
+	/* fifo counter in terms of words */
+	for (i = 0; i < dev_status.r.fifo_cnt; i++)
+		t = readl(pci_bar + FLI_RDDATA_REG);
+
+	/* Check the device status. It may take some time. */
+	for (i = 0; i < BFA_FLASH_CHECK_MAX; i++) {
+		dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG);
+		if (!dev_status.r.fifo_cnt)
+			break;
+	}
+
+	if (dev_status.r.fifo_cnt)
+		return BFA_FLASH_ERR_FIFO_CNT;
+
+	return 0;
+}
+
+/* Read flash status. */
+static u32
+bfa_flash_status_read(void __iomem *pci_bar)
+{
+	union bfa_flash_dev_status_reg	dev_status;
+	u32				status;
+	u32			ret_status;
+	int				i;
+
+	status = bfa_flash_fifo_flush(pci_bar);
+	if (status < 0)
+		return status;
+
+	bfa_flash_set_cmd(pci_bar, 0, 4, 0, BFA_FLASH_READ_STATUS);
+
+	for (i = 0; i < BFA_FLASH_CHECK_MAX; i++) {
+		status = bfa_flash_cmd_act_check(pci_bar);
+		if (!status)
+			break;
+	}
+
+	if (status)
+		return status;
+
+	dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG);
+	if (!dev_status.r.fifo_cnt)
+		return BFA_FLASH_BUSY;
+
+	ret_status = readl(pci_bar + FLI_RDDATA_REG);
+	ret_status >>= 24;
+
+	status = bfa_flash_fifo_flush(pci_bar);
+	if (status < 0)
+		return status;
+
+	return ret_status;
+}
+
+/* Start flash read operation. */
+static u32
+bfa_flash_read_start(void __iomem *pci_bar, u32 offset, u32 len,
+		     char *buf)
+{
+	u32 status;
+
+	/* len must be a multiple of 4 and must not exceed the fifo size */
+	if (len == 0 || len > BFA_FLASH_FIFO_SIZE || (len & 0x03) != 0)
+		return BFA_FLASH_ERR_LEN;
+
+	/* check status */
+	status = bfa_flash_status_read(pci_bar);
+	if (status == BFA_FLASH_BUSY)
+		status = bfa_flash_status_read(pci_bar);
+
+	if (status < 0)
+		return status;
+
+	/* check if write-in-progress bit is cleared */
+	if (status & BFA_FLASH_WIP_MASK)
+		return BFA_FLASH_ERR_WIP;
+
+	bfa_flash_set_addr(pci_bar, offset);
+
+	bfa_flash_set_cmd(pci_bar, 0, (u8)len, 4, BFA_FLASH_FAST_READ);
+
+	return 0;
+}
+
+/* Check flash read operation. */
+static u32
+bfa_flash_read_check(void __iomem *pci_bar)
+{
+	if (bfa_flash_cmd_act_check(pci_bar))
+		return 1;
+
+	return 0;
+}
+
+/* End flash read operation. */
+static void
+bfa_flash_read_end(void __iomem *pci_bar, u32 len, char *buf)
+{
+	u32 i;
+
+	/* read data fifo up to 32 words */
+	for (i = 0; i < len; i += 4) {
+		u32 w = readl(pci_bar + FLI_RDDATA_REG);
+		*((u32 *)(buf + i)) = swab32(w);
+	}
+
+	bfa_flash_fifo_flush(pci_bar);
+}
+
+/* Perform flash raw read. */
+
+#define FLASH_BLOCKING_OP_MAX   500
+#define FLASH_SEM_LOCK_REG	0x18820
+
+static int
+bfa_raw_sem_get(void __iomem *bar)
+{
+	int	locked;
+
+	locked = readl((bar + FLASH_SEM_LOCK_REG));
+
+	return !locked;
+}
+
+static enum bfa_status
+bfa_flash_sem_get(void __iomem *bar)
+{
+	u32 n = FLASH_BLOCKING_OP_MAX;
+
+	while (!bfa_raw_sem_get(bar)) {
+		if (--n <= 0)
+			return BFA_STATUS_BADFLASH;
+		udelay(10000);
+	}
+	return BFA_STATUS_OK;
+}
+
+static void
+bfa_flash_sem_put(void __iomem *bar)
+{
+	writel(0, (bar + FLASH_SEM_LOCK_REG));
+}
+
+static enum bfa_status
+bfa_flash_raw_read(void __iomem *pci_bar, u32 offset, char *buf,
+		   u32 len)
+{
+	u32 n, status;
+	u32 off, l, s, residue, fifo_sz;
+
+	residue = len;
+	off = 0;
+	fifo_sz = BFA_FLASH_FIFO_SIZE;
+	status = bfa_flash_sem_get(pci_bar);
+	if (status != BFA_STATUS_OK)
+		return status;
+
+	while (residue) {
+		s = offset + off;
+		n = s / fifo_sz;
+		l = (n + 1) * fifo_sz - s;
+		if (l > residue)
+			l = residue;
+
+		status = bfa_flash_read_start(pci_bar, offset + off, l,
+								&buf[off]);
+		if (status < 0) {
+			bfa_flash_sem_put(pci_bar);
+			return BFA_STATUS_FAILED;
+		}
+
+		n = BFA_FLASH_BLOCKING_OP_MAX;
+		while (bfa_flash_read_check(pci_bar)) {
+			if (--n <= 0) {
+				bfa_flash_sem_put(pci_bar);
+				return BFA_STATUS_FAILED;
+			}
+		}
+
+		bfa_flash_read_end(pci_bar, l, &buf[off]);
+
+		residue -= l;
+		off += l;
+	}
+	bfa_flash_sem_put(pci_bar);
+
+	return BFA_STATUS_OK;
+}
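The loop above never lets a single transfer cross a FIFO-size boundary: with s = offset + off and n = s / fifo_sz, the length l = (n + 1) * fifo_sz - s is the distance from s to the next multiple of the FIFO size, capped by the bytes still outstanding. A minimal standalone reduction of that arithmetic (illustrative only, not part of the patch):

/* Sketch: bytes read per iteration in bfa_flash_raw_read() - up to the
 * next FIFO-size boundary, but never more than what is left. Equivalent
 * to the n/l computation in the loop above.
 */
static u32 chunk_len(u32 abs_off, u32 residue, u32 fifo_sz)
{
	u32 l = fifo_sz - (abs_off % fifo_sz);

	return (l > residue) ? residue : l;
}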
+
+#define BFA_FLASH_PART_FWIMG_ADDR	0x100000 /* fw image address */
+
+static enum bfa_status
+bfa_nw_ioc_flash_img_get_chnk(struct bfa_ioc *ioc, u32 off,
+			      u32 *fwimg)
+{
+	return bfa_flash_raw_read(ioc->pcidev.pci_bar_kva,
+			BFA_FLASH_PART_FWIMG_ADDR + (off * sizeof(u32)),
+			(char *)fwimg, BFI_FLASH_CHUNK_SZ);
+}
+
+static enum bfi_ioc_img_ver_cmp
+bfa_ioc_flash_fwver_cmp(struct bfa_ioc *ioc,
+			struct bfi_ioc_image_hdr *base_fwhdr)
+{
+	struct bfi_ioc_image_hdr *flash_fwhdr;
+	enum bfa_status status;
+	u32 fwimg[BFI_FLASH_CHUNK_SZ_WORDS];
+
+	status = bfa_nw_ioc_flash_img_get_chnk(ioc, 0, fwimg);
+	if (status != BFA_STATUS_OK)
+		return BFI_IOC_IMG_VER_INCOMP;
+
+	flash_fwhdr = (struct bfi_ioc_image_hdr *)fwimg;
+	if (bfa_ioc_flash_fwver_valid(flash_fwhdr))
+		return bfa_ioc_fw_ver_patch_cmp(base_fwhdr, flash_fwhdr);
+	else
+		return BFI_IOC_IMG_VER_INCOMP;
+}
+
+/**
+ * Returns TRUE if driver is willing to work with current smem f/w version.
+ */
 bool
 bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
 {
 	struct bfi_ioc_image_hdr *drv_fwhdr;
-	int i;
+	enum bfi_ioc_img_ver_cmp smem_flash_cmp, drv_smem_cmp;
 
 	drv_fwhdr = (struct bfi_ioc_image_hdr *)
 		bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
 
-	for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
-		if (fwhdr->md5sum[i] != drv_fwhdr->md5sum[i])
-			return false;
+	/* If smem is incompatible or old, driver should not work with it. */
+	drv_smem_cmp = bfa_ioc_fw_ver_patch_cmp(drv_fwhdr, fwhdr);
+	if (drv_smem_cmp == BFI_IOC_IMG_VER_INCOMP ||
+	    drv_smem_cmp == BFI_IOC_IMG_VER_OLD) {
+		return false;
 	}
 
-	return true;
+	/* If flash has a better f/w than smem, do not work with smem.
+	 * If smem f/w == flash f/w, work with it (smem f/w is neither old
+	 * nor incompatible, as checked above).
+	 * If flash f/w is old or incompatible, work with smem only if
+	 * smem f/w == driver f/w.
+	 */
+	smem_flash_cmp = bfa_ioc_flash_fwver_cmp(ioc, fwhdr);
+
+	if (smem_flash_cmp == BFI_IOC_IMG_VER_BETTER)
+		return false;
+	else if (smem_flash_cmp == BFI_IOC_IMG_VER_SAME)
+		return true;
+	else
+		return (drv_smem_cmp == BFI_IOC_IMG_VER_SAME) ?
+			true : false;
 }
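To summarize the decision just made (reading bfa_ioc_fw_ver_patch_cmp(a, b) as rating b relative to a, which matches how it is used here), with drv = driver image, smem = currently loaded f/w and flash = f/w found on flash:

  smem vs drv      flash vs smem     bfa_nw_ioc_fwver_cmp()
  ---------------  ----------------  ------------------------------
  OLD or INCOMP    (not evaluated)   false - reload firmware
  SAME or BETTER   BETTER            false - prefer the flash image
  SAME or BETTER   SAME              true  - keep running smem f/w
  SAME             OLD or INCOMP     true  - keep running smem f/w
  BETTER           OLD or INCOMP     false - reload firmware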
 
 /* Return true if current running version is valid. Firmware signature and
@@ -1333,15 +1831,9 @@ bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
 static bool
 bfa_ioc_fwver_valid(struct bfa_ioc *ioc, u32 boot_env)
 {
-	struct bfi_ioc_image_hdr fwhdr, *drv_fwhdr;
+	struct bfi_ioc_image_hdr fwhdr;
 
 	bfa_nw_ioc_fwver_get(ioc, &fwhdr);
-	drv_fwhdr = (struct bfi_ioc_image_hdr *)
-		bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
-
-	if (fwhdr.signature != drv_fwhdr->signature)
-		return false;
-
 	if (swab32(fwhdr.bootenv) != boot_env)
 		return false;
 
@@ -1366,7 +1858,7 @@ bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
 	bool fwvalid;
 	u32 boot_env;
 
-	ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
+	ioc_fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc);
 
 	if (force)
 		ioc_fwstate = BFI_IOC_UNINIT;
@@ -1380,8 +1872,10 @@ bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
 		false : bfa_ioc_fwver_valid(ioc, boot_env);
 
 	if (!fwvalid) {
-		bfa_ioc_boot(ioc, BFI_FWBOOT_TYPE_NORMAL, boot_env);
-		bfa_ioc_poll_fwinit(ioc);
+		if (bfa_ioc_boot(ioc, BFI_FWBOOT_TYPE_NORMAL, boot_env) ==
+								BFA_STATUS_OK)
+			bfa_ioc_poll_fwinit(ioc);
+
 		return;
 	}
 
@@ -1411,8 +1905,9 @@ bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
 	/**
 	 * Initialize the h/w for any other states.
 	 */
-	bfa_ioc_boot(ioc, BFI_FWBOOT_TYPE_NORMAL, boot_env);
-	bfa_ioc_poll_fwinit(ioc);
+	if (bfa_ioc_boot(ioc, BFI_FWBOOT_TYPE_NORMAL, boot_env) ==
+							BFA_STATUS_OK)
+		bfa_ioc_poll_fwinit(ioc);
 }
 
 void
@@ -1517,7 +2012,7 @@ bfa_ioc_hb_stop(struct bfa_ioc *ioc)
 }
 
 /* Initiate a full firmware download. */
-static void
+static enum bfa_status
 bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
 		    u32 boot_env)
 {
@@ -1527,18 +2022,47 @@ bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
 	u32 chunkno = 0;
 	u32 i;
 	u32 asicmode;
+	u32 fwimg_size;
+	u32 fwimg_buf[BFI_FLASH_CHUNK_SZ_WORDS];
+	enum bfa_status status;
 
-	fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), chunkno);
+	if (boot_env == BFI_FWBOOT_ENV_OS &&
+	    boot_type == BFI_FWBOOT_TYPE_FLASH) {
+		fwimg_size = BFI_FLASH_IMAGE_SZ/sizeof(u32);
+
+		status = bfa_nw_ioc_flash_img_get_chnk(ioc,
+			BFA_IOC_FLASH_CHUNK_ADDR(chunkno), fwimg_buf);
+		if (status != BFA_STATUS_OK)
+			return status;
+
+		fwimg = fwimg_buf;
+	} else {
+		fwimg_size = bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc));
+		fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc),
+					BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
+	}
 
 	pgnum = bfa_ioc_smem_pgnum(ioc, loff);
 
 	writel(pgnum, ioc->ioc_regs.host_page_num_fn);
 
-	for (i = 0; i < bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)); i++) {
+	for (i = 0; i < fwimg_size; i++) {
 		if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
 			chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
-			fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc),
+			if (boot_env == BFI_FWBOOT_ENV_OS &&
+			    boot_type == BFI_FWBOOT_TYPE_FLASH) {
+				status = bfa_nw_ioc_flash_img_get_chnk(ioc,
+					BFA_IOC_FLASH_CHUNK_ADDR(chunkno),
+					fwimg_buf);
+				if (status != BFA_STATUS_OK)
+					return status;
+
+				fwimg = fwimg_buf;
+			} else {
+				fwimg = bfa_cb_image_get_chunk(
+					bfa_ioc_asic_gen(ioc),
 					BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
+			}
 		}
 
 		/**
@@ -1566,6 +2090,10 @@ bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
 	/*
 	 * Set boot type, env and device mode at the end.
 	*/
+	if (boot_env == BFI_FWBOOT_ENV_OS &&
+	    boot_type == BFI_FWBOOT_TYPE_FLASH) {
+		boot_type = BFI_FWBOOT_TYPE_NORMAL;
+	}
 	asicmode = BFI_FWBOOT_DEVMODE(ioc->asic_gen, ioc->asic_mode,
 					ioc->port0_mode, ioc->port1_mode);
 	writel(asicmode, ((ioc->ioc_regs.smem_page_start)
@@ -1574,6 +2102,7 @@ bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
 			+ (BFI_FWBOOT_TYPE_OFF)));
 	writel(boot_env, ((ioc->ioc_regs.smem_page_start)
 			+ (BFI_FWBOOT_ENV_OFF)));
+	return BFA_STATUS_OK;
 }
 
 static void
@@ -1846,29 +2375,47 @@ bfa_ioc_pll_init(struct bfa_ioc *ioc)
 /* Interface used by diag module to do firmware boot with memory test
  * as the entry vector.
  */
-static void
+static enum bfa_status
 bfa_ioc_boot(struct bfa_ioc *ioc, enum bfi_fwboot_type boot_type,
 		u32 boot_env)
 {
+	struct bfi_ioc_image_hdr *drv_fwhdr;
+	enum bfa_status status;
 	bfa_ioc_stats(ioc, ioc_boots);
 
 	if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
-		return;
+		return BFA_STATUS_FAILED;
+	if (boot_env == BFI_FWBOOT_ENV_OS &&
+	    boot_type == BFI_FWBOOT_TYPE_NORMAL) {
+		drv_fwhdr = (struct bfi_ioc_image_hdr *)
+			bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
+		/* Boot from flash only if the flash f/w is better than the
+		 * driver f/w. Otherwise push the driver's firmware.
+		 */
+		if (bfa_ioc_flash_fwver_cmp(ioc, drv_fwhdr) ==
+			BFI_IOC_IMG_VER_BETTER)
+			boot_type = BFI_FWBOOT_TYPE_FLASH;
+	}
 
 	/**
 	 * Initialize IOC state of all functions on a chip reset.
 	 */
 	if (boot_type == BFI_FWBOOT_TYPE_MEMTEST) {
-		writel(BFI_IOC_MEMTEST, ioc->ioc_regs.ioc_fwstate);
-		writel(BFI_IOC_MEMTEST, ioc->ioc_regs.alt_ioc_fwstate);
+		bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_MEMTEST);
+		bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_MEMTEST);
 	} else {
-		writel(BFI_IOC_INITING, ioc->ioc_regs.ioc_fwstate);
-		writel(BFI_IOC_INITING, ioc->ioc_regs.alt_ioc_fwstate);
+		bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_INITING);
+		bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_INITING);
 	}
 
 	bfa_ioc_msgflush(ioc);
-	bfa_ioc_download_fw(ioc, boot_type, boot_env);
-	bfa_ioc_lpu_start(ioc);
+	status = bfa_ioc_download_fw(ioc, boot_type, boot_env);
+	if (status == BFA_STATUS_OK)
+		bfa_ioc_lpu_start(ioc);
+	else
+		bfa_nw_iocpf_timeout(ioc);
+
+	return status;
 }
 
 /* Enable/disable IOC failure auto recovery. */
@@ -2473,7 +3020,7 @@ bfa_nw_iocpf_sem_timeout(void *ioc_arg)
 static void
 bfa_ioc_poll_fwinit(struct bfa_ioc *ioc)
 {
-	u32 fwstate = readl(ioc->ioc_regs.ioc_fwstate);
+	u32 fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc);
 
 	if (fwstate == BFI_IOC_DISABLED) {
 		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.h b/drivers/net/ethernet/brocade/bna/bfa_ioc.h
index f04e0aab25b4..20cff7df4b55 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.h
@@ -215,6 +215,13 @@ struct bfa_ioc_hwif {
 	void		(*ioc_sync_ack)		(struct bfa_ioc *ioc);
 	bool		(*ioc_sync_complete)	(struct bfa_ioc *ioc);
 	bool		(*ioc_lpu_read_stat)	(struct bfa_ioc *ioc);
+	void		(*ioc_set_fwstate)	(struct bfa_ioc *ioc,
+					enum bfi_ioc_state fwstate);
+	enum bfi_ioc_state (*ioc_get_fwstate) (struct bfa_ioc *ioc);
+	void		(*ioc_set_alt_fwstate)	(struct bfa_ioc *ioc,
+					enum bfi_ioc_state fwstate);
+	enum bfi_ioc_state (*ioc_get_alt_fwstate) (struct bfa_ioc *ioc);
+
 };
 
 #define bfa_ioc_pcifn(__ioc)		((__ioc)->pcidev.pci_func)
@@ -291,6 +298,7 @@ void bfa_nw_ioc_error_isr(struct bfa_ioc *ioc);
 bool bfa_nw_ioc_is_disabled(struct bfa_ioc *ioc);
 bool bfa_nw_ioc_is_operational(struct bfa_ioc *ioc);
 void bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr);
+enum bfa_status bfa_nw_ioc_fwsig_invalidate(struct bfa_ioc *ioc);
 void bfa_nw_ioc_notify_register(struct bfa_ioc *ioc,
 	struct bfa_ioc_notify *notify);
 bool bfa_nw_ioc_sem_get(void __iomem *sem_reg);
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c b/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
index 5df0b0c68c5a..d639558455cb 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
@@ -48,6 +48,12 @@ static void bfa_ioc_ct_sync_join(struct bfa_ioc *ioc);
 static void bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc);
 static void bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc);
 static bool bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc);
+static void bfa_ioc_ct_set_cur_ioc_fwstate(
+			struct bfa_ioc *ioc, enum bfi_ioc_state fwstate);
+static enum bfi_ioc_state bfa_ioc_ct_get_cur_ioc_fwstate(struct bfa_ioc *ioc);
+static void bfa_ioc_ct_set_alt_ioc_fwstate(
+			struct bfa_ioc *ioc, enum bfi_ioc_state fwstate);
+static enum bfi_ioc_state bfa_ioc_ct_get_alt_ioc_fwstate(struct bfa_ioc *ioc);
 static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb,
 				enum bfi_asic_mode asic_mode);
 static enum bfa_status bfa_ioc_ct2_pll_init(void __iomem *rb,
@@ -68,6 +74,10 @@ static const struct bfa_ioc_hwif nw_hwif_ct = {
 	.ioc_sync_leave	     = bfa_ioc_ct_sync_leave,
 	.ioc_sync_ack	     = bfa_ioc_ct_sync_ack,
 	.ioc_sync_complete   = bfa_ioc_ct_sync_complete,
+	.ioc_set_fwstate     = bfa_ioc_ct_set_cur_ioc_fwstate,
+	.ioc_get_fwstate     = bfa_ioc_ct_get_cur_ioc_fwstate,
+	.ioc_set_alt_fwstate     = bfa_ioc_ct_set_alt_ioc_fwstate,
+	.ioc_get_alt_fwstate     = bfa_ioc_ct_get_alt_ioc_fwstate,
 };
 
 static const struct bfa_ioc_hwif nw_hwif_ct2 = {
@@ -85,6 +95,10 @@ static const struct bfa_ioc_hwif nw_hwif_ct2 = {
 	.ioc_sync_leave	     = bfa_ioc_ct_sync_leave,
 	.ioc_sync_ack	     = bfa_ioc_ct_sync_ack,
 	.ioc_sync_complete   = bfa_ioc_ct_sync_complete,
+	.ioc_set_fwstate     = bfa_ioc_ct_set_cur_ioc_fwstate,
+	.ioc_get_fwstate     = bfa_ioc_ct_get_cur_ioc_fwstate,
+	.ioc_set_alt_fwstate     = bfa_ioc_ct_set_alt_ioc_fwstate,
+	.ioc_get_alt_fwstate     = bfa_ioc_ct_get_alt_ioc_fwstate,
 };
 
 /* Called from bfa_ioc_attach() to map asic specific calls. */
@@ -565,6 +579,32 @@ bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc)
 	return false;
 }
 
+static void
+bfa_ioc_ct_set_cur_ioc_fwstate(struct bfa_ioc *ioc,
+			       enum bfi_ioc_state fwstate)
+{
+	writel(fwstate, ioc->ioc_regs.ioc_fwstate);
+}
+
+static enum bfi_ioc_state
+bfa_ioc_ct_get_cur_ioc_fwstate(struct bfa_ioc *ioc)
+{
+	return (enum bfi_ioc_state)readl(ioc->ioc_regs.ioc_fwstate);
+}
+
+static void
+bfa_ioc_ct_set_alt_ioc_fwstate(struct bfa_ioc *ioc,
+			       enum bfi_ioc_state fwstate)
+{
+	writel(fwstate, ioc->ioc_regs.alt_ioc_fwstate);
+}
+
+static enum bfi_ioc_state
+bfa_ioc_ct_get_alt_ioc_fwstate(struct bfa_ioc *ioc)
+{
+	return (enum bfi_ioc_state)readl(ioc->ioc_regs.alt_ioc_fwstate);
+}
+
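The bfa_ioc_get_cur_ioc_fwstate()/bfa_ioc_set_cur_ioc_fwstate() wrappers (and their alt_ counterparts) used elsewhere in this patch are not shown here; presumably they just dispatch through the ops added to struct bfa_ioc_hwif, roughly as in this sketch (the ioc_hwif member name is an assumption, not confirmed by this excerpt):

/* Sketch only - assumes the hwif ops table is reachable as ioc->ioc_hwif. */
static enum bfi_ioc_state
bfa_ioc_get_cur_ioc_fwstate_sketch(struct bfa_ioc *ioc)
{
	return ioc->ioc_hwif->ioc_get_fwstate(ioc);
}

static void
bfa_ioc_set_cur_ioc_fwstate_sketch(struct bfa_ioc *ioc,
				   enum bfi_ioc_state fwstate)
{
	ioc->ioc_hwif->ioc_set_fwstate(ioc, fwstate);
}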
 static enum bfa_status
 bfa_ioc_ct_pll_init(void __iomem *rb, enum bfi_asic_mode asic_mode)
 {
diff --git a/drivers/net/ethernet/brocade/bna/bfi.h b/drivers/net/ethernet/brocade/bna/bfi.h
index 1f24c23dc786..8c563a77cdf6 100644
--- a/drivers/net/ethernet/brocade/bna/bfi.h
+++ b/drivers/net/ethernet/brocade/bna/bfi.h
@@ -25,6 +25,7 @@
 /* BFI FW image type */
 #define	BFI_FLASH_CHUNK_SZ			256	/*!< Flash chunk size */
 #define	BFI_FLASH_CHUNK_SZ_WORDS	(BFI_FLASH_CHUNK_SZ/sizeof(u32))
+#define BFI_FLASH_IMAGE_SZ		0x100000
 
 /* Msg header common to all msgs */
 struct bfi_mhdr {
@@ -233,7 +234,29 @@ struct bfi_ioc_getattr_reply {
 #define BFI_IOC_TRC_HDR_SZ	32
 
 #define BFI_IOC_FW_SIGNATURE	(0xbfadbfad)
+#define BFI_IOC_FW_INV_SIGN	(0xdeaddead)
 #define BFI_IOC_MD5SUM_SZ	4
+
+struct bfi_ioc_fwver {
+#ifdef __BIG_ENDIAN
+	u8 patch;
+	u8 maint;
+	u8 minor;
+	u8 major;
+	u8 rsvd[2];
+	u8 build;
+	u8 phase;
+#else
+	u8 major;
+	u8 minor;
+	u8 maint;
+	u8 patch;
+	u8 phase;
+	u8 build;
+	u8 rsvd[2];
+#endif
+};
+
 struct bfi_ioc_image_hdr {
 	u32	signature;	/*!< constant signature */
 	u8	asic_gen;	/*!< asic generation */
@@ -242,10 +265,18 @@ struct bfi_ioc_image_hdr {
 	u8	port1_mode;	/*!< device mode for port 1 */
 	u32	exec;		/*!< exec vector	*/
 	u32	bootenv;	/*!< firmware boot env */
-	u32	rsvd_b[4];
+	u32	rsvd_b[2];
+	struct bfi_ioc_fwver fwver;
 	u32	md5sum[BFI_IOC_MD5SUM_SZ];
 };
 
+enum bfi_ioc_img_ver_cmp {
+	BFI_IOC_IMG_VER_INCOMP,
+	BFI_IOC_IMG_VER_OLD,
+	BFI_IOC_IMG_VER_SAME,
+	BFI_IOC_IMG_VER_BETTER
+};
+
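bfa_ioc_fw_ver_patch_cmp() itself is outside this patch; purely as a hypothetical sketch of a patch-level comparison over struct bfi_ioc_fwver (field names from the structure above, semantics inferred from the enum and its callers, not confirmed by this code):

/* Hypothetical illustration only - not the driver's implementation. */
static enum bfi_ioc_img_ver_cmp
fwver_patch_cmp_sketch(const struct bfi_ioc_fwver *base,
		       const struct bfi_ioc_fwver *other)
{
	/* Different major/minor/maint streams: treat as incompatible. */
	if (base->major != other->major ||
	    base->minor != other->minor ||
	    base->maint != other->maint)
		return BFI_IOC_IMG_VER_INCOMP;

	if (other->patch > base->patch)
		return BFI_IOC_IMG_VER_BETTER;
	if (other->patch < base->patch)
		return BFI_IOC_IMG_VER_OLD;

	return BFI_IOC_IMG_VER_SAME;
}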
 #define BFI_FWBOOT_DEVMODE_OFF		4
 #define BFI_FWBOOT_TYPE_OFF		8
 #define BFI_FWBOOT_ENV_OFF		12
diff --git a/drivers/net/ethernet/brocade/bna/bfi_enet.h b/drivers/net/ethernet/brocade/bna/bfi_enet.h
index 7d10e335c27d..ae072dc5d238 100644
--- a/drivers/net/ethernet/brocade/bna/bfi_enet.h
+++ b/drivers/net/ethernet/brocade/bna/bfi_enet.h
@@ -472,7 +472,8 @@ enum bfi_enet_hds_type {
 
 struct bfi_enet_rx_cfg {
 	u8		rxq_type;
-	u8		rsvd[3];
+	u8		rsvd[1];
+	u16		frame_size;
 
 	struct {
 		u8			max_header_size;
diff --git a/drivers/net/ethernet/brocade/bna/bna.h b/drivers/net/ethernet/brocade/bna/bna.h
index f1eafc409bbd..1f512190d696 100644
--- a/drivers/net/ethernet/brocade/bna/bna.h
+++ b/drivers/net/ethernet/brocade/bna/bna.h
@@ -354,6 +354,14 @@ do {									\
 	}								\
 } while (0)
 
+#define bna_mcam_mod_free_q(_bna) (&(_bna)->mcam_mod.free_q)
+
+#define bna_mcam_mod_del_q(_bna) (&(_bna)->mcam_mod.del_q)
+
+#define bna_ucam_mod_free_q(_bna) (&(_bna)->ucam_mod.free_q)
+
+#define bna_ucam_mod_del_q(_bna) (&(_bna)->ucam_mod.del_q)
+
 /*  Inline functions  */
 
 static inline struct bna_mac *bna_mac_find(struct list_head *q, u8 *addr)
@@ -391,12 +399,8 @@ int bna_num_rxp_set(struct bna *bna, int num_rxp);
 void bna_hw_stats_get(struct bna *bna);
 
 /* APIs for RxF */
-struct bna_mac *bna_ucam_mod_mac_get(struct bna_ucam_mod *ucam_mod);
-void bna_ucam_mod_mac_put(struct bna_ucam_mod *ucam_mod,
-			  struct bna_mac *mac);
-struct bna_mac *bna_mcam_mod_mac_get(struct bna_mcam_mod *mcam_mod);
-void bna_mcam_mod_mac_put(struct bna_mcam_mod *mcam_mod,
-			  struct bna_mac *mac);
+struct bna_mac *bna_cam_mod_mac_get(struct list_head *head);
+void bna_cam_mod_mac_put(struct list_head *tail, struct bna_mac *mac);
 struct bna_mcam_handle *bna_mcam_mod_handle_get(struct bna_mcam_mod *mod);
 void bna_mcam_mod_handle_put(struct bna_mcam_mod *mcam_mod,
 			  struct bna_mcam_handle *handle);
@@ -493,11 +497,17 @@ enum bna_cb_status
 bna_rx_ucast_del(struct bna_rx *rx, u8 *ucmac,
 		 void (*cbfn)(struct bnad *, struct bna_rx *));
 enum bna_cb_status
+bna_rx_ucast_listset(struct bna_rx *rx, int count, u8 *uclist,
+		     void (*cbfn)(struct bnad *, struct bna_rx *));
+enum bna_cb_status
 bna_rx_mcast_add(struct bna_rx *rx, u8 *mcmac,
 		 void (*cbfn)(struct bnad *, struct bna_rx *));
 enum bna_cb_status
 bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mcmac,
 		     void (*cbfn)(struct bnad *, struct bna_rx *));
+void
+bna_rx_mcast_delall(struct bna_rx *rx,
+		    void (*cbfn)(struct bnad *, struct bna_rx *));
 enum bna_cb_status
 bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode rxmode,
 		enum bna_rxmode bitmask,
@@ -505,6 +515,8 @@ bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode rxmode,
 void bna_rx_vlan_add(struct bna_rx *rx, int vlan_id);
 void bna_rx_vlan_del(struct bna_rx *rx, int vlan_id);
 void bna_rx_vlanfilter_enable(struct bna_rx *rx);
+void bna_rx_vlan_strip_enable(struct bna_rx *rx);
+void bna_rx_vlan_strip_disable(struct bna_rx *rx);
 /* ENET */
 
 /* API for RX */
diff --git a/drivers/net/ethernet/brocade/bna/bna_enet.c b/drivers/net/ethernet/brocade/bna/bna_enet.c
index 3ca77fad4851..13f9636cdba7 100644
--- a/drivers/net/ethernet/brocade/bna/bna_enet.c
+++ b/drivers/net/ethernet/brocade/bna/bna_enet.c
@@ -1811,6 +1811,13 @@ bna_ucam_mod_init(struct bna_ucam_mod *ucam_mod, struct bna *bna,
 		list_add_tail(&ucam_mod->ucmac[i].qe, &ucam_mod->free_q);
 	}
 
+	/* A separate queue to allow synchronous setting of a list of MACs */
+	INIT_LIST_HEAD(&ucam_mod->del_q);
+	for (i = i; i < (bna->ioceth.attr.num_ucmac * 2); i++) {
+		bfa_q_qe_init(&ucam_mod->ucmac[i].qe);
+		list_add_tail(&ucam_mod->ucmac[i].qe, &ucam_mod->del_q);
+	}
+
 	ucam_mod->bna = bna;
 }
 
@@ -1818,11 +1825,16 @@ static void
 bna_ucam_mod_uninit(struct bna_ucam_mod *ucam_mod)
 {
 	struct list_head *qe;
-	int i = 0;
+	int i;
 
+	i = 0;
 	list_for_each(qe, &ucam_mod->free_q)
 		i++;
 
+	i = 0;
+	list_for_each(qe, &ucam_mod->del_q)
+		i++;
+
 	ucam_mod->bna = NULL;
 }
 
@@ -1851,6 +1863,13 @@ bna_mcam_mod_init(struct bna_mcam_mod *mcam_mod, struct bna *bna,
 				&mcam_mod->free_handle_q);
 	}
 
+	/* A separate queue to allow synchronous setting of a list of MACs */
+	INIT_LIST_HEAD(&mcam_mod->del_q);
+	for (i = i; i < (bna->ioceth.attr.num_mcmac * 2); i++) {
+		bfa_q_qe_init(&mcam_mod->mcmac[i].qe);
+		list_add_tail(&mcam_mod->mcmac[i].qe, &mcam_mod->del_q);
+	}
+
 	mcam_mod->bna = bna;
 }
 
@@ -1864,6 +1883,9 @@ bna_mcam_mod_uninit(struct bna_mcam_mod *mcam_mod)
 	list_for_each(qe, &mcam_mod->free_q) i++;
 
 	i = 0;
+	list_for_each(qe, &mcam_mod->del_q) i++;
+
+	i = 0;
 	list_for_each(qe, &mcam_mod->free_handle_q) i++;
 
 	mcam_mod->bna = NULL;
@@ -1976,7 +1998,7 @@ bna_mod_res_req(struct bna *bna, struct bna_res_info *res_info)
 		BNA_MEM_T_KVA;
 	res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.num = 1;
 	res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.len =
-		attr->num_ucmac * sizeof(struct bna_mac);
+		(attr->num_ucmac * 2) * sizeof(struct bna_mac);
 
 	/* Virtual memory for Multicast MAC address - stored by mcam module */
 	res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_type = BNA_RES_T_MEM;
@@ -1984,7 +2006,7 @@ bna_mod_res_req(struct bna *bna, struct bna_res_info *res_info)
 		BNA_MEM_T_KVA;
 	res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.num = 1;
 	res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.len =
-		attr->num_mcmac * sizeof(struct bna_mac);
+		(attr->num_mcmac * 2) * sizeof(struct bna_mac);
 
 	/* Virtual memory for Multicast handle - stored by mcam module */
 	res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_type = BNA_RES_T_MEM;
@@ -2080,41 +2102,21 @@ bna_num_rxp_set(struct bna *bna, int num_rxp)
 }
 
 struct bna_mac *
-bna_ucam_mod_mac_get(struct bna_ucam_mod *ucam_mod)
-{
-	struct list_head *qe;
-
-	if (list_empty(&ucam_mod->free_q))
-		return NULL;
-
-	bfa_q_deq(&ucam_mod->free_q, &qe);
-
-	return (struct bna_mac *)qe;
-}
-
-void
-bna_ucam_mod_mac_put(struct bna_ucam_mod *ucam_mod, struct bna_mac *mac)
-{
-	list_add_tail(&mac->qe, &ucam_mod->free_q);
-}
-
-struct bna_mac *
-bna_mcam_mod_mac_get(struct bna_mcam_mod *mcam_mod)
+bna_cam_mod_mac_get(struct list_head *head)
 {
 	struct list_head *qe;
 
-	if (list_empty(&mcam_mod->free_q))
+	if (list_empty(head))
 		return NULL;
 
-	bfa_q_deq(&mcam_mod->free_q, &qe);
-
+	bfa_q_deq(head, &qe);
 	return (struct bna_mac *)qe;
 }
 
 void
-bna_mcam_mod_mac_put(struct bna_mcam_mod *mcam_mod, struct bna_mac *mac)
+bna_cam_mod_mac_put(struct list_head *tail, struct bna_mac *mac)
 {
-	list_add_tail(&mac->qe, &mcam_mod->free_q);
+	list_add_tail(&mac->qe, tail);
 }
 
 struct bna_mcam_handle *
diff --git a/drivers/net/ethernet/brocade/bna/bna_hw_defs.h b/drivers/net/ethernet/brocade/bna/bna_hw_defs.h
index af3f7bb0b3b8..2702d02e98d9 100644
--- a/drivers/net/ethernet/brocade/bna/bna_hw_defs.h
+++ b/drivers/net/ethernet/brocade/bna/bna_hw_defs.h
@@ -322,6 +322,10 @@ do {									\
 #define	BNA_CQ_EF_REMOTE	(1 << 19)
 
 #define	BNA_CQ_EF_LOCAL		(1 << 20)
+/* CAT2 ASIC does not use bit 21 as per the SPEC.
+ * Bit 31 is set in every end-of-frame completion.
+ */
+#define BNA_CQ_EF_EOP		(1 << 31)
 
 /* Data structures */
 
diff --git a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
index 3c07064b2bc4..85e63546abe3 100644
--- a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
+++ b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
@@ -529,13 +529,13 @@ bna_rxf_mcast_cfg_apply(struct bna_rxf *rxf)
 	struct list_head *qe;
 	int ret;
 
-	/* Delete multicast entries previousely added */
+	/* First delete multicast entries to maintain the count */
 	while (!list_empty(&rxf->mcast_pending_del_q)) {
 		bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
 		bfa_q_qe_init(qe);
 		mac = (struct bna_mac *)qe;
 		ret = bna_rxf_mcast_del(rxf, mac, BNA_HARD_CLEANUP);
-		bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
+		bna_cam_mod_mac_put(bna_mcam_mod_del_q(rxf->rx->bna), mac);
 		if (ret)
 			return ret;
 	}
@@ -586,7 +586,7 @@ bna_rxf_mcast_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
 		bfa_q_qe_init(qe);
 		mac = (struct bna_mac *)qe;
 		ret = bna_rxf_mcast_del(rxf, mac, cleanup);
-		bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
+		bna_cam_mod_mac_put(bna_mcam_mod_del_q(rxf->rx->bna), mac);
 		if (ret)
 			return ret;
 	}
@@ -796,20 +796,20 @@ bna_rxf_uninit(struct bna_rxf *rxf)
 	while (!list_empty(&rxf->ucast_pending_add_q)) {
 		bfa_q_deq(&rxf->ucast_pending_add_q, &mac);
 		bfa_q_qe_init(&mac->qe);
-		bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
+		bna_cam_mod_mac_put(bna_ucam_mod_free_q(rxf->rx->bna), mac);
 	}
 
 	if (rxf->ucast_pending_mac) {
 		bfa_q_qe_init(&rxf->ucast_pending_mac->qe);
-		bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod,
-			rxf->ucast_pending_mac);
+		bna_cam_mod_mac_put(bna_ucam_mod_free_q(rxf->rx->bna),
+				    rxf->ucast_pending_mac);
 		rxf->ucast_pending_mac = NULL;
 	}
 
 	while (!list_empty(&rxf->mcast_pending_add_q)) {
 		bfa_q_deq(&rxf->mcast_pending_add_q, &mac);
 		bfa_q_qe_init(&mac->qe);
-		bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
+		bna_cam_mod_mac_put(bna_mcam_mod_free_q(rxf->rx->bna), mac);
 	}
 
 	rxf->rxmode_pending = 0;
@@ -869,7 +869,7 @@ bna_rx_ucast_set(struct bna_rx *rx, u8 *ucmac,
 
 	if (rxf->ucast_pending_mac == NULL) {
 		rxf->ucast_pending_mac =
-				bna_ucam_mod_mac_get(&rxf->rx->bna->ucam_mod);
+			bna_cam_mod_mac_get(bna_ucam_mod_free_q(rxf->rx->bna));
 		if (rxf->ucast_pending_mac == NULL)
 			return BNA_CB_UCAST_CAM_FULL;
 		bfa_q_qe_init(&rxf->ucast_pending_mac->qe);
@@ -900,7 +900,7 @@ bna_rx_mcast_add(struct bna_rx *rx, u8 *addr,
 		return BNA_CB_SUCCESS;
 	}
 
-	mac = bna_mcam_mod_mac_get(&rxf->rx->bna->mcam_mod);
+	mac = bna_cam_mod_mac_get(bna_mcam_mod_free_q(rxf->rx->bna));
 	if (mac == NULL)
 		return BNA_CB_MCAST_LIST_FULL;
 	bfa_q_qe_init(&mac->qe);
@@ -916,35 +916,92 @@ bna_rx_mcast_add(struct bna_rx *rx, u8 *addr,
 }
 
 enum bna_cb_status
-bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mclist,
+bna_rx_ucast_listset(struct bna_rx *rx, int count, u8 *uclist,
 		     void (*cbfn)(struct bnad *, struct bna_rx *))
 {
+	struct bna_ucam_mod *ucam_mod = &rx->bna->ucam_mod;
 	struct bna_rxf *rxf = &rx->rxf;
 	struct list_head list_head;
 	struct list_head *qe;
 	u8 *mcaddr;
-	struct bna_mac *mac;
+	struct bna_mac *mac, *del_mac;
 	int i;
 
+	/* Purge the pending_add_q */
+	while (!list_empty(&rxf->ucast_pending_add_q)) {
+		bfa_q_deq(&rxf->ucast_pending_add_q, &qe);
+		bfa_q_qe_init(qe);
+		mac = (struct bna_mac *)qe;
+		bna_cam_mod_mac_put(&ucam_mod->free_q, mac);
+	}
+
+	/* Schedule active_q entries for deletion */
+	while (!list_empty(&rxf->ucast_active_q)) {
+		bfa_q_deq(&rxf->ucast_active_q, &qe);
+		mac = (struct bna_mac *)qe;
+		bfa_q_qe_init(&mac->qe);
+
+		del_mac = bna_cam_mod_mac_get(&ucam_mod->del_q);
+		memcpy(del_mac, mac, sizeof(*del_mac));
+		list_add_tail(&del_mac->qe, &rxf->ucast_pending_del_q);
+		bna_cam_mod_mac_put(&ucam_mod->free_q, mac);
+	}
+
 	/* Allocate nodes */
 	INIT_LIST_HEAD(&list_head);
-	for (i = 0, mcaddr = mclist; i < count; i++) {
-		mac = bna_mcam_mod_mac_get(&rxf->rx->bna->mcam_mod);
+	for (i = 0, mcaddr = uclist; i < count; i++) {
+		mac = bna_cam_mod_mac_get(&ucam_mod->free_q);
 		if (mac == NULL)
 			goto err_return;
 		bfa_q_qe_init(&mac->qe);
 		memcpy(mac->addr, mcaddr, ETH_ALEN);
 		list_add_tail(&mac->qe, &list_head);
-
 		mcaddr += ETH_ALEN;
 	}
 
+	/* Add the new entries */
+	while (!list_empty(&list_head)) {
+		bfa_q_deq(&list_head, &qe);
+		mac = (struct bna_mac *)qe;
+		bfa_q_qe_init(&mac->qe);
+		list_add_tail(&mac->qe, &rxf->ucast_pending_add_q);
+	}
+
+	rxf->cam_fltr_cbfn = cbfn;
+	rxf->cam_fltr_cbarg = rx->bna->bnad;
+	bfa_fsm_send_event(rxf, RXF_E_CONFIG);
+
+	return BNA_CB_SUCCESS;
+
+err_return:
+	while (!list_empty(&list_head)) {
+		bfa_q_deq(&list_head, &qe);
+		mac = (struct bna_mac *)qe;
+		bfa_q_qe_init(&mac->qe);
+		bna_cam_mod_mac_put(&ucam_mod->free_q, mac);
+	}
+
+	return BNA_CB_UCAST_CAM_FULL;
+}
+
+enum bna_cb_status
+bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mclist,
+		     void (*cbfn)(struct bnad *, struct bna_rx *))
+{
+	struct bna_mcam_mod *mcam_mod = &rx->bna->mcam_mod;
+	struct bna_rxf *rxf = &rx->rxf;
+	struct list_head list_head;
+	struct list_head *qe;
+	u8 *mcaddr;
+	struct bna_mac *mac, *del_mac;
+	int i;
+
 	/* Purge the pending_add_q */
 	while (!list_empty(&rxf->mcast_pending_add_q)) {
 		bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
 		bfa_q_qe_init(qe);
 		mac = (struct bna_mac *)qe;
-		bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
+		bna_cam_mod_mac_put(&mcam_mod->free_q, mac);
 	}
 
 	/* Schedule active_q entries for deletion */
@@ -952,7 +1009,26 @@ bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mclist,
 		bfa_q_deq(&rxf->mcast_active_q, &qe);
 		mac = (struct bna_mac *)qe;
 		bfa_q_qe_init(&mac->qe);
-		list_add_tail(&mac->qe, &rxf->mcast_pending_del_q);
+
+		del_mac = bna_cam_mod_mac_get(&mcam_mod->del_q);
+
+		memcpy(del_mac, mac, sizeof(*del_mac));
+		list_add_tail(&del_mac->qe, &rxf->mcast_pending_del_q);
+		mac->handle = NULL;
+		bna_cam_mod_mac_put(&mcam_mod->free_q, mac);
+	}
+
+	/* Allocate nodes */
+	INIT_LIST_HEAD(&list_head);
+	for (i = 0, mcaddr = mclist; i < count; i++) {
+		mac = bna_cam_mod_mac_get(&mcam_mod->free_q);
+		if (mac == NULL)
+			goto err_return;
+		bfa_q_qe_init(&mac->qe);
+		memcpy(mac->addr, mcaddr, ETH_ALEN);
+		list_add_tail(&mac->qe, &list_head);
+
+		mcaddr += ETH_ALEN;
 	}
 
 	/* Add the new entries */
@@ -974,13 +1050,56 @@ err_return:
 		bfa_q_deq(&list_head, &qe);
 		mac = (struct bna_mac *)qe;
 		bfa_q_qe_init(&mac->qe);
-		bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
+		bna_cam_mod_mac_put(&mcam_mod->free_q, mac);
 	}
 
 	return BNA_CB_MCAST_LIST_FULL;
 }
 
 void
+bna_rx_mcast_delall(struct bna_rx *rx,
+		    void (*cbfn)(struct bnad *, struct bna_rx *))
+{
+	struct bna_rxf *rxf = &rx->rxf;
+	struct list_head *qe;
+	struct bna_mac *mac, *del_mac;
+	int need_hw_config = 0;
+
+	/* Purge all entries from pending_add_q */
+	while (!list_empty(&rxf->mcast_pending_add_q)) {
+		bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
+		mac = (struct bna_mac *)qe;
+		bfa_q_qe_init(&mac->qe);
+		bna_cam_mod_mac_put(bna_mcam_mod_free_q(rxf->rx->bna), mac);
+	}
+
+	/* Schedule all entries in active_q for deletion */
+	while (!list_empty(&rxf->mcast_active_q)) {
+		bfa_q_deq(&rxf->mcast_active_q, &qe);
+		mac = (struct bna_mac *)qe;
+		bfa_q_qe_init(&mac->qe);
+
+		del_mac = bna_cam_mod_mac_get(bna_mcam_mod_del_q(rxf->rx->bna));
+
+		memcpy(del_mac, mac, sizeof(*del_mac));
+		list_add_tail(&del_mac->qe, &rxf->mcast_pending_del_q);
+		mac->handle = NULL;
+		bna_cam_mod_mac_put(bna_mcam_mod_free_q(rxf->rx->bna), mac);
+		need_hw_config = 1;
+	}
+
+	if (need_hw_config) {
+		rxf->cam_fltr_cbfn = cbfn;
+		rxf->cam_fltr_cbarg = rx->bna->bnad;
+		bfa_fsm_send_event(rxf, RXF_E_CONFIG);
+		return;
+	}
+
+	if (cbfn)
+		(*cbfn)(rx->bna->bnad, rx);
+}
+
+void
 bna_rx_vlan_add(struct bna_rx *rx, int vlan_id)
 {
 	struct bna_rxf *rxf = &rx->rxf;
@@ -1022,7 +1141,7 @@ bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf)
 		bfa_q_qe_init(qe);
 		mac = (struct bna_mac *)qe;
 		bna_bfi_ucast_req(rxf, mac, BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
-		bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
+		bna_cam_mod_mac_put(bna_ucam_mod_del_q(rxf->rx->bna), mac);
 		return 1;
 	}
 
@@ -1062,11 +1181,13 @@ bna_rxf_ucast_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
 		bfa_q_qe_init(qe);
 		mac = (struct bna_mac *)qe;
 		if (cleanup == BNA_SOFT_CLEANUP)
-			bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
+			bna_cam_mod_mac_put(bna_ucam_mod_del_q(rxf->rx->bna),
+					    mac);
 		else {
 			bna_bfi_ucast_req(rxf, mac,
 				BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
-			bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
+			bna_cam_mod_mac_put(bna_ucam_mod_del_q(rxf->rx->bna),
+					    mac);
 			return 1;
 		}
 	}
@@ -1690,6 +1811,7 @@ bna_bfi_rx_enet_start(struct bna_rx *rx)
 	cfg_req->mh.num_entries = htons(
 		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rx_cfg_req)));
 
+	cfg_req->rx_cfg.frame_size = bna_enet_mtu_get(&rx->bna->enet);
 	cfg_req->num_queue_sets = rx->num_paths;
 	for (i = 0, rxp_qe = bfa_q_first(&rx->rxp_q);
 		i < rx->num_paths;
@@ -1711,8 +1833,17 @@ bna_bfi_rx_enet_start(struct bna_rx *rx)
 			/* Large/Single RxQ */
 			bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].ql.q,
 						&q0->qpt);
-			q0->buffer_size =
-				bna_enet_mtu_get(&rx->bna->enet);
+			if (q0->multi_buffer)
+				/* multi-buffer is enabled by allocating
+				 * a new rx with a new set of resources.
+				 * q0->buffer_size should be initialized to
+				 * the fragment size.
+				 */
+				cfg_req->rx_cfg.multi_buffer =
+					BNA_STATUS_T_ENABLED;
+			else
+				q0->buffer_size =
+					bna_enet_mtu_get(&rx->bna->enet);
 			cfg_req->q_cfg[i].ql.rx_buffer_size =
 				htons((u16)q0->buffer_size);
 			break;
@@ -2262,8 +2393,8 @@ bna_rx_res_req(struct bna_rx_config *q_cfg, struct bna_res_info *res_info)
 	u32 hq_depth;
 	u32 dq_depth;
 
-	dq_depth = q_cfg->q_depth;
-	hq_depth = ((q_cfg->rxp_type == BNA_RXP_SINGLE) ? 0 : q_cfg->q_depth);
+	dq_depth = q_cfg->q0_depth;
+	hq_depth = ((q_cfg->rxp_type == BNA_RXP_SINGLE) ? 0 : q_cfg->q1_depth);
 	cq_depth = dq_depth + hq_depth;
 
 	BNA_TO_POWER_OF_2_HIGH(cq_depth);
@@ -2380,10 +2511,10 @@ bna_rx_create(struct bna *bna, struct bnad *bnad,
 	struct bna_rxq *q0;
 	struct bna_rxq *q1;
 	struct bna_intr_info *intr_info;
-	u32 page_count;
+	struct bna_mem_descr *hqunmap_mem;
+	struct bna_mem_descr *dqunmap_mem;
 	struct bna_mem_descr *ccb_mem;
 	struct bna_mem_descr *rcb_mem;
-	struct bna_mem_descr *unmapq_mem;
 	struct bna_mem_descr *cqpt_mem;
 	struct bna_mem_descr *cswqpt_mem;
 	struct bna_mem_descr *cpage_mem;
@@ -2393,8 +2524,10 @@ bna_rx_create(struct bna *bna, struct bnad *bnad,
 	struct bna_mem_descr *dsqpt_mem;
 	struct bna_mem_descr *hpage_mem;
 	struct bna_mem_descr *dpage_mem;
-	int i;
-	int dpage_count, hpage_count, rcb_idx;
+	u32 dpage_count, hpage_count;
+	u32 hq_idx, dq_idx, rcb_idx;
+	u32 cq_depth, i;
+	u32 page_count;
 
 	if (!bna_rx_res_check(rx_mod, rx_cfg))
 		return NULL;
@@ -2402,7 +2535,8 @@ bna_rx_create(struct bna *bna, struct bnad *bnad,
 	intr_info = &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
 	ccb_mem = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info.mdl[0];
 	rcb_mem = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info.mdl[0];
-	unmapq_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPQ].res_u.mem_info.mdl[0];
+	dqunmap_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPDQ].res_u.mem_info.mdl[0];
+	hqunmap_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPHQ].res_u.mem_info.mdl[0];
 	cqpt_mem = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info.mdl[0];
 	cswqpt_mem = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info.mdl[0];
 	cpage_mem = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.mdl[0];
@@ -2454,7 +2588,8 @@ bna_rx_create(struct bna *bna, struct bnad *bnad,
 	}
 
 	rx->num_paths = rx_cfg->num_paths;
-	for (i = 0, rcb_idx = 0; i < rx->num_paths; i++) {
+	for (i = 0, hq_idx = 0, dq_idx = 0, rcb_idx = 0;
+			i < rx->num_paths; i++) {
 		rxp = bna_rxp_get(rx_mod);
 		list_add_tail(&rxp->qe, &rx->rxp_q);
 		rxp->type = rx_cfg->rxp_type;
@@ -2497,9 +2632,13 @@ bna_rx_create(struct bna *bna, struct bnad *bnad,
 		q0->rxp = rxp;
 
 		q0->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
-		q0->rcb->unmap_q = (void *)unmapq_mem[rcb_idx].kva;
-		rcb_idx++;
-		q0->rcb->q_depth = rx_cfg->q_depth;
+		q0->rcb->unmap_q = (void *)dqunmap_mem[dq_idx].kva;
+		rcb_idx++; dq_idx++;
+		q0->rcb->q_depth = rx_cfg->q0_depth;
+		q0->q_depth = rx_cfg->q0_depth;
+		q0->multi_buffer = rx_cfg->q0_multi_buf;
+		q0->buffer_size = rx_cfg->q0_buf_size;
+		q0->num_vecs = rx_cfg->q0_num_vecs;
 		q0->rcb->rxq = q0;
 		q0->rcb->bnad = bna->bnad;
 		q0->rcb->id = 0;
@@ -2519,15 +2658,18 @@ bna_rx_create(struct bna *bna, struct bnad *bnad,
 			q1->rxp = rxp;
 
 			q1->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
-			q1->rcb->unmap_q = (void *)unmapq_mem[rcb_idx].kva;
-			rcb_idx++;
-			q1->rcb->q_depth = rx_cfg->q_depth;
+			q1->rcb->unmap_q = (void *)hqunmap_mem[hq_idx].kva;
+			rcb_idx++; hq_idx++;
+			q1->rcb->q_depth = rx_cfg->q1_depth;
+			q1->q_depth = rx_cfg->q1_depth;
+			q1->multi_buffer = BNA_STATUS_T_DISABLED;
+			q1->num_vecs = 1;
 			q1->rcb->rxq = q1;
 			q1->rcb->bnad = bna->bnad;
 			q1->rcb->id = 1;
 			q1->buffer_size = (rx_cfg->rxp_type == BNA_RXP_HDS) ?
 					rx_cfg->hds_config.forced_offset
-					: rx_cfg->small_buff_size;
+					: rx_cfg->q1_buf_size;
 			q1->rx_packets = q1->rx_bytes = 0;
 			q1->rx_packets_with_error = q1->rxbuf_alloc_failed = 0;
 
@@ -2542,9 +2684,14 @@ bna_rx_create(struct bna *bna, struct bnad *bnad,
 		/* Setup CQ */
 
 		rxp->cq.ccb = (struct bna_ccb *) ccb_mem[i].kva;
-		rxp->cq.ccb->q_depth =	rx_cfg->q_depth +
-					((rx_cfg->rxp_type == BNA_RXP_SINGLE) ?
-					0 : rx_cfg->q_depth);
+		cq_depth = rx_cfg->q0_depth +
+			((rx_cfg->rxp_type == BNA_RXP_SINGLE) ?
+			 0 : rx_cfg->q1_depth);
+		/* if multi-buffer is enabled, the sum of q0_depth
+		 * and q1_depth need not be a power of 2
+		 */
+		BNA_TO_POWER_OF_2_HIGH(cq_depth);
+		rxp->cq.ccb->q_depth = cq_depth;
 		rxp->cq.ccb->cq = &rxp->cq;
 		rxp->cq.ccb->rcb[0] = q0->rcb;
 		q0->rcb->ccb = rxp->cq.ccb;
@@ -2670,6 +2817,30 @@ bna_rx_cleanup_complete(struct bna_rx *rx)
 	bfa_fsm_send_event(rx, RX_E_CLEANUP_DONE);
 }
 
+void
+bna_rx_vlan_strip_enable(struct bna_rx *rx)
+{
+	struct bna_rxf *rxf = &rx->rxf;
+
+	if (rxf->vlan_strip_status == BNA_STATUS_T_DISABLED) {
+		rxf->vlan_strip_status = BNA_STATUS_T_ENABLED;
+		rxf->vlan_strip_pending = true;
+		bfa_fsm_send_event(rxf, RXF_E_CONFIG);
+	}
+}
+
+void
+bna_rx_vlan_strip_disable(struct bna_rx *rx)
+{
+	struct bna_rxf *rxf = &rx->rxf;
+
+	if (rxf->vlan_strip_status != BNA_STATUS_T_DISABLED) {
+		rxf->vlan_strip_status = BNA_STATUS_T_DISABLED;
+		rxf->vlan_strip_pending = true;
+		bfa_fsm_send_event(rxf, RXF_E_CONFIG);
+	}
+}
+
 enum bna_cb_status
 bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode new_mode,
 		enum bna_rxmode bitmask,
diff --git a/drivers/net/ethernet/brocade/bna/bna_types.h b/drivers/net/ethernet/brocade/bna/bna_types.h
index dc50f7836b6d..621547cd3504 100644
--- a/drivers/net/ethernet/brocade/bna/bna_types.h
+++ b/drivers/net/ethernet/brocade/bna/bna_types.h
@@ -109,20 +109,21 @@ enum bna_tx_res_req_type {
 enum bna_rx_mem_type {
 	BNA_RX_RES_MEM_T_CCB		= 0,	/* CQ context */
 	BNA_RX_RES_MEM_T_RCB		= 1,	/* CQ context */
-	BNA_RX_RES_MEM_T_UNMAPQ		= 2,	/* UnmapQ for RxQs */
-	BNA_RX_RES_MEM_T_CQPT		= 3,	/* CQ QPT */
-	BNA_RX_RES_MEM_T_CSWQPT		= 4,	/* S/W QPT */
-	BNA_RX_RES_MEM_T_CQPT_PAGE	= 5,	/* CQPT page */
-	BNA_RX_RES_MEM_T_HQPT		= 6,	/* RX QPT */
-	BNA_RX_RES_MEM_T_DQPT		= 7,	/* RX QPT */
-	BNA_RX_RES_MEM_T_HSWQPT		= 8,	/* RX s/w QPT */
-	BNA_RX_RES_MEM_T_DSWQPT		= 9,	/* RX s/w QPT */
-	BNA_RX_RES_MEM_T_DPAGE		= 10,	/* RX s/w QPT */
-	BNA_RX_RES_MEM_T_HPAGE		= 11,	/* RX s/w QPT */
-	BNA_RX_RES_MEM_T_IBIDX		= 12,
-	BNA_RX_RES_MEM_T_RIT		= 13,
-	BNA_RX_RES_T_INTR		= 14,	/* Rx interrupts */
-	BNA_RX_RES_T_MAX		= 15
+	BNA_RX_RES_MEM_T_UNMAPHQ	= 2,
+	BNA_RX_RES_MEM_T_UNMAPDQ	= 3,
+	BNA_RX_RES_MEM_T_CQPT		= 4,
+	BNA_RX_RES_MEM_T_CSWQPT		= 5,
+	BNA_RX_RES_MEM_T_CQPT_PAGE	= 6,
+	BNA_RX_RES_MEM_T_HQPT		= 7,
+	BNA_RX_RES_MEM_T_DQPT		= 8,
+	BNA_RX_RES_MEM_T_HSWQPT		= 9,
+	BNA_RX_RES_MEM_T_DSWQPT		= 10,
+	BNA_RX_RES_MEM_T_DPAGE		= 11,
+	BNA_RX_RES_MEM_T_HPAGE		= 12,
+	BNA_RX_RES_MEM_T_IBIDX		= 13,
+	BNA_RX_RES_MEM_T_RIT		= 14,
+	BNA_RX_RES_T_INTR		= 15,
+	BNA_RX_RES_T_MAX		= 16
 };
 
 enum bna_tx_type {
@@ -583,6 +584,8 @@ struct bna_rxq {
 
 	int			buffer_size;
 	int			q_depth;
+	u32			num_vecs;
+	enum bna_status		multi_buffer;
 
 	struct bna_qpt qpt;
 	struct bna_rcb *rcb;
@@ -632,6 +635,8 @@ struct bna_ccb {
 	struct bna_rcb *rcb[2];
 	void			*ctrl; /* For bnad */
 	struct bna_pkt_rate pkt_rate;
+	u32			pkts_una;
+	u32			bytes_per_intr;
 
 	/* Control path */
 	struct bna_cq *cq;
@@ -671,14 +676,22 @@ struct bna_rx_config {
 	int			num_paths;
 	enum bna_rxp_type rxp_type;
 	int			paused;
-	int			q_depth;
 	int			coalescing_timeo;
 	/*
 	 * Small/Large (or Header/Data) buffer size to be configured
-	 * for SLR and HDS queue type. Large buffer size comes from
-	 * enet->mtu.
+	 * for SLR and HDS queue type.
 	 */
-	int			small_buff_size;
+	u32			frame_size;
+
+	/* header or small queue */
+	u32			q1_depth;
+	u32			q1_buf_size;
+
+	/* data or large queue */
+	u32			q0_depth;
+	u32			q0_buf_size;
+	u32			q0_num_vecs;
+	enum bna_status		q0_multi_buf;
 
 	enum bna_status rss_status;
 	struct bna_rss_config rss_config;
@@ -866,8 +879,9 @@ struct bna_rx_mod {
 /* CAM */
 
 struct bna_ucam_mod {
-	struct bna_mac *ucmac;		/* BFI_MAX_UCMAC entries */
+	struct bna_mac *ucmac;		/* num_ucmac * 2 entries */
 	struct list_head			free_q;
+	struct list_head			del_q;
 
 	struct bna *bna;
 };
@@ -880,9 +894,10 @@ struct bna_mcam_handle {
 };
 
 struct bna_mcam_mod {
-	struct bna_mac *mcmac;		/* BFI_MAX_MCMAC entries */
-	struct bna_mcam_handle *mchandle;	/* BFI_MAX_MCMAC entries */
+	struct bna_mac *mcmac;		/* num_mcmac * 2 entries */
+	struct bna_mcam_handle *mchandle;	/* num_mcmac entries */
 	struct list_head			free_q;
+	struct list_head			del_q;
 	struct list_head			free_handle_q;
 
 	struct bna *bna;
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index 248bc37cb41b..cf64f3d0b60d 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -52,7 +52,7 @@ MODULE_PARM_DESC(bna_debugfs_enable, "Enables debugfs feature, default=1,"
 /*
  * Global variables
  */
-u32 bnad_rxqs_per_cq = 2;
+static u32 bnad_rxqs_per_cq = 2;
 static u32 bna_id;
 static struct mutex bnad_list_mutex;
 static LIST_HEAD(bnad_list);
@@ -142,7 +142,8 @@ bnad_tx_buff_unmap(struct bnad *bnad,
 
 		dma_unmap_page(&bnad->pcidev->dev,
 			dma_unmap_addr(&unmap->vectors[vector], dma_addr),
-			skb_shinfo(skb)->frags[nvecs].size, DMA_TO_DEVICE);
+			dma_unmap_len(&unmap->vectors[vector], dma_len),
+			DMA_TO_DEVICE);
 		dma_unmap_addr_set(&unmap->vectors[vector], dma_addr, 0);
 		nvecs--;
 	}
@@ -282,27 +283,32 @@ static int
 bnad_rxq_alloc_init(struct bnad *bnad, struct bna_rcb *rcb)
 {
 	struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
-	int mtu, order;
+	int order;
 
 	bnad_rxq_alloc_uninit(bnad, rcb);
 
-	mtu = bna_enet_mtu_get(&bnad->bna.enet);
-	order = get_order(mtu);
+	order = get_order(rcb->rxq->buffer_size);
+
+	unmap_q->type = BNAD_RXBUF_PAGE;
 
 	if (bna_is_small_rxq(rcb->id)) {
 		unmap_q->alloc_order = 0;
 		unmap_q->map_size = rcb->rxq->buffer_size;
 	} else {
-		unmap_q->alloc_order = order;
-		unmap_q->map_size =
-			(rcb->rxq->buffer_size > 2048) ?
-			PAGE_SIZE << order : 2048;
+		if (rcb->rxq->multi_buffer) {
+			unmap_q->alloc_order = 0;
+			unmap_q->map_size = rcb->rxq->buffer_size;
+			unmap_q->type = BNAD_RXBUF_MULTI_BUFF;
+		} else {
+			unmap_q->alloc_order = order;
+			unmap_q->map_size =
+				(rcb->rxq->buffer_size > 2048) ?
+				PAGE_SIZE << order : 2048;
+		}
 	}
 
 	BUG_ON(((PAGE_SIZE << order) % unmap_q->map_size));
 
-	unmap_q->type = BNAD_RXBUF_PAGE;
-
 	return 0;
 }
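To make the buffer carving above concrete, here is a worked example assuming PAGE_SIZE == 4096 and a jumbo 9022-byte receive buffer (both assumptions of this note, not values taken from the patch):

/* Worked example (PAGE_SIZE == 4096 assumed):
 *
 *   large queue, buffer_size = 9022, multi-buffer disabled:
 *       order       = get_order(9022)   = 2
 *       alloc_order = 2
 *       map_size    = PAGE_SIZE << 2    = 16384  (one buffer per allocation)
 *
 *   large queue, buffer_size = 2048, multi-buffer enabled:
 *       alloc_order = 0
 *       map_size    = 2048              (two fragments per 4 KiB page)
 *
 *   In both cases (PAGE_SIZE << alloc_order) % map_size == 0, so the
 *   BUG_ON() above does not fire.
 */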
 
@@ -345,10 +351,10 @@ bnad_rxq_cleanup(struct bnad *bnad, struct bna_rcb *rcb)
 	for (i = 0; i < rcb->q_depth; i++) {
 		struct bnad_rx_unmap *unmap = &unmap_q->unmap[i];
 
-		if (BNAD_RXBUF_IS_PAGE(unmap_q->type))
-			bnad_rxq_cleanup_page(bnad, unmap);
-		else
+		if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
 			bnad_rxq_cleanup_skb(bnad, unmap);
+		else
+			bnad_rxq_cleanup_page(bnad, unmap);
 	}
 	bnad_rxq_alloc_uninit(bnad, rcb);
 }
@@ -480,10 +486,10 @@ bnad_rxq_post(struct bnad *bnad, struct bna_rcb *rcb)
 	if (!(to_alloc >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT))
 		return;
 
-	if (BNAD_RXBUF_IS_PAGE(unmap_q->type))
-		bnad_rxq_refill_page(bnad, rcb, to_alloc);
-	else
+	if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
 		bnad_rxq_refill_skb(bnad, rcb, to_alloc);
+	else
+		bnad_rxq_refill_page(bnad, rcb, to_alloc);
 }
 
 #define flags_cksum_prot_mask (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
@@ -500,72 +506,114 @@ bnad_rxq_post(struct bnad *bnad, struct bna_rcb *rcb)
 #define flags_udp6 (BNA_CQ_EF_IPV6 | \
 				BNA_CQ_EF_UDP | BNA_CQ_EF_L4_CKSUM_OK)
 
-static inline struct sk_buff *
-bnad_cq_prepare_skb(struct bnad_rx_ctrl *rx_ctrl,
-		struct bnad_rx_unmap_q *unmap_q,
-		struct bnad_rx_unmap *unmap,
-		u32 length, u32 flags)
+static void
+bnad_cq_drop_packet(struct bnad *bnad, struct bna_rcb *rcb,
+		    u32 sop_ci, u32 nvecs)
 {
-	struct bnad *bnad = rx_ctrl->bnad;
-	struct sk_buff *skb;
+	struct bnad_rx_unmap_q *unmap_q;
+	struct bnad_rx_unmap *unmap;
+	u32 ci, vec;
 
-	if (BNAD_RXBUF_IS_PAGE(unmap_q->type)) {
-		skb = napi_get_frags(&rx_ctrl->napi);
-		if (unlikely(!skb))
-			return NULL;
+	unmap_q = rcb->unmap_q;
+	for (vec = 0, ci = sop_ci; vec < nvecs; vec++) {
+		unmap = &unmap_q->unmap[ci];
+		BNA_QE_INDX_INC(ci, rcb->q_depth);
+
+		if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
+			bnad_rxq_cleanup_skb(bnad, unmap);
+		else
+			bnad_rxq_cleanup_page(bnad, unmap);
+	}
+}
+
+static void
+bnad_cq_setup_skb_frags(struct bna_rcb *rcb, struct sk_buff *skb,
+			u32 sop_ci, u32 nvecs, u32 last_fraglen)
+{
+	struct bnad *bnad;
+	u32 ci, vec, len, totlen = 0;
+	struct bnad_rx_unmap_q *unmap_q;
+	struct bnad_rx_unmap *unmap;
+
+	unmap_q = rcb->unmap_q;
+	bnad = rcb->bnad;
+
+	/* prefetch header */
+	prefetch(page_address(unmap_q->unmap[sop_ci].page) +
+			unmap_q->unmap[sop_ci].page_offset);
+
+	for (vec = 1, ci = sop_ci; vec <= nvecs; vec++) {
+		unmap = &unmap_q->unmap[ci];
+		BNA_QE_INDX_INC(ci, rcb->q_depth);
 
 		dma_unmap_page(&bnad->pcidev->dev,
 				dma_unmap_addr(&unmap->vector, dma_addr),
 				unmap->vector.len, DMA_FROM_DEVICE);
+
+		len = (vec == nvecs) ?
+			last_fraglen : unmap->vector.len;
+		totlen += len;
+
 		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
-				unmap->page, unmap->page_offset, length);
-		skb->len += length;
-		skb->data_len += length;
-		skb->truesize += length;
+				unmap->page, unmap->page_offset, len);
 
 		unmap->page = NULL;
 		unmap->vector.len = 0;
-
-		return skb;
 	}
 
-	skb = unmap->skb;
-	BUG_ON(!skb);
+	skb->len += totlen;
+	skb->data_len += totlen;
+	skb->truesize += totlen;
+}
+
+static inline void
+bnad_cq_setup_skb(struct bnad *bnad, struct sk_buff *skb,
+		  struct bnad_rx_unmap *unmap, u32 len)
+{
+	prefetch(skb->data);
 
 	dma_unmap_single(&bnad->pcidev->dev,
 			dma_unmap_addr(&unmap->vector, dma_addr),
 			unmap->vector.len, DMA_FROM_DEVICE);
 
-	skb_put(skb, length);
-
+	skb_put(skb, len);
 	skb->protocol = eth_type_trans(skb, bnad->netdev);
 
 	unmap->skb = NULL;
 	unmap->vector.len = 0;
-	return skb;
 }
 
 static u32
 bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
 {
-	struct bna_cq_entry *cq, *cmpl;
+	struct bna_cq_entry *cq, *cmpl, *next_cmpl;
 	struct bna_rcb *rcb = NULL;
 	struct bnad_rx_unmap_q *unmap_q;
-	struct bnad_rx_unmap *unmap;
-	struct sk_buff *skb;
+	struct bnad_rx_unmap *unmap = NULL;
+	struct sk_buff *skb = NULL;
 	struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
 	struct bnad_rx_ctrl *rx_ctrl = ccb->ctrl;
-	u32 packets = 0, length = 0, flags, masked_flags;
+	u32 packets = 0, len = 0, totlen = 0;
+	u32 pi, vec, sop_ci = 0, nvecs = 0;
+	u32 flags, masked_flags;
 
 	prefetch(bnad->netdev);
 
 	cq = ccb->sw_q;
 	cmpl = &cq[ccb->producer_index];
 
-	while (cmpl->valid && (packets < budget)) {
-		packets++;
-		flags = ntohl(cmpl->flags);
-		length = ntohs(cmpl->length);
+	while (packets < budget) {
+		if (!cmpl->valid)
+			break;
+		/* The 'valid' field is set by the adapter, only after writing
+		 * the other fields of completion entry. Hence, do not load
+		 * other fields of completion entry *before* the 'valid' is
+		 * loaded. Adding the rmb() here prevents the compiler and/or
+		 * CPU from reordering the reads which would potentially result
+		 * in reading stale values in completion entry.
+		 */
+		rmb();
+
 		BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length));
 
 		if (bna_is_small_rxq(cmpl->rxq_id))
@@ -574,25 +622,78 @@ bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
 			rcb = ccb->rcb[0];
 
 		unmap_q = rcb->unmap_q;
-		unmap = &unmap_q->unmap[rcb->consumer_index];
 
-		if (unlikely(flags & (BNA_CQ_EF_MAC_ERROR |
-					BNA_CQ_EF_FCS_ERROR |
-					BNA_CQ_EF_TOO_LONG))) {
-			if (BNAD_RXBUF_IS_PAGE(unmap_q->type))
-				bnad_rxq_cleanup_page(bnad, unmap);
-			else
-				bnad_rxq_cleanup_skb(bnad, unmap);
+		/* start of packet ci */
+		sop_ci = rcb->consumer_index;
 
+		if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type)) {
+			unmap = &unmap_q->unmap[sop_ci];
+			skb = unmap->skb;
+		} else {
+			skb = napi_get_frags(&rx_ctrl->napi);
+			if (unlikely(!skb))
+				break;
+		}
+		prefetch(skb);
+
+		flags = ntohl(cmpl->flags);
+		len = ntohs(cmpl->length);
+		totlen = len;
+		nvecs = 1;
+
+		/* Check all the completions for this frame.
+		 * busy-wait doesn't help much, break here.
+		 */
+		if (BNAD_RXBUF_IS_MULTI_BUFF(unmap_q->type) &&
+		    (flags & BNA_CQ_EF_EOP) == 0) {
+			pi = ccb->producer_index;
+			do {
+				BNA_QE_INDX_INC(pi, ccb->q_depth);
+				next_cmpl = &cq[pi];
+
+				if (!next_cmpl->valid)
+					break;
+				/* The 'valid' field is set by the adapter, only
+				 * after writing the other fields of completion
+				 * entry. Hence, do not load other fields of
+				 * completion entry *before* the 'valid' is
+				 * loaded. Adding the rmb() here prevents the
+				 * compiler and/or CPU from reordering the reads
+				 * which would potentially result in reading
+				 * stale values in completion entry.
+				 */
+				rmb();
+
+				len = ntohs(next_cmpl->length);
+				flags = ntohl(next_cmpl->flags);
+
+				nvecs++;
+				totlen += len;
+			} while ((flags & BNA_CQ_EF_EOP) == 0);
+
+			if (!next_cmpl->valid)
+				break;
+		}
+
+		/* TODO: BNA_CQ_EF_LOCAL ? */
+		if (unlikely(flags & (BNA_CQ_EF_MAC_ERROR |
+						BNA_CQ_EF_FCS_ERROR |
+						BNA_CQ_EF_TOO_LONG))) {
+			bnad_cq_drop_packet(bnad, rcb, sop_ci, nvecs);
 			rcb->rxq->rx_packets_with_error++;
+
 			goto next;
 		}
 
-		skb = bnad_cq_prepare_skb(ccb->ctrl, unmap_q, unmap,
-				length, flags);
+		if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
+			bnad_cq_setup_skb(bnad, skb, unmap, len);
+		else
+			bnad_cq_setup_skb_frags(rcb, skb, sop_ci, nvecs, len);
 
-		if (unlikely(!skb))
-			break;
+		packets++;
+		rcb->rxq->rx_packets++;
+		rcb->rxq->rx_bytes += totlen;
+		ccb->bytes_per_intr += totlen;
 
 		masked_flags = flags & flags_cksum_prot_mask;
 
@@ -606,21 +707,21 @@ bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
 		else
 			skb_checksum_none_assert(skb);
 
-		rcb->rxq->rx_packets++;
-		rcb->rxq->rx_bytes += length;
-
 		if (flags & BNA_CQ_EF_VLAN)
 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(cmpl->vlan_tag));
 
-		if (BNAD_RXBUF_IS_PAGE(unmap_q->type))
-			napi_gro_frags(&rx_ctrl->napi);
-		else
+		if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
 			netif_receive_skb(skb);
+		else
+			napi_gro_frags(&rx_ctrl->napi);
 
 next:
-		cmpl->valid = 0;
-		BNA_QE_INDX_INC(rcb->consumer_index, rcb->q_depth);
-		BNA_QE_INDX_INC(ccb->producer_index, ccb->q_depth);
+		BNA_QE_INDX_ADD(rcb->consumer_index, nvecs, rcb->q_depth);
+		for (vec = 0; vec < nvecs; vec++) {
+			cmpl = &cq[ccb->producer_index];
+			cmpl->valid = 0;
+			BNA_QE_INDX_INC(ccb->producer_index, ccb->q_depth);
+		}
 		cmpl = &cq[ccb->producer_index];
 	}
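With multi-buffer receive, a frame can span nvecs completion entries, so the consumer index is advanced by nvecs in one step while each entry's valid flag is cleared individually. The BNA_QE_INDX_INC/BNA_QE_INDX_ADD helpers are not part of this patch; presumably they perform a modular ring advance, roughly like this sketch (the mask form assumes power-of-2 depths, which BNA_TO_POWER_OF_2_HIGH() enforces for the CQ):

/* Sketch of the assumed ring-index helpers - not taken from this patch. */
#define QE_INDX_INC_SKETCH(idx, depth)		((idx) = ((idx) + 1) & ((depth) - 1))
#define QE_INDX_ADD_SKETCH(idx, num, depth)	((idx) = ((idx) + (num)) & ((depth) - 1))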
 
@@ -1899,8 +2000,10 @@ bnad_setup_tx(struct bnad *bnad, u32 tx_id)
 	tx = bna_tx_create(&bnad->bna, bnad, tx_config, &tx_cbfn, res_info,
 			tx_info);
 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
-	if (!tx)
+	if (!tx) {
+		err = -ENOMEM;
 		goto err_return;
+	}
 	tx_info->tx = tx;
 
 	INIT_DELAYED_WORK(&tx_info->tx_cleanup_work,
@@ -1911,7 +2014,7 @@ bnad_setup_tx(struct bnad *bnad, u32 tx_id)
 		err = bnad_tx_msix_register(bnad, tx_info,
 			tx_id, bnad->num_txq_per_tx);
 		if (err)
-			goto err_return;
+			goto cleanup_tx;
 	}
 
 	spin_lock_irqsave(&bnad->bna_lock, flags);
@@ -1920,6 +2023,12 @@ bnad_setup_tx(struct bnad *bnad, u32 tx_id)
 
 	return 0;
 
+cleanup_tx:
+	spin_lock_irqsave(&bnad->bna_lock, flags);
+	bna_tx_destroy(tx_info->tx);
+	spin_unlock_irqrestore(&bnad->bna_lock, flags);
+	tx_info->tx = NULL;
+	tx_info->tx_id = 0;
 err_return:
 	bnad_tx_res_free(bnad, res_info);
 	return err;
@@ -1930,6 +2039,7 @@ err_return:
 static void
 bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config)
 {
+	memset(rx_config, 0, sizeof(*rx_config));
 	rx_config->rx_type = BNA_RX_T_REGULAR;
 	rx_config->num_paths = bnad->num_rxp_per_rx;
 	rx_config->coalescing_timeo = bnad->rx_coalescing_timeo;
@@ -1950,10 +2060,39 @@ bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config)
 		memset(&rx_config->rss_config, 0,
 		       sizeof(rx_config->rss_config));
 	}
+
+	rx_config->frame_size = BNAD_FRAME_SIZE(bnad->netdev->mtu);
+	rx_config->q0_multi_buf = BNA_STATUS_T_DISABLED;
+
+	/* BNA_RXP_SINGLE - one data-buffer queue
+	 * BNA_RXP_SLR - one small-buffer and one large-buffer queues
+	 * BNA_RXP_HDS - one header-buffer and one data-buffer queues
+	 */
+	/* TODO: configurable param for queue type */
 	rx_config->rxp_type = BNA_RXP_SLR;
-	rx_config->q_depth = bnad->rxq_depth;
 
-	rx_config->small_buff_size = BFI_SMALL_RXBUF_SIZE;
+	if (BNAD_PCI_DEV_IS_CAT2(bnad) &&
+	    rx_config->frame_size > 4096) {
+		/* though size_routing_enable is set in SLR,
+		 * small packets may get routed to same rxq.
+		 * set buf_size to 2048 instead of PAGE_SIZE.
+		 */
+		rx_config->q0_buf_size = 2048;
+		/* this should be in multiples of 2 */
+		rx_config->q0_num_vecs = 4;
+		rx_config->q0_depth = bnad->rxq_depth * rx_config->q0_num_vecs;
+		rx_config->q0_multi_buf = BNA_STATUS_T_ENABLED;
+	} else {
+		rx_config->q0_buf_size = rx_config->frame_size;
+		rx_config->q0_num_vecs = 1;
+		rx_config->q0_depth = bnad->rxq_depth;
+	}
+
+	/* initialize for q1 for BNA_RXP_SLR/BNA_RXP_HDS */
+	if (rx_config->rxp_type == BNA_RXP_SLR) {
+		rx_config->q1_depth = bnad->rxq_depth;
+		rx_config->q1_buf_size = BFI_SMALL_RXBUF_SIZE;
+	}
 
 	rx_config->vlan_strip_status = BNA_STATUS_T_ENABLED;
 }
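As a worked example of the sizing above (a 9000-byte MTU is assumed here for illustration): BNAD_FRAME_SIZE() presumably corresponds to the ETH_HLEN + VLAN_HLEN + mtu + ETH_FCS_LEN computation this patch removes from bnad_open(), giving:

/* Illustration only - 9000-byte MTU assumed:
 *   frame_size = 14 + 4 + 9000 + 4 = 9022  > 4096
 *   CAT2 adapters:  q0_buf_size = 2048, q0_num_vecs = 4,
 *                   q0_depth    = rxq_depth * 4
 *   other adapters: q0_buf_size = 9022, q0_num_vecs = 1,
 *                   q0_depth    = rxq_depth
 */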
@@ -1969,6 +2108,49 @@ bnad_rx_ctrl_init(struct bnad *bnad, u32 rx_id)
 }
 
 /* Called with mutex_lock(&bnad->conf_mutex) held */
+static u32
+bnad_reinit_rx(struct bnad *bnad)
+{
+	struct net_device *netdev = bnad->netdev;
+	u32 err = 0, current_err = 0;
+	u32 rx_id = 0, count = 0;
+	unsigned long flags;
+
+	/* destroy and create new rx objects */
+	for (rx_id = 0; rx_id < bnad->num_rx; rx_id++) {
+		if (!bnad->rx_info[rx_id].rx)
+			continue;
+		bnad_destroy_rx(bnad, rx_id);
+	}
+
+	spin_lock_irqsave(&bnad->bna_lock, flags);
+	bna_enet_mtu_set(&bnad->bna.enet,
+			 BNAD_FRAME_SIZE(bnad->netdev->mtu), NULL);
+	spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+	for (rx_id = 0; rx_id < bnad->num_rx; rx_id++) {
+		count++;
+		current_err = bnad_setup_rx(bnad, rx_id);
+		if (current_err && !err) {
+			err = current_err;
+			pr_err("RXQ:%u setup failed\n", rx_id);
+		}
+	}
+
+	/* restore rx configuration */
+	if (bnad->rx_info[0].rx && !err) {
+		bnad_restore_vlans(bnad, 0);
+		bnad_enable_default_bcast(bnad);
+		spin_lock_irqsave(&bnad->bna_lock, flags);
+		bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
+		spin_unlock_irqrestore(&bnad->bna_lock, flags);
+		bnad_set_rx_mode(netdev);
+	}
+
+	return count;
+}
+
+/* Called with bnad_conf_lock() held */
 void
 bnad_destroy_rx(struct bnad *bnad, u32 rx_id)
 {
@@ -2047,13 +2229,19 @@ bnad_setup_rx(struct bnad *bnad, u32 rx_id)
 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
 
 	/* Fill Unmap Q memory requirements */
-	BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_RX_RES_MEM_T_UNMAPQ],
-			rx_config->num_paths +
-			((rx_config->rxp_type == BNA_RXP_SINGLE) ?
-			 0 : rx_config->num_paths),
-			((bnad->rxq_depth * sizeof(struct bnad_rx_unmap)) +
-			 sizeof(struct bnad_rx_unmap_q)));
-
+	BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_RX_RES_MEM_T_UNMAPDQ],
+				 rx_config->num_paths,
+			(rx_config->q0_depth *
+			 sizeof(struct bnad_rx_unmap)) +
+			 sizeof(struct bnad_rx_unmap_q));
+
+	if (rx_config->rxp_type != BNA_RXP_SINGLE) {
+		BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_RX_RES_MEM_T_UNMAPHQ],
+					 rx_config->num_paths,
+				(rx_config->q1_depth *
+				 sizeof(struct bnad_rx_unmap) +
+				 sizeof(struct bnad_rx_unmap_q)));
+	}
 	/* Allocate resource */
 	err = bnad_rx_res_alloc(bnad, res_info, rx_id);
 	if (err)
@@ -2548,7 +2736,6 @@ bnad_open(struct net_device *netdev)
 	int err;
 	struct bnad *bnad = netdev_priv(netdev);
 	struct bna_pause_config pause_config;
-	int mtu;
 	unsigned long flags;
 
 	mutex_lock(&bnad->conf_mutex);
@@ -2567,10 +2754,9 @@ bnad_open(struct net_device *netdev)
 	pause_config.tx_pause = 0;
 	pause_config.rx_pause = 0;
 
-	mtu = ETH_HLEN + VLAN_HLEN + bnad->netdev->mtu + ETH_FCS_LEN;
-
 	spin_lock_irqsave(&bnad->bna_lock, flags);
-	bna_enet_mtu_set(&bnad->bna.enet, mtu, NULL);
+	bna_enet_mtu_set(&bnad->bna.enet,
+			 BNAD_FRAME_SIZE(bnad->netdev->mtu), NULL);
 	bna_enet_pause_config(&bnad->bna.enet, &pause_config, NULL);
 	bna_enet_enable(&bnad->bna.enet);
 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
@@ -2624,9 +2810,6 @@ bnad_stop(struct net_device *netdev)
 	bnad_destroy_tx(bnad, 0);
 	bnad_destroy_rx(bnad, 0);
 
-	/* These config flags are cleared in the hardware */
-	bnad->cfg_flags &= ~(BNAD_CF_ALLMULTI | BNAD_CF_PROMISC);
-
 	/* Synchronize mailbox IRQ */
 	bnad_mbox_irq_sync(bnad);
 
@@ -2784,21 +2967,21 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 	}
 
 	tcb = bnad->tx_info[0].tcb[txq_id];
-	q_depth = tcb->q_depth;
-	prod = tcb->producer_index;
-
-	unmap_q = tcb->unmap_q;
 
 	/*
 	 * Takes care of the Tx that is scheduled between clearing the flag
 	 * and the netif_tx_stop_all_queues() call.
 	 */
-	if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) {
+	if (unlikely(!tcb || !test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) {
 		dev_kfree_skb(skb);
 		BNAD_UPDATE_CTR(bnad, tx_skb_stopping);
 		return NETDEV_TX_OK;
 	}
 
+	q_depth = tcb->q_depth;
+	prod = tcb->producer_index;
+	unmap_q = tcb->unmap_q;
+
 	vectors = 1 + skb_shinfo(skb)->nr_frags;
 	wis = BNA_TXQ_WI_NEEDED(vectors);	/* 4 vectors per work item */
 
@@ -2863,7 +3046,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 
 	for (i = 0, vect_id = 0; i < vectors - 1; i++) {
 		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
-		u16		size = skb_frag_size(frag);
+		u32		size = skb_frag_size(frag);
 
 		if (unlikely(size == 0)) {
 			/* Undo the changes starting at tcb->producer_index */
@@ -2888,10 +3071,11 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 
 		dma_addr = skb_frag_dma_map(&bnad->pcidev->dev, frag,
 					    0, size, DMA_TO_DEVICE);
+		dma_unmap_len_set(&unmap->vectors[vect_id], dma_len, size);
 		BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
 		txqent->vector[vect_id].length = htons(size);
 		dma_unmap_addr_set(&unmap->vectors[vect_id], dma_addr,
-						dma_addr);
+				   dma_addr);
 		head_unmap->nvecs++;
 	}
 
@@ -2911,6 +3095,8 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 	if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
 		return NETDEV_TX_OK;
 
+	skb_tx_timestamp(skb);
+
 	bna_txq_prod_indx_doorbell(tcb);
 	smp_mb();
 
@@ -2937,73 +3123,133 @@ bnad_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
 	return stats;
 }
 
+static void
+bnad_set_rx_ucast_fltr(struct bnad *bnad)
+{
+	struct net_device *netdev = bnad->netdev;
+	int uc_count = netdev_uc_count(netdev);
+	enum bna_cb_status ret;
+	u8 *mac_list;
+	struct netdev_hw_addr *ha;
+	int entry;
+
+	if (netdev_uc_empty(bnad->netdev)) {
+		bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL, NULL);
+		return;
+	}
+
+	if (uc_count > bna_attr(&bnad->bna)->num_ucmac)
+		goto mode_default;
+
+	mac_list = kzalloc(uc_count * ETH_ALEN, GFP_ATOMIC);
+	if (mac_list == NULL)
+		goto mode_default;
+
+	entry = 0;
+	netdev_for_each_uc_addr(ha, netdev) {
+		memcpy(&mac_list[entry * ETH_ALEN],
+		       &ha->addr[0], ETH_ALEN);
+		entry++;
+	}
+
+	ret = bna_rx_ucast_listset(bnad->rx_info[0].rx, entry,
+			mac_list, NULL);
+	kfree(mac_list);
+
+	if (ret != BNA_CB_SUCCESS)
+		goto mode_default;
+
+	return;
+
+	/* ucast packets not in UCAM are routed to the default function */
+mode_default:
+	bnad->cfg_flags |= BNAD_CF_DEFAULT;
+	bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL, NULL);
+}
+
+static void
+bnad_set_rx_mcast_fltr(struct bnad *bnad)
+{
+	struct net_device *netdev = bnad->netdev;
+	int mc_count = netdev_mc_count(netdev);
+	enum bna_cb_status ret;
+	u8 *mac_list;
+
+	if (netdev->flags & IFF_ALLMULTI)
+		goto mode_allmulti;
+
+	if (netdev_mc_empty(netdev))
+		return;
+
+	if (mc_count > bna_attr(&bnad->bna)->num_mcmac)
+		goto mode_allmulti;
+
+	mac_list = kzalloc((mc_count + 1) * ETH_ALEN, GFP_ATOMIC);
+
+	if (mac_list == NULL)
+		goto mode_allmulti;
+
+	memcpy(&mac_list[0], &bnad_bcast_addr[0], ETH_ALEN);
+
+	/* copy rest of the MCAST addresses */
+	bnad_netdev_mc_list_get(netdev, mac_list);
+	ret = bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1,
+			mac_list, NULL);
+	kfree(mac_list);
+
+	if (ret != BNA_CB_SUCCESS)
+		goto mode_allmulti;
+
+	return;
+
+mode_allmulti:
+	bnad->cfg_flags |= BNAD_CF_ALLMULTI;
+	bna_rx_mcast_delall(bnad->rx_info[0].rx, NULL);
+}
+
 void
 bnad_set_rx_mode(struct net_device *netdev)
 {
 	struct bnad *bnad = netdev_priv(netdev);
-	u32	new_mask, valid_mask;
+	enum bna_rxmode new_mode, mode_mask;
 	unsigned long flags;
 
 	spin_lock_irqsave(&bnad->bna_lock, flags);
 
-	new_mask = valid_mask = 0;
-
-	if (netdev->flags & IFF_PROMISC) {
-		if (!(bnad->cfg_flags & BNAD_CF_PROMISC)) {
-			new_mask = BNAD_RXMODE_PROMISC_DEFAULT;
-			valid_mask = BNAD_RXMODE_PROMISC_DEFAULT;
-			bnad->cfg_flags |= BNAD_CF_PROMISC;
-		}
-	} else {
-		if (bnad->cfg_flags & BNAD_CF_PROMISC) {
-			new_mask = ~BNAD_RXMODE_PROMISC_DEFAULT;
-			valid_mask = BNAD_RXMODE_PROMISC_DEFAULT;
-			bnad->cfg_flags &= ~BNAD_CF_PROMISC;
-		}
-	}
-
-	if (netdev->flags & IFF_ALLMULTI) {
-		if (!(bnad->cfg_flags & BNAD_CF_ALLMULTI)) {
-			new_mask |= BNA_RXMODE_ALLMULTI;
-			valid_mask |= BNA_RXMODE_ALLMULTI;
-			bnad->cfg_flags |= BNAD_CF_ALLMULTI;
-		}
-	} else {
-		if (bnad->cfg_flags & BNAD_CF_ALLMULTI) {
-			new_mask &= ~BNA_RXMODE_ALLMULTI;
-			valid_mask |= BNA_RXMODE_ALLMULTI;
-			bnad->cfg_flags &= ~BNAD_CF_ALLMULTI;
-		}
+	if (bnad->rx_info[0].rx == NULL) {
+		spin_unlock_irqrestore(&bnad->bna_lock, flags);
+		return;
 	}
 
-	if (bnad->rx_info[0].rx == NULL)
-		goto unlock;
+	/* clear bnad flags so they can be updated with the new settings */
+	bnad->cfg_flags &= ~(BNAD_CF_PROMISC | BNAD_CF_DEFAULT |
+			BNAD_CF_ALLMULTI);
 
-	bna_rx_mode_set(bnad->rx_info[0].rx, new_mask, valid_mask, NULL);
+	new_mode = 0;
+	if (netdev->flags & IFF_PROMISC) {
+		new_mode |= BNAD_RXMODE_PROMISC_DEFAULT;
+		bnad->cfg_flags |= BNAD_CF_PROMISC;
+	} else {
+		bnad_set_rx_mcast_fltr(bnad);
 
-	if (!netdev_mc_empty(netdev)) {
-		u8 *mcaddr_list;
-		int mc_count = netdev_mc_count(netdev);
+		if (bnad->cfg_flags & BNAD_CF_ALLMULTI)
+			new_mode |= BNA_RXMODE_ALLMULTI;
 
-		/* Index 0 holds the broadcast address */
-		mcaddr_list =
-			kzalloc((mc_count + 1) * ETH_ALEN,
-				GFP_ATOMIC);
-		if (!mcaddr_list)
-			goto unlock;
+		bnad_set_rx_ucast_fltr(bnad);
 
-		memcpy(&mcaddr_list[0], &bnad_bcast_addr[0], ETH_ALEN);
+		if (bnad->cfg_flags & BNAD_CF_DEFAULT)
+			new_mode |= BNA_RXMODE_DEFAULT;
+	}
 
-		/* Copy rest of the MC addresses */
-		bnad_netdev_mc_list_get(netdev, mcaddr_list);
+	mode_mask = BNA_RXMODE_PROMISC | BNA_RXMODE_DEFAULT |
+			BNA_RXMODE_ALLMULTI;
+	bna_rx_mode_set(bnad->rx_info[0].rx, new_mode, mode_mask, NULL);
 
-		bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1,
-					mcaddr_list, NULL);
+	if (bnad->cfg_flags & BNAD_CF_PROMISC)
+		bna_rx_vlan_strip_disable(bnad->rx_info[0].rx);
+	else
+		bna_rx_vlan_strip_enable(bnad->rx_info[0].rx);
 
-		/* Should we enable BNAD_CF_ALLMULTI for err != 0 ? */
-		kfree(mcaddr_list);
-	}
-unlock:
 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
 }
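The multicast path above hands the hardware one flat buffer of 6-byte MACs with the broadcast address at index 0 and the multicast addresses after it. A hedged userspace sketch of that layout follows; bnad_netdev_mc_list_get() and the bna list-set call are replaced by a plain loop and a print.

#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

/* Index 0 carries the broadcast address; entries 1..n carry the multicast
 * addresses, packed back to back as 6-byte MACs in one flat buffer. */
static const unsigned char bcast[ETH_ALEN] = {
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff
};

static size_t build_mcast_list(unsigned char *list,
			       unsigned char (*mc)[ETH_ALEN], size_t mc_count)
{
	size_t i;

	memcpy(&list[0], bcast, ETH_ALEN);
	for (i = 0; i < mc_count; i++)
		memcpy(&list[(i + 1) * ETH_ALEN], mc[i], ETH_ALEN);
	return mc_count + 1;	/* entry count handed to the hardware */
}

int main(void)
{
	unsigned char mc[2][ETH_ALEN] = {
		{ 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 },
		{ 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb },
	};
	unsigned char list[3 * ETH_ALEN];
	size_t n = build_mcast_list(list, mc, 2);

	printf("%zu entries, entry 1 starts with 0x%02x\n", n, list[ETH_ALEN]);
	return 0;
}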
 
@@ -3033,14 +3279,14 @@ bnad_set_mac_address(struct net_device *netdev, void *mac_addr)
 }
 
 static int
-bnad_mtu_set(struct bnad *bnad, int mtu)
+bnad_mtu_set(struct bnad *bnad, int frame_size)
 {
 	unsigned long flags;
 
 	init_completion(&bnad->bnad_completions.mtu_comp);
 
 	spin_lock_irqsave(&bnad->bna_lock, flags);
-	bna_enet_mtu_set(&bnad->bna.enet, mtu, bnad_cb_enet_mtu_set);
+	bna_enet_mtu_set(&bnad->bna.enet, frame_size, bnad_cb_enet_mtu_set);
 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
 
 	wait_for_completion(&bnad->bnad_completions.mtu_comp);
@@ -3051,18 +3297,34 @@ bnad_mtu_set(struct bnad *bnad, int mtu)
 static int
 bnad_change_mtu(struct net_device *netdev, int new_mtu)
 {
-	int err, mtu = netdev->mtu;
+	int err, mtu;
 	struct bnad *bnad = netdev_priv(netdev);
+	u32 rx_count = 0, frame, new_frame;
 
 	if (new_mtu + ETH_HLEN < ETH_ZLEN || new_mtu > BNAD_JUMBO_MTU)
 		return -EINVAL;
 
 	mutex_lock(&bnad->conf_mutex);
 
+	mtu = netdev->mtu;
 	netdev->mtu = new_mtu;
 
-	mtu = ETH_HLEN + VLAN_HLEN + new_mtu + ETH_FCS_LEN;
-	err = bnad_mtu_set(bnad, mtu);
+	frame = BNAD_FRAME_SIZE(mtu);
+	new_frame = BNAD_FRAME_SIZE(new_mtu);
+
+	/* check if multi-buffer needs to be enabled */
+	if (BNAD_PCI_DEV_IS_CAT2(bnad) &&
+	    netif_running(bnad->netdev)) {
+		/* only when the frame size crosses the 4K boundary */
+		if ((frame <= 4096 && new_frame > 4096) ||
+		    (frame > 4096 && new_frame <= 4096))
+			rx_count = bnad_reinit_rx(bnad);
+	}
+
+	/* rx_count > 0 - new rx objects have been created above;
+	 * set the new frame size in hardware and return its status.
+	 */
+	err = bnad_mtu_set(bnad, new_frame);
 	if (err)
 		err = -EBUSY;
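The 4K check above is pure arithmetic on the framed MTU. A small sketch of the same test, with assumed frame-size constants matching BNAD_FRAME_SIZE introduced in the header change further down:

#include <stdbool.h>
#include <stdio.h>

#define ETH_HLEN	14
#define VLAN_HLEN	4
#define ETH_FCS_LEN	4
#define FRAME_SIZE(mtu)	(ETH_HLEN + VLAN_HLEN + (mtu) + ETH_FCS_LEN)

/* Rx only needs to be rebuilt when the frame size crosses the 4096-byte
 * line in either direction, exactly as tested above. */
static bool crosses_4k(int old_mtu, int new_mtu)
{
	int frame = FRAME_SIZE(old_mtu);
	int new_frame = FRAME_SIZE(new_mtu);

	return (frame <= 4096 && new_frame > 4096) ||
	       (frame > 4096 && new_frame <= 4096);
}

int main(void)
{
	printf("1500 -> 9000: %d\n", crosses_4k(1500, 9000));	/* 1: rebuild */
	printf("8000 -> 9000: %d\n", crosses_4k(8000, 9000));	/* 0: both large */
	return 0;
}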
 
@@ -3262,7 +3524,6 @@ bnad_uninit(struct bnad *bnad)
 
 	if (bnad->bar0)
 		iounmap(bnad->bar0);
-	pci_set_drvdata(bnad->pcidev, NULL);
 }
 
 /*
diff --git a/drivers/net/ethernet/brocade/bna/bnad.h b/drivers/net/ethernet/brocade/bna/bnad.h
index f7e033f8a00e..2842c188e0da 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.h
+++ b/drivers/net/ethernet/brocade/bna/bnad.h
@@ -71,7 +71,7 @@ struct bnad_rx_ctrl {
 #define BNAD_NAME			"bna"
 #define BNAD_NAME_LEN			64
 
-#define BNAD_VERSION			"3.2.21.1"
+#define BNAD_VERSION			"3.2.23.0"
 
 #define BNAD_MAILBOX_MSIX_INDEX		0
 #define BNAD_MAILBOX_MSIX_VECTORS	1
@@ -84,7 +84,7 @@ struct bnad_rx_ctrl {
 #define BNAD_IOCETH_TIMEOUT	     10000
 
 #define BNAD_MIN_Q_DEPTH		512
-#define BNAD_MAX_RXQ_DEPTH		2048
+#define BNAD_MAX_RXQ_DEPTH		16384
 #define BNAD_MAX_TXQ_DEPTH		2048
 
 #define BNAD_JUMBO_MTU			9000
@@ -105,6 +105,9 @@ struct bnad_rx_ctrl {
 #define BNAD_NUM_TXQ			(bnad->num_tx * bnad->num_txq_per_tx)
 #define BNAD_NUM_RXP			(bnad->num_rx * bnad->num_rxp_per_rx)
 
+#define BNAD_FRAME_SIZE(_mtu) \
+	(ETH_HLEN + VLAN_HLEN + (_mtu) + ETH_FCS_LEN)
+
 /*
  * DATA STRUCTURES
  */
@@ -219,6 +222,7 @@ struct bnad_rx_info {
 
 struct bnad_tx_vector {
 	DEFINE_DMA_UNMAP_ADDR(dma_addr);
+	DEFINE_DMA_UNMAP_LEN(dma_len);
 };
 
 struct bnad_tx_unmap {
@@ -234,33 +238,38 @@ struct bnad_rx_vector {
 
 struct bnad_rx_unmap {
 	struct page		*page;
-	u32			page_offset;
 	struct sk_buff		*skb;
 	struct bnad_rx_vector	vector;
+	u32			page_offset;
 };
 
 enum bnad_rxbuf_type {
 	BNAD_RXBUF_NONE		= 0,
-	BNAD_RXBUF_SKB		= 1,
+	BNAD_RXBUF_SK_BUFF	= 1,
 	BNAD_RXBUF_PAGE		= 2,
-	BNAD_RXBUF_MULTI	= 3
+	BNAD_RXBUF_MULTI_BUFF	= 3
 };
 
-#define BNAD_RXBUF_IS_PAGE(_type)	((_type) == BNAD_RXBUF_PAGE)
+#define BNAD_RXBUF_IS_SK_BUFF(_type)	((_type) == BNAD_RXBUF_SK_BUFF)
+#define BNAD_RXBUF_IS_MULTI_BUFF(_type)	((_type) == BNAD_RXBUF_MULTI_BUFF)
 
 struct bnad_rx_unmap_q {
 	int			reuse_pi;
 	int			alloc_order;
 	u32			map_size;
 	enum bnad_rxbuf_type	type;
-	struct bnad_rx_unmap	unmap[0];
+	struct bnad_rx_unmap	unmap[0] ____cacheline_aligned;
 };
 
+#define BNAD_PCI_DEV_IS_CAT2(_bnad) \
+	((_bnad)->pcidev->device == BFA_PCI_DEVICE_ID_CT2)
+
 /* Bit mask values for bnad->cfg_flags */
 #define	BNAD_CF_DIM_ENABLED		0x01	/* DIM */
 #define	BNAD_CF_PROMISC			0x02
 #define BNAD_CF_ALLMULTI		0x04
-#define	BNAD_CF_MSIX			0x08	/* If in MSIx mode */
+#define	BNAD_CF_DEFAULT			0x08
+#define	BNAD_CF_MSIX			0x10	/* If in MSIx mode */
 
 /* Defines for run_flags bit-mask */
 /* Set, tested & cleared using xxx_bit() functions */
@@ -367,7 +376,6 @@ struct bnad_drvinfo {
  * EXTERN VARIABLES
  */
 extern const struct firmware *bfi_fw;
-extern u32		bnad_rxqs_per_cq;
 
 /*
  * EXTERN PROTOTYPES
diff --git a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
index 455b5a2e59d4..f9e150825bb5 100644
--- a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
+++ b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
@@ -1131,6 +1131,7 @@ static const struct ethtool_ops bnad_ethtool_ops = {
 	.get_eeprom = bnad_get_eeprom,
 	.set_eeprom = bnad_set_eeprom,
 	.flash_device = bnad_flash_device,
+	.get_ts_info = ethtool_op_get_ts_info,
 };
 
 void
diff --git a/drivers/net/ethernet/brocade/bna/cna.h b/drivers/net/ethernet/brocade/bna/cna.h
index 43405f654b4a..b3ff6d507951 100644
--- a/drivers/net/ethernet/brocade/bna/cna.h
+++ b/drivers/net/ethernet/brocade/bna/cna.h
@@ -37,8 +37,8 @@
 
 extern char bfa_version[];
 
-#define CNA_FW_FILE_CT	"ctfw-3.2.1.1.bin"
-#define CNA_FW_FILE_CT2	"ct2fw-3.2.1.1.bin"
+#define CNA_FW_FILE_CT	"ctfw-3.2.3.0.bin"
+#define CNA_FW_FILE_CT2	"ct2fw-3.2.3.0.bin"
 #define FC_SYMNAME_MAX	256	/*!< max name server symbolic name size */
 
 #pragma pack(1)
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index 92578690f6de..3190d38e16fb 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -17,6 +17,7 @@
 #include <linux/circ_buf.h>
 #include <linux/slab.h>
 #include <linux/init.h>
+#include <linux/io.h>
 #include <linux/gpio.h>
 #include <linux/interrupt.h>
 #include <linux/netdevice.h>
@@ -203,6 +204,47 @@ static int macb_mdio_reset(struct mii_bus *bus)
 	return 0;
 }
 
+/**
+ * macb_set_tx_clk() - Set the tx_clk rate for a given link speed
+ * @clk:	Pointer to the clock to change
+ * @speed:	Link speed (SPEED_10/100/1000) selecting the target rate
+ * @dev:	Pointer to the struct net_device
+ */
+static void macb_set_tx_clk(struct clk *clk, int speed, struct net_device *dev)
+{
+	long ferr, rate, rate_rounded;
+
+	switch (speed) {
+	case SPEED_10:
+		rate = 2500000;
+		break;
+	case SPEED_100:
+		rate = 25000000;
+		break;
+	case SPEED_1000:
+		rate = 125000000;
+		break;
+	default:
+		return;
+	}
+
+	rate_rounded = clk_round_rate(clk, rate);
+	if (rate_rounded < 0)
+		return;
+
+	/* RGMII allows 50 ppm frequency error. Test and warn if this limit
+	 * is not satisfied.
+	 */
+	ferr = abs(rate_rounded - rate);
+	ferr = DIV_ROUND_UP(ferr, rate / 100000);
+	if (ferr > 5)
+		netdev_warn(dev, "unable to generate target frequency: %ld Hz\n",
+				rate);
+
+	if (clk_set_rate(clk, rate_rounded))
+		netdev_err(dev, "adjusting tx_clk failed.\n");
+}
+
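The 50 ppm check above expresses the rounding error in units of rate/100000, i.e. 10 ppm, so a result above 5 means more than 50 ppm. A standalone sketch of that arithmetic; DIV_ROUND_UP is reimplemented locally and the rates are illustrative.

#include <stdio.h>
#include <stdlib.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Compare the rounded clock rate against the ideal RGMII rate; the error is
 * scaled to 10 ppm units, so > 5 corresponds to more than 50 ppm. */
static int exceeds_50ppm(long rate, long rate_rounded)
{
	long ferr = labs(rate_rounded - rate);

	ferr = DIV_ROUND_UP(ferr, rate / 100000);
	return ferr > 5;
}

int main(void)
{
	/* 125 MHz target: 6.25 kHz is exactly 50 ppm, 10 kHz is 80 ppm */
	printf("125000000 vs 125006250: %d\n", exceeds_50ppm(125000000, 125006250));
	printf("125000000 vs 125010000: %d\n", exceeds_50ppm(125000000, 125010000));
	return 0;
}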
 static void macb_handle_link_change(struct net_device *dev)
 {
 	struct macb *bp = netdev_priv(dev);
@@ -250,6 +292,9 @@ static void macb_handle_link_change(struct net_device *dev)
 
 	spin_unlock_irqrestore(&bp->lock, flags);
 
+	if (!IS_ERR(bp->tx_clk))
+		macb_set_tx_clk(bp->tx_clk, phydev->speed, dev);
+
 	if (status_change) {
 		if (phydev->link) {
 			netif_carrier_on(dev);
@@ -1790,21 +1835,44 @@ static int __init macb_probe(struct platform_device *pdev)
 	spin_lock_init(&bp->lock);
 	INIT_WORK(&bp->tx_error_task, macb_tx_error_task);
 
-	bp->pclk = clk_get(&pdev->dev, "pclk");
+	bp->pclk = devm_clk_get(&pdev->dev, "pclk");
 	if (IS_ERR(bp->pclk)) {
-		dev_err(&pdev->dev, "failed to get macb_clk\n");
+		err = PTR_ERR(bp->pclk);
+		dev_err(&pdev->dev, "failed to get macb_clk (%d)\n", err);
 		goto err_out_free_dev;
 	}
-	clk_prepare_enable(bp->pclk);
 
-	bp->hclk = clk_get(&pdev->dev, "hclk");
+	bp->hclk = devm_clk_get(&pdev->dev, "hclk");
 	if (IS_ERR(bp->hclk)) {
-		dev_err(&pdev->dev, "failed to get hclk\n");
-		goto err_out_put_pclk;
+		err = PTR_ERR(bp->hclk);
+		dev_err(&pdev->dev, "failed to get hclk (%d)\n", err);
+		goto err_out_free_dev;
 	}
-	clk_prepare_enable(bp->hclk);
 
-	bp->regs = ioremap(regs->start, resource_size(regs));
+	bp->tx_clk = devm_clk_get(&pdev->dev, "tx_clk");
+
+	err = clk_prepare_enable(bp->pclk);
+	if (err) {
+		dev_err(&pdev->dev, "failed to enable pclk (%d)\n", err);
+		goto err_out_free_dev;
+	}
+
+	err = clk_prepare_enable(bp->hclk);
+	if (err) {
+		dev_err(&pdev->dev, "failed to enable hclk (%d)\n", err);
+		goto err_out_disable_pclk;
+	}
+
+	if (!IS_ERR(bp->tx_clk)) {
+		err = clk_prepare_enable(bp->tx_clk);
+		if (err) {
+			dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n",
+					err);
+			goto err_out_disable_hclk;
+		}
+	}
+
+	bp->regs = devm_ioremap(&pdev->dev, regs->start, resource_size(regs));
 	if (!bp->regs) {
 		dev_err(&pdev->dev, "failed to map registers, aborting.\n");
 		err = -ENOMEM;
@@ -1812,11 +1880,12 @@ static int __init macb_probe(struct platform_device *pdev)
 	}
 
 	dev->irq = platform_get_irq(pdev, 0);
-	err = request_irq(dev->irq, macb_interrupt, 0, dev->name, dev);
+	err = devm_request_irq(&pdev->dev, dev->irq, macb_interrupt, 0,
+			dev->name, dev);
 	if (err) {
 		dev_err(&pdev->dev, "Unable to request IRQ %d (error %d)\n",
 			dev->irq, err);
-		goto err_out_iounmap;
+		goto err_out_disable_clocks;
 	}
 
 	dev->netdev_ops = &macb_netdev_ops;
@@ -1879,7 +1948,7 @@ static int __init macb_probe(struct platform_device *pdev)
 	err = register_netdev(dev);
 	if (err) {
 		dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
-		goto err_out_free_irq;
+		goto err_out_disable_clocks;
 	}
 
 	err = macb_mii_init(bp);
@@ -1902,16 +1971,13 @@ static int __init macb_probe(struct platform_device *pdev)
 
 err_out_unregister_netdev:
 	unregister_netdev(dev);
-err_out_free_irq:
-	free_irq(dev->irq, dev);
-err_out_iounmap:
-	iounmap(bp->regs);
 err_out_disable_clocks:
+	if (!IS_ERR(bp->tx_clk))
+		clk_disable_unprepare(bp->tx_clk);
+err_out_disable_hclk:
 	clk_disable_unprepare(bp->hclk);
-	clk_put(bp->hclk);
+err_out_disable_pclk:
 	clk_disable_unprepare(bp->pclk);
-err_out_put_pclk:
-	clk_put(bp->pclk);
 err_out_free_dev:
 	free_netdev(dev);
 err_out:
@@ -1933,12 +1999,10 @@ static int __exit macb_remove(struct platform_device *pdev)
 		kfree(bp->mii_bus->irq);
 		mdiobus_free(bp->mii_bus);
 		unregister_netdev(dev);
-		free_irq(dev->irq, dev);
-		iounmap(bp->regs);
+		if (!IS_ERR(bp->tx_clk))
+			clk_disable_unprepare(bp->tx_clk);
 		clk_disable_unprepare(bp->hclk);
-		clk_put(bp->hclk);
 		clk_disable_unprepare(bp->pclk);
-		clk_put(bp->pclk);
 		free_netdev(dev);
 	}
 
@@ -1946,45 +2010,49 @@ static int __exit macb_remove(struct platform_device *pdev)
 }
 
 #ifdef CONFIG_PM
-static int macb_suspend(struct platform_device *pdev, pm_message_t state)
+static int macb_suspend(struct device *dev)
 {
+	struct platform_device *pdev = to_platform_device(dev);
 	struct net_device *netdev = platform_get_drvdata(pdev);
 	struct macb *bp = netdev_priv(netdev);
 
 	netif_carrier_off(netdev);
 	netif_device_detach(netdev);
 
+	if (!IS_ERR(bp->tx_clk))
+		clk_disable_unprepare(bp->tx_clk);
 	clk_disable_unprepare(bp->hclk);
 	clk_disable_unprepare(bp->pclk);
 
 	return 0;
 }
 
-static int macb_resume(struct platform_device *pdev)
+static int macb_resume(struct device *dev)
 {
+	struct platform_device *pdev = to_platform_device(dev);
 	struct net_device *netdev = platform_get_drvdata(pdev);
 	struct macb *bp = netdev_priv(netdev);
 
 	clk_prepare_enable(bp->pclk);
 	clk_prepare_enable(bp->hclk);
+	if (!IS_ERR(bp->tx_clk))
+		clk_prepare_enable(bp->tx_clk);
 
 	netif_device_attach(netdev);
 
 	return 0;
 }
-#else
-#define macb_suspend	NULL
-#define macb_resume	NULL
 #endif
 
+static SIMPLE_DEV_PM_OPS(macb_pm_ops, macb_suspend, macb_resume);
+
 static struct platform_driver macb_driver = {
 	.remove		= __exit_p(macb_remove),
-	.suspend	= macb_suspend,
-	.resume		= macb_resume,
 	.driver		= {
 		.name		= "macb",
 		.owner	= THIS_MODULE,
 		.of_match_table	= of_match_ptr(macb_dt_ids),
+		.pm	= &macb_pm_ops,
 	},
 };
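The macb probe rework above acquires pclk, hclk and the optional tx_clk in order and unwinds in reverse on failure. A hedged userspace sketch of that unwind shape; the clock names are kept, everything else is a stub standing in for clk_prepare_enable() and the IS_ERR() checks.

#include <stdio.h>

/* Stub standing in for clk_prepare_enable(); returns non-zero on failure. */
static int fake_enable(const char *name, int fail)
{
	if (fail) {
		fprintf(stderr, "failed to enable %s\n", name);
		return -1;
	}
	printf("enabled %s\n", name);
	return 0;
}

/* Unwind in reverse order of acquisition; the optional tx_clk is only
 * touched when it was actually obtained. */
static int probe_sketch(int have_tx_clk, int fail_tx_clk)
{
	int err;

	err = fake_enable("pclk", 0);
	if (err)
		goto err_out;
	err = fake_enable("hclk", 0);
	if (err)
		goto err_disable_pclk;
	if (have_tx_clk) {
		err = fake_enable("tx_clk", fail_tx_clk);
		if (err)
			goto err_disable_hclk;
	}
	return 0;

err_disable_hclk:
	printf("disabled hclk\n");
err_disable_pclk:
	printf("disabled pclk\n");
err_out:
	return err;
}

int main(void)
{
	probe_sketch(1, 1);	/* tx_clk enable fails: hclk then pclk unwound */
	return 0;
}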
 
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index f4076155bed7..51c02442160a 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -572,6 +572,7 @@ struct macb {
 	struct platform_device	*pdev;
 	struct clk		*pclk;
 	struct clk		*hclk;
+	struct clk		*tx_clk;
 	struct net_device	*dev;
 	struct napi_struct	napi;
 	struct work_struct	tx_error_task;
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index 4fc5c8ef5121..d2a183c3a6ce 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -14,7 +14,6 @@
  * this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 #include <linux/module.h>
-#include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/circ_buf.h>
 #include <linux/interrupt.h>
diff --git a/drivers/net/ethernet/chelsio/cxgb/common.h b/drivers/net/ethernet/chelsio/cxgb/common.h
index 8abb46b39032..53b1f9478383 100644
--- a/drivers/net/ethernet/chelsio/cxgb/common.h
+++ b/drivers/net/ethernet/chelsio/cxgb/common.h
@@ -11,8 +11,7 @@
  * published by the Free Software Foundation.                                *
  *                                                                           *
  * You should have received a copy of the GNU General Public License along   *
- * with this program; if not, write to the Free Software Foundation, Inc.,   *
- * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.                 *
+ * with this program; if not, see <http://www.gnu.org/licenses/>.            *
  *                                                                           *
  * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    *
  * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF      *
@@ -50,7 +49,6 @@
 #include <linux/if_vlan.h>
 #include <linux/mdio.h>
 #include <linux/crc32.h>
-#include <linux/init.h>
 #include <linux/slab.h>
 #include <asm/io.h>
 #include <linux/pci_ids.h>
diff --git a/drivers/net/ethernet/chelsio/cxgb/cphy.h b/drivers/net/ethernet/chelsio/cxgb/cphy.h
index 1f095a9fc739..a4d2a4c08d3f 100644
--- a/drivers/net/ethernet/chelsio/cxgb/cphy.h
+++ b/drivers/net/ethernet/chelsio/cxgb/cphy.h
@@ -11,8 +11,7 @@
  * published by the Free Software Foundation.                                *
  *                                                                           *
  * You should have received a copy of the GNU General Public License along   *
- * with this program; if not, write to the Free Software Foundation, Inc.,   *
- * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.                 *
+ * with this program; if not, see <http://www.gnu.org/licenses/>.            *
  *                                                                           *
  * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    *
  * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF      *
diff --git a/drivers/net/ethernet/chelsio/cxgb/cpl5_cmd.h b/drivers/net/ethernet/chelsio/cxgb/cpl5_cmd.h
index e36d45b78cc7..5249686afe71 100644
--- a/drivers/net/ethernet/chelsio/cxgb/cpl5_cmd.h
+++ b/drivers/net/ethernet/chelsio/cxgb/cpl5_cmd.h
@@ -11,8 +11,7 @@
  * published by the Free Software Foundation.                                *
  *                                                                           *
  * You should have received a copy of the GNU General Public License along   *
- * with this program; if not, write to the Free Software Foundation, Inc.,   *
- * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.                 *
+ * with this program; if not, see <http://www.gnu.org/licenses/>.            *
  *                                                                           *
  * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    *
  * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF      *
diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
index 1d021059f097..0fe7ff750d77 100644
--- a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
+++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
@@ -11,8 +11,7 @@
  * published by the Free Software Foundation.                                *
  *                                                                           *
  * You should have received a copy of the GNU General Public License along   *
- * with this program; if not, write to the Free Software Foundation, Inc.,   *
- * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.                 *
+ * with this program; if not, see <http://www.gnu.org/licenses/>.            *
  *                                                                           *
  * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    *
  * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF      *
@@ -38,7 +37,6 @@
 
 #include "common.h"
 #include <linux/module.h>
-#include <linux/init.h>
 #include <linux/pci.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
diff --git a/drivers/net/ethernet/chelsio/cxgb/elmer0.h b/drivers/net/ethernet/chelsio/cxgb/elmer0.h
index eef655c827d9..81526ad36339 100644
--- a/drivers/net/ethernet/chelsio/cxgb/elmer0.h
+++ b/drivers/net/ethernet/chelsio/cxgb/elmer0.h
@@ -11,8 +11,7 @@
  * published by the Free Software Foundation.                                *
  *                                                                           *
  * You should have received a copy of the GNU General Public License along   *
- * with this program; if not, write to the Free Software Foundation, Inc.,   *
- * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.                 *
+ * with this program; if not, see <http://www.gnu.org/licenses/>.            *
  *                                                                           *
  * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    *
  * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF      *
diff --git a/drivers/net/ethernet/chelsio/cxgb/espi.c b/drivers/net/ethernet/chelsio/cxgb/espi.c
index 639ff1955739..3e182eee799e 100644
--- a/drivers/net/ethernet/chelsio/cxgb/espi.c
+++ b/drivers/net/ethernet/chelsio/cxgb/espi.c
@@ -12,8 +12,7 @@
  * published by the Free Software Foundation.                                *
  *                                                                           *
  * You should have received a copy of the GNU General Public License along   *
- * with this program; if not, write to the Free Software Foundation, Inc.,   *
- * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.                 *
+ * with this program; if not, see <http://www.gnu.org/licenses/>.            *
  *                                                                           *
  * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    *
  * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF      *
diff --git a/drivers/net/ethernet/chelsio/cxgb/espi.h b/drivers/net/ethernet/chelsio/cxgb/espi.h
index 5694aad4fbc0..162de5259df9 100644
--- a/drivers/net/ethernet/chelsio/cxgb/espi.h
+++ b/drivers/net/ethernet/chelsio/cxgb/espi.h
@@ -11,8 +11,7 @@
  * published by the Free Software Foundation.                                *
  *                                                                           *
  * You should have received a copy of the GNU General Public License along   *
- * with this program; if not, write to the Free Software Foundation, Inc.,   *
- * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.                 *
+ * with this program; if not, see <http://www.gnu.org/licenses/>.            *
  *                                                                           *
  * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    *
  * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF      *
diff --git a/drivers/net/ethernet/chelsio/cxgb/gmac.h b/drivers/net/ethernet/chelsio/cxgb/gmac.h
index d42337457cf7..dfa77491a910 100644
--- a/drivers/net/ethernet/chelsio/cxgb/gmac.h
+++ b/drivers/net/ethernet/chelsio/cxgb/gmac.h
@@ -12,8 +12,7 @@
  * published by the Free Software Foundation.                                *
  *                                                                           *
  * You should have received a copy of the GNU General Public License along   *
- * with this program; if not, write to the Free Software Foundation, Inc.,   *
- * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.                 *
+ * with this program; if not, see <http://www.gnu.org/licenses/>.            *
  *                                                                           *
  * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    *
  * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF      *
diff --git a/drivers/net/ethernet/chelsio/cxgb/mv88x201x.c b/drivers/net/ethernet/chelsio/cxgb/mv88x201x.c
index f7136b2fd1e5..d0cf611551a1 100644
--- a/drivers/net/ethernet/chelsio/cxgb/mv88x201x.c
+++ b/drivers/net/ethernet/chelsio/cxgb/mv88x201x.c
@@ -12,8 +12,7 @@
  * published by the Free Software Foundation.                                *
  *                                                                           *
  * You should have received a copy of the GNU General Public License along   *
- * with this program; if not, write to the Free Software Foundation, Inc.,   *
- * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.                 *
+ * with this program; if not, see <http://www.gnu.org/licenses/>.            *
  *                                                                           *
  * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    *
  * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF      *
diff --git a/drivers/net/ethernet/chelsio/cxgb/pm3393.c b/drivers/net/ethernet/chelsio/cxgb/pm3393.c
index eb33a31b08a0..ec5e05052d99 100644
--- a/drivers/net/ethernet/chelsio/cxgb/pm3393.c
+++ b/drivers/net/ethernet/chelsio/cxgb/pm3393.c
@@ -12,8 +12,7 @@
  * published by the Free Software Foundation.                                *
  *                                                                           *
  * You should have received a copy of the GNU General Public License along   *
- * with this program; if not, write to the Free Software Foundation, Inc.,   *
- * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.                 *
+ * with this program; if not, see <http://www.gnu.org/licenses/>.            *
  *                                                                           *
  * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    *
  * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF      *
diff --git a/drivers/net/ethernet/chelsio/cxgb/regs.h b/drivers/net/ethernet/chelsio/cxgb/regs.h
index c80bf4d6d0a6..964ce59ee169 100644
--- a/drivers/net/ethernet/chelsio/cxgb/regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb/regs.h
@@ -11,8 +11,7 @@
  * published by the Free Software Foundation.                                *
  *                                                                           *
  * You should have received a copy of the GNU General Public License along   *
- * with this program; if not, write to the Free Software Foundation, Inc.,   *
- * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.                 *
+ * with this program; if not, see <http://www.gnu.org/licenses/>.            *
  *                                                                           *
  * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    *
  * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF      *
diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.c b/drivers/net/ethernet/chelsio/cxgb/sge.c
index 8061fb0ef7ed..4c5879389003 100644
--- a/drivers/net/ethernet/chelsio/cxgb/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb/sge.c
@@ -12,8 +12,7 @@
  * published by the Free Software Foundation.                                *
  *                                                                           *
  * You should have received a copy of the GNU General Public License along   *
- * with this program; if not, write to the Free Software Foundation, Inc.,   *
- * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.                 *
+ * with this program; if not, see <http://www.gnu.org/licenses/>.            *
  *                                                                           *
  * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    *
  * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF      *
@@ -47,7 +46,6 @@
 #include <linux/etherdevice.h>
 #include <linux/if_vlan.h>
 #include <linux/skbuff.h>
-#include <linux/init.h>
 #include <linux/mm.h>
 #include <linux/tcp.h>
 #include <linux/ip.h>
diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.h b/drivers/net/ethernet/chelsio/cxgb/sge.h
index b9bf16b385f7..a1ba591b3431 100644
--- a/drivers/net/ethernet/chelsio/cxgb/sge.h
+++ b/drivers/net/ethernet/chelsio/cxgb/sge.h
@@ -11,8 +11,7 @@
  * published by the Free Software Foundation.                                *
  *                                                                           *
  * You should have received a copy of the GNU General Public License along   *
- * with this program; if not, write to the Free Software Foundation, Inc.,   *
- * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.                 *
+ * with this program; if not, see <http://www.gnu.org/licenses/>.            *
  *                                                                           *
  * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    *
  * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF      *
diff --git a/drivers/net/ethernet/chelsio/cxgb/subr.c b/drivers/net/ethernet/chelsio/cxgb/subr.c
index e0a03a31e7c4..816719314cc8 100644
--- a/drivers/net/ethernet/chelsio/cxgb/subr.c
+++ b/drivers/net/ethernet/chelsio/cxgb/subr.c
@@ -12,8 +12,7 @@
  * published by the Free Software Foundation.                                *
  *                                                                           *
  * You should have received a copy of the GNU General Public License along   *
- * with this program; if not, write to the Free Software Foundation, Inc.,   *
- * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.                 *
+ * with this program; if not, see <http://www.gnu.org/licenses/>.            *
  *                                                                           *
  * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    *
  * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF      *
diff --git a/drivers/net/ethernet/chelsio/cxgb/suni1x10gexp_regs.h b/drivers/net/ethernet/chelsio/cxgb/suni1x10gexp_regs.h
index d0f87d82566a..7f79cc7ceb75 100644
--- a/drivers/net/ethernet/chelsio/cxgb/suni1x10gexp_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb/suni1x10gexp_regs.h
@@ -12,8 +12,7 @@
  * published by the Free Software Foundation.                                *
  *                                                                           *
  * You should have received a copy of the GNU General Public License along   *
- * with this program; if not, write to the Free Software Foundation, Inc.,   *
- * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.                 *
+ * with this program; if not, see <http://www.gnu.org/licenses/>.            *
  *                                                                           *
  * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    *
  * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF      *
diff --git a/drivers/net/ethernet/chelsio/cxgb3/common.h b/drivers/net/ethernet/chelsio/cxgb3/common.h
index 8c82248ce416..442480982d3f 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/common.h
+++ b/drivers/net/ethernet/chelsio/cxgb3/common.h
@@ -36,7 +36,6 @@
 #include <linux/types.h>
 #include <linux/ctype.h>
 #include <linux/delay.h>
-#include <linux/init.h>
 #include <linux/netdevice.h>
 #include <linux/ethtool.h>
 #include <linux/mdio.h>
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
index 76ae09999b5b..c0a9dd55f4e5 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
@@ -182,7 +182,7 @@ static struct net_device *get_iff_from_mac(struct adapter *adapter,
 	for_each_port(adapter, i) {
 		struct net_device *dev = adapter->port[i];
 
-		if (!memcmp(dev->dev_addr, mac, ETH_ALEN)) {
+		if (ether_addr_equal(dev->dev_addr, mac)) {
 			rcu_read_lock();
 			if (vlan && vlan != VLAN_VID_MASK) {
 				dev = __vlan_find_dev_deep(dev, htons(ETH_P_8021Q), vlan);
diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.c b/drivers/net/ethernet/chelsio/cxgb3/l2t.c
index 8d53438638b2..5f226eda8cd6 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.c
@@ -429,7 +429,7 @@ found:
 		} else {
 			e->state = neigh->nud_state & NUD_CONNECTED ?
 			    L2T_STATE_VALID : L2T_STATE_STALE;
-			if (memcmp(e->dmac, neigh->ha, 6))
+			if (!ether_addr_equal(e->dmac, neigh->ha))
 				setup_l2e_send_pending(dev, NULL, e);
 		}
 	}
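Both Chelsio hunks above replace a raw memcmp() on MAC addresses with ether_addr_equal(), which returns true on a match, hence the added or dropped '!'. A local userspace stand-in showing the inverted sense; the helper name here is made up, and the kernel's version may additionally use word-sized compares.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

/* Local stand-in for ether_addr_equal(): true when two MACs match.  Note
 * the inverted sense versus memcmp(), which returns 0 on equality. */
static bool mac_equal(const unsigned char *a, const unsigned char *b)
{
	return memcmp(a, b, ETH_ALEN) == 0;
}

int main(void)
{
	unsigned char m1[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	unsigned char m2[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

	printf("mac_equal=%d memcmp=%d\n", mac_equal(m1, m2),
	       memcmp(m1, m2, ETH_ALEN));
	return 0;
}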
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 56e0415f8cdf..1f4b9b30b9ed 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -50,13 +50,13 @@
 #include "cxgb4_uld.h"
 
 #define T4FW_VERSION_MAJOR 0x01
-#define T4FW_VERSION_MINOR 0x06
-#define T4FW_VERSION_MICRO 0x18
+#define T4FW_VERSION_MINOR 0x09
+#define T4FW_VERSION_MICRO 0x17
 #define T4FW_VERSION_BUILD 0x00
 
 #define T5FW_VERSION_MAJOR 0x01
-#define T5FW_VERSION_MINOR 0x08
-#define T5FW_VERSION_MICRO 0x1C
+#define T5FW_VERSION_MINOR 0x09
+#define T5FW_VERSION_MICRO 0x17
 #define T5FW_VERSION_BUILD 0x00
 
 #define CH_WARN(adap, fmt, ...) dev_warn(adap->pdev_dev, fmt, ## __VA_ARGS__)
@@ -387,8 +387,9 @@ struct work_struct;
 
 enum {                                 /* adapter flags */
 	FULL_INIT_DONE     = (1 << 0),
-	USING_MSI          = (1 << 1),
-	USING_MSIX         = (1 << 2),
+	DEV_ENABLED        = (1 << 1),
+	USING_MSI          = (1 << 2),
+	USING_MSIX         = (1 << 3),
 	FW_OK              = (1 << 4),
 	RSS_TNLALLLOOKUP   = (1 << 5),
 	USING_SOFT_PARAMS  = (1 << 6),
@@ -938,7 +939,6 @@ int t4_seeprom_wp(struct adapter *adapter, bool enable);
 int get_vpd_params(struct adapter *adapter, struct vpd_params *p);
 int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size);
 unsigned int t4_flash_cfg_addr(struct adapter *adapter);
-int t4_load_cfg(struct adapter *adapter, const u8 *cfg_data, unsigned int size);
 int t4_get_fw_version(struct adapter *adapter, u32 *vers);
 int t4_get_tp_version(struct adapter *adapter, u32 *vers);
 int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
@@ -979,13 +979,6 @@ int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
 int t4_fw_bye(struct adapter *adap, unsigned int mbox);
 int t4_early_init(struct adapter *adap, unsigned int mbox);
 int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset);
-int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force);
-int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset);
-int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
-		  const u8 *fw_data, unsigned int size, int force);
-int t4_fw_config_file(struct adapter *adap, unsigned int mbox,
-		      unsigned int mtype, unsigned int maddr,
-		      u32 *finiver, u32 *finicsum, u32 *cfcsum);
 int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
 			  unsigned int cache_line_size);
 int t4_fw_initialize(struct adapter *adap, unsigned int mbox);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index fff02ed1295e..43ab35fea48d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -4288,7 +4288,15 @@ static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev,
 	struct port_info *p = netdev_priv(dev);
 	struct adapter *adapter = p->adapter;
 
+	/* Block retrieving statistics during EEH error
+	 * recovery. Otherwise, the recovery might fail
+	 * and the PCI device will be removed permanently
+	 */
 	spin_lock(&adapter->stats_lock);
+	if (!netif_device_present(dev)) {
+		spin_unlock(&adapter->stats_lock);
+		return ns;
+	}
 	t4_get_port_stats(adapter, p->tx_chan, &stats);
 	spin_unlock(&adapter->stats_lock);
 
@@ -5496,16 +5504,21 @@ static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
 	rtnl_lock();
 	adap->flags &= ~FW_OK;
 	notify_ulds(adap, CXGB4_STATE_START_RECOVERY);
+	spin_lock(&adap->stats_lock);
 	for_each_port(adap, i) {
 		struct net_device *dev = adap->port[i];
 
 		netif_device_detach(dev);
 		netif_carrier_off(dev);
 	}
+	spin_unlock(&adap->stats_lock);
 	if (adap->flags & FULL_INIT_DONE)
 		cxgb_down(adap);
 	rtnl_unlock();
-	pci_disable_device(pdev);
+	if ((adap->flags & DEV_ENABLED)) {
+		pci_disable_device(pdev);
+		adap->flags &= ~DEV_ENABLED;
+	}
 out:	return state == pci_channel_io_perm_failure ?
 		PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
 }
@@ -5522,9 +5535,13 @@ static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
 		return PCI_ERS_RESULT_RECOVERED;
 	}
 
-	if (pci_enable_device(pdev)) {
-		dev_err(&pdev->dev, "cannot reenable PCI device after reset\n");
-		return PCI_ERS_RESULT_DISCONNECT;
+	if (!(adap->flags & DEV_ENABLED)) {
+		if (pci_enable_device(pdev)) {
+			dev_err(&pdev->dev,
+				"Cannot reenable PCI device after reset\n");
+			return PCI_ERS_RESULT_DISCONNECT;
+		}
+		adap->flags |= DEV_ENABLED;
 	}
 
 	pci_set_master(pdev);
@@ -5910,6 +5927,9 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 		goto out_disable_device;
 	}
 
+	/* PCI device has been enabled */
+	adapter->flags |= DEV_ENABLED;
+
 	adapter->regs = pci_ioremap_bar(pdev, 0);
 	if (!adapter->regs) {
 		dev_err(&pdev->dev, "cannot map device registers\n");
@@ -6143,10 +6163,13 @@ static void remove_one(struct pci_dev *pdev)
 		iounmap(adapter->regs);
 		if (!is_t4(adapter->params.chip))
 			iounmap(adapter->bar2);
-		kfree(adapter);
 		pci_disable_pcie_error_reporting(pdev);
-		pci_disable_device(pdev);
+		if ((adapter->flags & DEV_ENABLED)) {
+			pci_disable_device(pdev);
+			adapter->flags &= ~DEV_ENABLED;
+		}
 		pci_release_regions(pdev);
+		kfree(adapter);
 	} else
 		pci_release_regions(pdev);
 }
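The DEV_ENABLED flag added above keeps pci_enable_device()/pci_disable_device() balanced: both the EEH error path and remove_one() only disable the device while the flag is still set. A minimal sketch of that bookkeeping; the flag value and structure here are illustrative, not the driver's.

#include <stdio.h>

#define DEV_ENABLED (1 << 1)	/* illustrative bit, as in the adapter flags */

struct fake_adapter { unsigned int flags; };

/* Disable the (pretend) PCI device at most once, clearing the flag so a
 * later caller skips the second disable. */
static void disable_once(struct fake_adapter *adap)
{
	if (adap->flags & DEV_ENABLED) {
		printf("pci_disable_device()\n");
		adap->flags &= ~DEV_ENABLED;
	} else {
		printf("already disabled, skipping\n");
	}
}

int main(void)
{
	struct fake_adapter adap = { .flags = DEV_ENABLED };

	disable_once(&adap);	/* EEH error path */
	disable_once(&adap);	/* later remove_one(): no double disable */
	return 0;
}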
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index cc3511a5cd0c..47ffa64fcf19 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -1630,7 +1630,8 @@ static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
 	skb->ip_summed = CHECKSUM_UNNECESSARY;
 	skb_record_rx_queue(skb, rxq->rspq.idx);
 	if (rxq->rspq.netdev->features & NETIF_F_RXHASH)
-		skb->rxhash = (__force u32)pkt->rsshdr.hash_val;
+		skb_set_hash(skb, (__force u32)pkt->rsshdr.hash_val,
+			     PKT_HASH_TYPE_L3);
 
 	if (unlikely(pkt->vlan_ex)) {
 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(pkt->vlan));
@@ -1686,7 +1687,8 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
 	skb->protocol = eth_type_trans(skb, q->netdev);
 	skb_record_rx_queue(skb, q->idx);
 	if (skb->dev->features & NETIF_F_RXHASH)
-		skb->rxhash = (__force u32)pkt->rsshdr.hash_val;
+		skb_set_hash(skb, (__force u32)pkt->rsshdr.hash_val,
+			     PKT_HASH_TYPE_L3);
 
 	rxq->stats.pkts++;
 
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index e1413eacdbd2..2c109343d570 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -32,12 +32,13 @@
  * SOFTWARE.
  */
 
-#include <linux/init.h>
 #include <linux/delay.h>
 #include "cxgb4.h"
 #include "t4_regs.h"
 #include "t4fw_api.h"
 
+static int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
+			 const u8 *fw_data, unsigned int size, int force);
 /**
  *	t4_wait_op_done_val - wait until an operation is completed
  *	@adapter: the adapter performing the operation
@@ -1070,62 +1071,6 @@ unsigned int t4_flash_cfg_addr(struct adapter *adapter)
 }
 
 /**
- *	t4_load_cfg - download config file
- *	@adap: the adapter
- *	@cfg_data: the cfg text file to write
- *	@size: text file size
- *
- *	Write the supplied config text file to the card's serial flash.
- */
-int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
-{
-	int ret, i, n;
-	unsigned int addr;
-	unsigned int flash_cfg_start_sec;
-	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
-
-	addr = t4_flash_cfg_addr(adap);
-	flash_cfg_start_sec = addr / SF_SEC_SIZE;
-
-	if (size > FLASH_CFG_MAX_SIZE) {
-		dev_err(adap->pdev_dev, "cfg file too large, max is %u bytes\n",
-			FLASH_CFG_MAX_SIZE);
-		return -EFBIG;
-	}
-
-	i = DIV_ROUND_UP(FLASH_CFG_MAX_SIZE,	/* # of sectors spanned */
-			 sf_sec_size);
-	ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
-				     flash_cfg_start_sec + i - 1);
-	/*
-	 * If size == 0 then we're simply erasing the FLASH sectors associated
-	 * with the on-adapter Firmware Configuration File.
-	 */
-	if (ret || size == 0)
-		goto out;
-
-	/* this will write to the flash up to SF_PAGE_SIZE at a time */
-	for (i = 0; i < size; i += SF_PAGE_SIZE) {
-		if ((size - i) <  SF_PAGE_SIZE)
-			n = size - i;
-		else
-			n = SF_PAGE_SIZE;
-		ret = t4_write_flash(adap, addr, n, cfg_data);
-		if (ret)
-			goto out;
-
-		addr += SF_PAGE_SIZE;
-		cfg_data += SF_PAGE_SIZE;
-	}
-
-out:
-	if (ret)
-		dev_err(adap->pdev_dev, "config file %s failed %d\n",
-			(size == 0 ? "clear" : "download"), ret);
-	return ret;
-}
-
-/**
  *	t4_load_fw - download firmware
  *	@adap: the adapter
  *	@fw_data: the firmware image to write
@@ -2810,7 +2755,7 @@ int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
  *	be doing.  The only way out of this state is to RESTART the firmware
  *	...
  */
-int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
+static int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
 {
 	int ret = 0;
 
@@ -2875,7 +2820,7 @@ int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
  *	    the chip since older firmware won't recognize the PCIE_FW.HALT
  *	    flag and automatically RESET itself on startup.
  */
-int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
+static int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
 {
 	if (reset) {
 		/*
@@ -2938,8 +2883,8 @@ int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
  *	positive errno indicates that the adapter is ~probably~ intact, a
  *	negative errno indicates that things are looking bad ...
  */
-int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
-		  const u8 *fw_data, unsigned int size, int force)
+static int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
+			 const u8 *fw_data, unsigned int size, int force)
 {
 	const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
 	int reset, ret;
@@ -2964,78 +2909,6 @@ int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
 	return t4_fw_restart(adap, mbox, reset);
 }
 
-
-/**
- *	t4_fw_config_file - setup an adapter via a Configuration File
- *	@adap: the adapter
- *	@mbox: mailbox to use for the FW command
- *	@mtype: the memory type where the Configuration File is located
- *	@maddr: the memory address where the Configuration File is located
- *	@finiver: return value for CF [fini] version
- *	@finicsum: return value for CF [fini] checksum
- *	@cfcsum: return value for CF computed checksum
- *
- *	Issue a command to get the firmware to process the Configuration
- *	File located at the specified mtype/maddress.  If the Configuration
- *	File is processed successfully and return value pointers are
- *	provided, the Configuration File "[fini] section version and
- *	checksum values will be returned along with the computed checksum.
- *	It's up to the caller to decide how it wants to respond to the
- *	checksums not matching but it is recommended that a prominent warning
- *	be emitted in order to help people rapidly identify changed or
- *	corrupted Configuration Files.
- *
- *	Also note that it's possible to modify things like "niccaps",
- *	"toecaps",etc. between processing the Configuration File and telling
- *	the firmware to use the new configuration.  Callers which want to
- *	do this will need to "hand-roll" their own CAPS_CONFIGS commands for
- *	Configuration Files if they want to do this.
- */
-int t4_fw_config_file(struct adapter *adap, unsigned int mbox,
-		      unsigned int mtype, unsigned int maddr,
-		      u32 *finiver, u32 *finicsum, u32 *cfcsum)
-{
-	struct fw_caps_config_cmd caps_cmd;
-	int ret;
-
-	/*
-	 * Tell the firmware to process the indicated Configuration File.
-	 * If there are no errors and the caller has provided return value
-	 * pointers for the [fini] section version, checksum and computed
-	 * checksum, pass those back to the caller.
-	 */
-	memset(&caps_cmd, 0, sizeof(caps_cmd));
-	caps_cmd.op_to_write =
-		htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
-		      FW_CMD_REQUEST |
-		      FW_CMD_READ);
-	caps_cmd.cfvalid_to_len16 =
-		htonl(FW_CAPS_CONFIG_CMD_CFVALID |
-		      FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
-		      FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) |
-		      FW_LEN16(caps_cmd));
-	ret = t4_wr_mbox(adap, mbox, &caps_cmd, sizeof(caps_cmd), &caps_cmd);
-	if (ret < 0)
-		return ret;
-
-	if (finiver)
-		*finiver = ntohl(caps_cmd.finiver);
-	if (finicsum)
-		*finicsum = ntohl(caps_cmd.finicsum);
-	if (cfcsum)
-		*cfcsum = ntohl(caps_cmd.cfcsum);
-
-	/*
-	 * And now tell the firmware to use the configuration we just loaded.
-	 */
-	caps_cmd.op_to_write =
-		htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
-		      FW_CMD_REQUEST |
-		      FW_CMD_WRITE);
-	caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
-	return t4_wr_mbox(adap, mbox, &caps_cmd, sizeof(caps_cmd), NULL);
-}
-
 /**
  *	t4_fixup_host_params - fix up host-dependent parameters
  *	@adap: the adapter
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
index 61362450d05b..f412d0fa0850 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
@@ -268,7 +268,6 @@ int t4vf_wait_dev_ready(struct adapter *);
 int t4vf_port_init(struct adapter *, int);
 
 int t4vf_fw_reset(struct adapter *);
-int t4vf_query_params(struct adapter *, unsigned int, const u32 *, u32 *);
 int t4vf_set_params(struct adapter *, unsigned int, const u32 *, const u32 *);
 
 int t4vf_get_sge_params(struct adapter *);
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
index d958c44341b5..25dfeb8f28ed 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
@@ -363,8 +363,8 @@ int t4vf_fw_reset(struct adapter *adapter)
  *	Reads the values of firmware or device parameters.  Up to 7 parameters
  *	can be queried at once.
  */
-int t4vf_query_params(struct adapter *adapter, unsigned int nparams,
-		      const u32 *params, u32 *vals)
+static int t4vf_query_params(struct adapter *adapter, unsigned int nparams,
+			     const u32 *params, u32 *vals)
 {
 	int i, ret;
 	struct fw_params_cmd cmd, rpl;
diff --git a/drivers/net/ethernet/cirrus/ep93xx_eth.c b/drivers/net/ethernet/cirrus/ep93xx_eth.c
index ec88de4ac162..2be2a99c5ea3 100644
--- a/drivers/net/ethernet/cirrus/ep93xx_eth.c
+++ b/drivers/net/ethernet/cirrus/ep93xx_eth.c
@@ -18,7 +18,6 @@
 #include <linux/mii.h>
 #include <linux/etherdevice.h>
 #include <linux/ethtool.h>
-#include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/moduleparam.h>
 #include <linux/platform_device.h>
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index ff78dfaec508..b740bfce72ef 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -1036,11 +1036,12 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
 		skb->protocol = eth_type_trans(skb, netdev);
 		skb_record_rx_queue(skb, q_number);
 		if (netdev->features & NETIF_F_RXHASH) {
-			skb->rxhash = rss_hash;
-			if (rss_type & (NIC_CFG_RSS_HASH_TYPE_TCP_IPV6_EX |
-					NIC_CFG_RSS_HASH_TYPE_TCP_IPV6 |
-					NIC_CFG_RSS_HASH_TYPE_TCP_IPV4))
-				skb->l4_rxhash = true;
+			skb_set_hash(skb, rss_hash,
+				     (rss_type &
+				      (NIC_CFG_RSS_HASH_TYPE_TCP_IPV6_EX |
+				       NIC_CFG_RSS_HASH_TYPE_TCP_IPV6 |
+				       NIC_CFG_RSS_HASH_TYPE_TCP_IPV4)) ?
+				     PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
 		}
 
 		if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc) {
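Both the cxgb4 and enic hunks above move from writing skb->rxhash directly to skb_set_hash() with an explicit hash type; enic reports an L4 hash only when the NIC flags a TCP-based RSS type, otherwise L3. A hedged sketch of that classification with made-up enum and bit names:

#include <stdio.h>

/* Illustrative stand-ins for PKT_HASH_TYPE_* and the NIC's RSS type bits. */
enum pkt_hash_type { HASH_TYPE_L3, HASH_TYPE_L4 };

#define RSS_TCP_IPV4     (1 << 0)
#define RSS_TCP_IPV6     (1 << 1)
#define RSS_TCP_IPV6_EX  (1 << 2)

static enum pkt_hash_type classify(unsigned int rss_type)
{
	return (rss_type & (RSS_TCP_IPV4 | RSS_TCP_IPV6 | RSS_TCP_IPV6_EX)) ?
		HASH_TYPE_L4 : HASH_TYPE_L3;
}

int main(void)
{
	printf("tcp/ipv4 rss -> %s\n",
	       classify(RSS_TCP_IPV4) == HASH_TYPE_L4 ? "L4" : "L3");
	printf("non-tcp rss  -> %s\n",
	       classify(0) == HASH_TYPE_L4 ? "L4" : "L3");
	return 0;
}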
diff --git a/drivers/net/ethernet/cisco/enic/enic_pp.c b/drivers/net/ethernet/cisco/enic/enic_pp.c
index 43464f0a4f99..e6a83198c3dd 100644
--- a/drivers/net/ethernet/cisco/enic/enic_pp.c
+++ b/drivers/net/ethernet/cisco/enic/enic_pp.c
@@ -162,7 +162,7 @@ static int enic_are_pp_different(struct enic_port_profile *pp1,
 	return strcmp(pp1->name, pp2->name) | !!memcmp(pp1->instance_uuid,
 		pp2->instance_uuid, PORT_UUID_MAX) |
 		!!memcmp(pp1->host_uuid, pp2->host_uuid, PORT_UUID_MAX) |
-		!!memcmp(pp1->mac_addr, pp2->mac_addr, ETH_ALEN);
+		!ether_addr_equal(pp1->mac_addr, pp2->mac_addr);
 }
 
 static int enic_pp_preassociate(struct enic *enic, int vf,
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index 7080ad6c4014..a1a2b4028a5c 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -23,7 +23,6 @@
 #include <linux/ioport.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
-#include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/skbuff.h>
 #include <linux/spinlock.h>
@@ -110,8 +109,8 @@ typedef struct board_info {
 	u8		imr_all;
 
 	unsigned int	flags;
-	unsigned int	in_suspend :1;
-	unsigned int	wake_supported :1;
+	unsigned int	in_suspend:1;
+	unsigned int	wake_supported:1;
 
 	enum dm9000_type type;
 
@@ -162,7 +161,7 @@ static inline board_info_t *to_dm9000_board(struct net_device *dev)
  *   Read a byte from I/O port
  */
 static u8
-ior(board_info_t * db, int reg)
+ior(board_info_t *db, int reg)
 {
 	writeb(reg, db->io_addr);
 	return readb(db->io_data);
@@ -173,7 +172,7 @@ ior(board_info_t * db, int reg)
  */
 
 static void
-iow(board_info_t * db, int reg, int value)
+iow(board_info_t *db, int reg, int value)
 {
 	writeb(reg, db->io_addr);
 	writeb(value, db->io_data);
@@ -745,9 +744,9 @@ static const struct ethtool_ops dm9000_ethtool_ops = {
 	.get_link		= dm9000_get_link,
 	.get_wol		= dm9000_get_wol,
 	.set_wol		= dm9000_set_wol,
- 	.get_eeprom_len		= dm9000_get_eeprom_len,
- 	.get_eeprom		= dm9000_get_eeprom,
- 	.set_eeprom		= dm9000_set_eeprom,
+	.get_eeprom_len		= dm9000_get_eeprom_len,
+	.get_eeprom		= dm9000_get_eeprom,
+	.set_eeprom		= dm9000_set_eeprom,
 };
 
 static void dm9000_show_carrier(board_info_t *db,
@@ -795,7 +794,7 @@ dm9000_poll_work(struct work_struct *w)
 		}
 	} else
 		mii_check_media(&db->mii, netif_msg_link(db), 0);
-	
+
 	if (netif_running(ndev))
 		dm9000_schedule_poll(db);
 }
@@ -1252,12 +1251,11 @@ static irqreturn_t dm9000_wol_interrupt(int irq, void *dev_id)
 			dev_info(db->dev, "wake by link status change\n");
 		if (wcr & WCR_SAMPLEST)
 			dev_info(db->dev, "wake by sample packet\n");
-		if (wcr & WCR_MAGICST )
+		if (wcr & WCR_MAGICST)
 			dev_info(db->dev, "wake by magic packet\n");
 		if (!(wcr & (WCR_LINKST | WCR_SAMPLEST | WCR_MAGICST)))
 			dev_err(db->dev, "wake signalled with no reason? "
 				"NSR=0x%02x, WSR=0x%02x\n", nsr, wcr);
-
 	}
 
 	spin_unlock_irqrestore(&db->lock, flags);
@@ -1314,7 +1312,7 @@ dm9000_open(struct net_device *dev)
 
 	mii_check_media(&db->mii, netif_msg_link(db), 1);
 	netif_start_queue(dev);
-	
+
 	dm9000_schedule_poll(db);
 
 	return 0;
@@ -1628,7 +1626,7 @@ dm9000_probe(struct platform_device *pdev)
 
 	if (!is_valid_ether_addr(ndev->dev_addr)) {
 		/* try reading from mac */
-		
+
 		mac_src = "chip";
 		for (i = 0; i < 6; i++)
 			ndev->dev_addr[i] = ior(db, i+DM9000_PAR);
diff --git a/drivers/net/ethernet/dec/tulip/eeprom.c b/drivers/net/ethernet/dec/tulip/eeprom.c
index df5a892fb49c..1812f4916917 100644
--- a/drivers/net/ethernet/dec/tulip/eeprom.c
+++ b/drivers/net/ethernet/dec/tulip/eeprom.c
@@ -13,7 +13,6 @@
 #include <linux/pci.h>
 #include <linux/slab.h>
 #include "tulip.h"
-#include <linux/init.h>
 #include <asm/unaligned.h>
 
 
diff --git a/drivers/net/ethernet/dec/tulip/media.c b/drivers/net/ethernet/dec/tulip/media.c
index 93a4afaa09f1..dcf21a36a9cf 100644
--- a/drivers/net/ethernet/dec/tulip/media.c
+++ b/drivers/net/ethernet/dec/tulip/media.c
@@ -12,7 +12,6 @@
 
 #include <linux/kernel.h>
 #include <linux/mii.h>
-#include <linux/init.h>
 #include <linux/delay.h>
 #include <linux/pci.h>
 #include "tulip.h"
@@ -458,7 +457,7 @@ void tulip_find_mii(struct net_device *dev, int board_idx)
 	/* Find the connected MII xcvrs.
 	   Doing this in open() would allow detecting external xcvrs later,
 	   but takes much time. */
-	for (phyn = 1; phyn <= 32 && phy_idx < sizeof (tp->phys); phyn++) {
+	for (phyn = 1; phyn <= 32 && phy_idx < ARRAY_SIZE(tp->phys); phyn++) {
 		int phy = phyn & 0x1f;
 		int mii_status = tulip_mdio_read (dev, phy, MII_BMSR);
 		if ((mii_status & 0x8301) == 0x8001 ||
diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c
index a5397b130724..aa4ee385091f 100644
--- a/drivers/net/ethernet/dec/tulip/uli526x.c
+++ b/drivers/net/ethernet/dec/tulip/uli526x.c
@@ -1192,9 +1192,6 @@ static int uli526x_suspend(struct pci_dev *pdev, pm_message_t state)
 
 	ULI526X_DBUG(0, "uli526x_suspend", 0);
 
-	if (!netdev_priv(dev))
-		return 0;
-
 	pci_save_state(pdev);
 
 	if (!netif_running(dev))
@@ -1228,9 +1225,6 @@ static int uli526x_resume(struct pci_dev *pdev)
 
 	ULI526X_DBUG(0, "uli526x_resume", 0);
 
-	if (!netdev_priv(dev))
-		return 0;
-
 	pci_restore_state(pdev);
 
 	if (!netif_running(dev))
diff --git a/drivers/net/ethernet/dec/tulip/xircom_cb.c b/drivers/net/ethernet/dec/tulip/xircom_cb.c
index ab7ebac6fbea..6204cdfe43a6 100644
--- a/drivers/net/ethernet/dec/tulip/xircom_cb.c
+++ b/drivers/net/ethernet/dec/tulip/xircom_cb.c
@@ -28,7 +28,6 @@
 #include <linux/etherdevice.h>
 #include <linux/skbuff.h>
 #include <linux/delay.h>
-#include <linux/init.h>
 #include <linux/bitops.h>
 
 #include <asm/uaccess.h>
diff --git a/drivers/net/ethernet/dlink/dl2k.h b/drivers/net/ethernet/dlink/dl2k.h
index 3699565704c7..7d07a0f5320d 100644
--- a/drivers/net/ethernet/dlink/dl2k.h
+++ b/drivers/net/ethernet/dlink/dl2k.h
@@ -25,7 +25,6 @@
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/skbuff.h>
-#include <linux/init.h>
 #include <linux/crc32.h>
 #include <linux/ethtool.h>
 #include <linux/mii.h>
diff --git a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c
index f3d60eb13c3a..8a79a32a5674 100644
--- a/drivers/net/ethernet/dnet.c
+++ b/drivers/net/ethernet/dnet.c
@@ -15,7 +15,6 @@
 #include <linux/types.h>
 #include <linux/slab.h>
 #include <linux/delay.h>
-#include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 4ccaf9af6fc9..8d09615da585 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -34,7 +34,7 @@
 #include "be_hw.h"
 #include "be_roce.h"
 
-#define DRV_VER			"4.9.224.0u"
+#define DRV_VER			"10.0.600.0u"
 #define DRV_NAME		"be2net"
 #define BE_NAME			"Emulex BladeEngine2"
 #define BE3_NAME		"Emulex BladeEngine3"
@@ -42,7 +42,7 @@
 #define OC_NAME_BE		OC_NAME	"(be3)"
 #define OC_NAME_LANCER		OC_NAME "(Lancer)"
 #define OC_NAME_SH		OC_NAME "(Skyhawk)"
-#define DRV_DESC		"Emulex OneConnect 10Gbps NIC Driver"
+#define DRV_DESC		"Emulex OneConnect NIC Driver"
 
 #define BE_VENDOR_ID 		0x19a2
 #define EMULEX_VENDOR_ID	0x10df
@@ -283,7 +283,6 @@ struct be_rx_compl_info {
 	u32 rss_hash;
 	u16 vlan_tag;
 	u16 pkt_size;
-	u16 rxq_idx;
 	u16 port;
 	u8 vlanf;
 	u8 num_rcvd;
@@ -493,7 +492,7 @@ struct be_adapter {
 	u16 pvid;
 	struct phy_info phy;
 	u8 wol_cap;
-	bool wol;
+	bool wol_en;
 	u32 uc_macs;		/* Count of secondary UC MAC programmed */
 	u16 asic_rev;
 	u16 qnq_vid;
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 94c35c8d799d..48076a6370c3 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -1101,23 +1101,22 @@ static int be_cmd_mccq_ext_create(struct be_adapter *adapter,
 			OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req), wrb, NULL);
 
 	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
-	if (lancer_chip(adapter)) {
-		req->hdr.version = 1;
-		req->cq_id = cpu_to_le16(cq->id);
-
-		AMAP_SET_BITS(struct amap_mcc_context_lancer, ring_size, ctxt,
-						be_encoded_q_len(mccq->len));
-		AMAP_SET_BITS(struct amap_mcc_context_lancer, valid, ctxt, 1);
-		AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_id,
-								ctxt, cq->id);
-		AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_valid,
-								 ctxt, 1);
-
-	} else {
+	if (BEx_chip(adapter)) {
 		AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
 		AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
 						be_encoded_q_len(mccq->len));
 		AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
+	} else {
+		req->hdr.version = 1;
+		req->cq_id = cpu_to_le16(cq->id);
+
+		AMAP_SET_BITS(struct amap_mcc_context_v1, ring_size, ctxt,
+			      be_encoded_q_len(mccq->len));
+		AMAP_SET_BITS(struct amap_mcc_context_v1, valid, ctxt, 1);
+		AMAP_SET_BITS(struct amap_mcc_context_v1, async_cq_id,
+			      ctxt, cq->id);
+		AMAP_SET_BITS(struct amap_mcc_context_v1, async_cq_valid,
+			      ctxt, 1);
 	}
 
 	/* Subscribe to Link State and Group 5 Events(bits 1 and 5 set) */
@@ -1187,7 +1186,7 @@ int be_cmd_mccq_create(struct be_adapter *adapter,
 	int status;
 
 	status = be_cmd_mccq_ext_create(adapter, mccq, cq);
-	if (status && !lancer_chip(adapter)) {
+	if (status && BEx_chip(adapter)) {
 		dev_warn(&adapter->pdev->dev, "Upgrade to F/W ver 2.102.235.0 "
 			"or newer to avoid conflicting priorities between NIC "
 			"and FCoE traffic");
@@ -2692,6 +2691,13 @@ int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege,
 		struct be_cmd_resp_get_fn_privileges *resp =
 						embedded_payload(wrb);
 		*privilege = le32_to_cpu(resp->privilege_mask);
+
+		/* In UMC mode the FW does not return the right privileges.
+		 * Override with the correct privileges, equivalent to a PF's.
+		 */
+		if (BEx_chip(adapter) && be_is_mc(adapter) &&
+		    be_physfn(adapter))
+			*privilege = MAX_PRIVILEGES;
 	}
 
 err:
@@ -2736,7 +2742,8 @@ err:
  *		  If pmac_id is returned, pmac_id_valid is returned as true
  */
 int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
-			     bool *pmac_id_valid, u32 *pmac_id, u8 domain)
+			     bool *pmac_id_valid, u32 *pmac_id, u32 if_handle,
+			     u8 domain)
 {
 	struct be_mcc_wrb *wrb;
 	struct be_cmd_req_get_mac_list *req;
@@ -2774,7 +2781,7 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
 	req->mac_type = MAC_ADDRESS_TYPE_NETWORK;
 	if (*pmac_id_valid) {
 		req->mac_id = cpu_to_le32(*pmac_id);
-		req->iface_id = cpu_to_le16(adapter->if_handle);
+		req->iface_id = cpu_to_le16(if_handle);
 		req->perm_override = 0;
 	} else {
 		req->perm_override = 1;
@@ -2827,17 +2834,21 @@ out:
 	return status;
 }
 
-int be_cmd_get_active_mac(struct be_adapter *adapter, u32 curr_pmac_id, u8 *mac)
+int be_cmd_get_active_mac(struct be_adapter *adapter, u32 curr_pmac_id, u8 *mac,
+			  u32 if_handle, bool active, u32 domain)
 {
-	bool active = true;
 
+	if (!active)
+		be_cmd_get_mac_from_list(adapter, mac, &active, &curr_pmac_id,
+					 if_handle, domain);
 	if (BEx_chip(adapter))
 		return be_cmd_mac_addr_query(adapter, mac, false,
-					     adapter->if_handle, curr_pmac_id);
+					     if_handle, curr_pmac_id);
 	else
 		/* Fetch the MAC address using pmac_id */
 		return be_cmd_get_mac_from_list(adapter, mac, &active,
-						&curr_pmac_id, 0);
+						&curr_pmac_id,
+						if_handle, domain);
 }
 
 int be_cmd_get_perm_mac(struct be_adapter *adapter, u8 *mac)
@@ -2856,7 +2867,7 @@ int be_cmd_get_perm_mac(struct be_adapter *adapter, u8 *mac)
 						       adapter->if_handle, 0);
 	} else {
 		status = be_cmd_get_mac_from_list(adapter, mac, &pmac_valid,
-						  NULL, 0);
+						  NULL, adapter->if_handle, 0);
 	}
 
 	return status;
@@ -2917,7 +2928,8 @@ int be_cmd_set_mac(struct be_adapter *adapter, u8 *mac, int if_id, u32 dom)
 	int status;
 
 	status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
-					  &pmac_id, dom);
+					  &pmac_id, if_id, dom);
+
 	if (!status && active_mac)
 		be_cmd_pmac_del(adapter, if_id, pmac_id, dom);
 
@@ -2997,7 +3009,7 @@ int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
 		      ctxt, intf_id);
 	AMAP_SET_BITS(struct amap_get_hsw_req_context, pvid_valid, ctxt, 1);
 
-	if (!BEx_chip(adapter)) {
+	if (!BEx_chip(adapter) && mode) {
 		AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id,
 			      ctxt, adapter->hba_port_num);
 		AMAP_SET_BITS(struct amap_get_hsw_req_context, pport, ctxt, 1);
@@ -3028,14 +3040,16 @@ int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
 {
 	struct be_mcc_wrb *wrb;
 	struct be_cmd_req_acpi_wol_magic_config_v1 *req;
-	int status;
-	int payload_len = sizeof(*req);
+	int status = 0;
 	struct be_dma_mem cmd;
 
 	if (!be_cmd_allowed(adapter, OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
 			    CMD_SUBSYSTEM_ETH))
 		return -EPERM;
 
+	if (be_is_wol_excluded(adapter))
+		return status;
+
 	if (mutex_lock_interruptible(&adapter->mbox_lock))
 		return -1;
 
@@ -3060,7 +3074,7 @@ int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
 
 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
 			       OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
-			       payload_len, wrb, &cmd);
+			       sizeof(*req), wrb, &cmd);
 
 	req->hdr.version = 1;
 	req->query_options = BE_GET_WOL_CAP;
@@ -3070,13 +3084,9 @@ int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
 		struct be_cmd_resp_acpi_wol_magic_config_v1 *resp;
 		resp = (struct be_cmd_resp_acpi_wol_magic_config_v1 *) cmd.va;
 
-		/* the command could succeed misleadingly on old f/w
-		 * which is not aware of the V1 version. fake an error. */
-		if (resp->hdr.response_length < payload_len) {
-			status = -1;
-			goto err;
-		}
 		adapter->wol_cap = resp->wol_settings;
+		if (adapter->wol_cap & BE_WOL_CAP)
+			adapter->wol_en = true;
 	}
 err:
 	mutex_unlock(&adapter->mbox_lock);
@@ -3085,6 +3095,76 @@ err:
 	return status;
 
 }
+
+int be_cmd_set_fw_log_level(struct be_adapter *adapter, u32 level)
+{
+	struct be_dma_mem extfat_cmd;
+	struct be_fat_conf_params *cfgs;
+	int status;
+	int i, j;
+
+	memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
+	extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
+	extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
+					     &extfat_cmd.dma);
+	if (!extfat_cmd.va)
+		return -ENOMEM;
+
+	status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
+	if (status)
+		goto err;
+
+	cfgs = (struct be_fat_conf_params *)
+			(extfat_cmd.va + sizeof(struct be_cmd_resp_hdr));
+	for (i = 0; i < le32_to_cpu(cfgs->num_modules); i++) {
+		u32 num_modes = le32_to_cpu(cfgs->module[i].num_modes);
+		for (j = 0; j < num_modes; j++) {
+			if (cfgs->module[i].trace_lvl[j].mode == MODE_UART)
+				cfgs->module[i].trace_lvl[j].dbg_lvl =
+							cpu_to_le32(level);
+		}
+	}
+
+	status = be_cmd_set_ext_fat_capabilites(adapter, &extfat_cmd, cfgs);
+err:
+	pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
+			    extfat_cmd.dma);
+	return status;
+}
+
+int be_cmd_get_fw_log_level(struct be_adapter *adapter)
+{
+	struct be_dma_mem extfat_cmd;
+	struct be_fat_conf_params *cfgs;
+	int status, j;
+	int level = 0;
+
+	memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
+	extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
+	extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
+					     &extfat_cmd.dma);
+
+	if (!extfat_cmd.va) {
+		dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
+			__func__);
+		goto err;
+	}
+
+	status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
+	if (!status) {
+		cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
+						sizeof(struct be_cmd_resp_hdr));
+		for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
+			if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
+				level = cfgs->module[0].trace_lvl[j].dbg_lvl;
+		}
+	}
+	pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
+			    extfat_cmd.dma);
+err:
+	return level;
+}
+
 int be_cmd_get_ext_fat_capabilites(struct be_adapter *adapter,
 				   struct be_dma_mem *cmd)
 {
@@ -3609,6 +3689,40 @@ int be_cmd_intr_set(struct be_adapter *adapter, bool intr_enable)
 	return status;
 }
 
+/* Uses MBOX */
+int be_cmd_get_active_profile(struct be_adapter *adapter, u16 *profile_id)
+{
+	struct be_cmd_req_get_active_profile *req;
+	struct be_mcc_wrb *wrb;
+	int status;
+
+	if (mutex_lock_interruptible(&adapter->mbox_lock))
+		return -1;
+
+	wrb = wrb_from_mbox(adapter);
+	if (!wrb) {
+		status = -EBUSY;
+		goto err;
+	}
+
+	req = embedded_payload(wrb);
+
+	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+			       OPCODE_COMMON_GET_ACTIVE_PROFILE, sizeof(*req),
+			       wrb, NULL);
+
+	status = be_mbox_notify_wait(adapter);
+	if (!status) {
+		struct be_cmd_resp_get_active_profile *resp =
+							embedded_payload(wrb);
+		*profile_id = le16_to_cpu(resp->active_profile_id);
+	}
+
+err:
+	mutex_unlock(&adapter->mbox_lock);
+	return status;
+}
+
 int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
 			int wrb_payload_size, u16 *cmd_status, u16 *ext_status)
 {
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index 0075686276aa..fc4e076dc202 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -216,6 +216,7 @@ struct be_mcc_mailbox {
 #define OPCODE_COMMON_GET_FUNC_CONFIG			160
 #define OPCODE_COMMON_GET_PROFILE_CONFIG		164
 #define OPCODE_COMMON_SET_PROFILE_CONFIG		165
+#define OPCODE_COMMON_GET_ACTIVE_PROFILE		167
 #define OPCODE_COMMON_SET_HSW_CONFIG			153
 #define OPCODE_COMMON_GET_FN_PRIVILEGES			170
 #define OPCODE_COMMON_READ_OBJECT			171
@@ -452,7 +453,7 @@ struct amap_mcc_context_be {
 	u8 rsvd2[32];
 } __packed;
 
-struct amap_mcc_context_lancer {
+struct amap_mcc_context_v1 {
 	u8 async_cq_id[16];
 	u8 ring_size[4];
 	u8 rsvd0[12];
@@ -476,7 +477,7 @@ struct be_cmd_req_mcc_ext_create {
 	u16 num_pages;
 	u16 cq_id;
 	u32 async_event_bitmap[1];
-	u8 context[sizeof(struct amap_mcc_context_be) / 8];
+	u8 context[sizeof(struct amap_mcc_context_v1) / 8];
 	struct phys_addr pages[8];
 } __packed;
 
@@ -1097,6 +1098,14 @@ struct be_cmd_resp_query_fw_cfg {
 	u32 function_caps;
 };
 
+/* Is BE in a multi-channel mode */
+static inline bool be_is_mc(struct be_adapter *adapter)
+{
+	return adapter->function_mode & FLEX10_MODE ||
+		adapter->function_mode & VNIC_MODE ||
+		adapter->function_mode & UMC_ENABLED;
+}
+
 /******************** RSS Config ****************************************/
 /* RSS type		Input parameters used to compute RX hash
  * RSS_ENABLE_IPV4	SRC IPv4, DST IPv4
@@ -1917,6 +1926,17 @@ struct be_cmd_resp_set_profile_config {
 	struct be_cmd_resp_hdr hdr;
 };
 
+struct be_cmd_req_get_active_profile {
+	struct be_cmd_req_hdr hdr;
+	u32 rsvd;
+} __packed;
+
+struct be_cmd_resp_get_active_profile {
+	struct be_cmd_resp_hdr hdr;
+	u16 active_profile_id;
+	u16 next_profile_id;
+} __packed;
+
 struct be_cmd_enable_disable_vf {
 	struct be_cmd_req_hdr hdr;
 	u8 enable;
@@ -2037,8 +2057,10 @@ int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege,
 int be_cmd_set_fn_privileges(struct be_adapter *adapter, u32 privileges,
 			     u32 vf_num);
 int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
-			     bool *pmac_id_active, u32 *pmac_id, u8 domain);
-int be_cmd_get_active_mac(struct be_adapter *adapter, u32 pmac_id, u8 *mac);
+			     bool *pmac_id_active, u32 *pmac_id,
+			     u32 if_handle, u8 domain);
+int be_cmd_get_active_mac(struct be_adapter *adapter, u32 pmac_id, u8 *mac,
+			  u32 if_handle, bool active, u32 domain);
 int be_cmd_get_perm_mac(struct be_adapter *adapter, u8 *mac);
 int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array, u8 mac_count,
 			u32 domain);
@@ -2048,6 +2070,8 @@ int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid, u32 domain,
 int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid, u32 domain,
 			  u16 intf_id, u8 *mode);
 int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter);
+int be_cmd_set_fw_log_level(struct be_adapter *adapter, u32 level);
+int be_cmd_get_fw_log_level(struct be_adapter *adapter);
 int be_cmd_get_ext_fat_capabilites(struct be_adapter *adapter,
 				   struct be_dma_mem *cmd);
 int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter,
@@ -2063,6 +2087,7 @@ int be_cmd_get_func_config(struct be_adapter *adapter,
 int be_cmd_get_profile_config(struct be_adapter *adapter,
 			      struct be_resources *res, u8 domain);
 int be_cmd_set_profile_config(struct be_adapter *adapter, u32 bps, u8 domain);
+int be_cmd_get_active_profile(struct be_adapter *adapter, u16 *profile);
 int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg,
 		     int vf_num);
 int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain);
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index 08330034d9ef..05be0070f55f 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -713,12 +713,13 @@ be_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
 {
 	struct be_adapter *adapter = netdev_priv(netdev);
 
-	if (be_is_wol_supported(adapter)) {
+	if (adapter->wol_cap & BE_WOL_CAP) {
 		wol->supported |= WAKE_MAGIC;
-		if (adapter->wol)
+		if (adapter->wol_en)
 			wol->wolopts |= WAKE_MAGIC;
-	} else
+	} else {
 		wol->wolopts = 0;
+	}
 	memset(&wol->sopass, 0, sizeof(wol->sopass));
 }
 
@@ -730,15 +731,15 @@ be_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
 	if (wol->wolopts & ~WAKE_MAGIC)
 		return -EOPNOTSUPP;
 
-	if (!be_is_wol_supported(adapter)) {
+	if (!(adapter->wol_cap & BE_WOL_CAP)) {
 		dev_warn(&adapter->pdev->dev, "WOL not supported\n");
 		return -EOPNOTSUPP;
 	}
 
 	if (wol->wolopts & WAKE_MAGIC)
-		adapter->wol = true;
+		adapter->wol_en = true;
 	else
-		adapter->wol = false;
+		adapter->wol_en = false;
 
 	return 0;
 }
@@ -904,73 +905,21 @@ static u32 be_get_msg_level(struct net_device *netdev)
 {
 	struct be_adapter *adapter = netdev_priv(netdev);
 
-	if (lancer_chip(adapter)) {
-		dev_err(&adapter->pdev->dev, "Operation not supported\n");
-		return -EOPNOTSUPP;
-	}
-
 	return adapter->msg_enable;
 }
 
-static void be_set_fw_log_level(struct be_adapter *adapter, u32 level)
-{
-	struct be_dma_mem extfat_cmd;
-	struct be_fat_conf_params *cfgs;
-	int status;
-	int i, j;
-
-	memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
-	extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
-	extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
-					     &extfat_cmd.dma);
-	if (!extfat_cmd.va) {
-		dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
-			__func__);
-		goto err;
-	}
-	status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
-	if (!status) {
-		cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
-					sizeof(struct be_cmd_resp_hdr));
-		for (i = 0; i < le32_to_cpu(cfgs->num_modules); i++) {
-			u32 num_modes = le32_to_cpu(cfgs->module[i].num_modes);
-			for (j = 0; j < num_modes; j++) {
-				if (cfgs->module[i].trace_lvl[j].mode ==
-								MODE_UART)
-					cfgs->module[i].trace_lvl[j].dbg_lvl =
-							cpu_to_le32(level);
-			}
-		}
-		status = be_cmd_set_ext_fat_capabilites(adapter, &extfat_cmd,
-							cfgs);
-		if (status)
-			dev_err(&adapter->pdev->dev,
-				"Message level set failed\n");
-	} else {
-		dev_err(&adapter->pdev->dev, "Message level get failed\n");
-	}
-
-	pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
-			    extfat_cmd.dma);
-err:
-	return;
-}
-
 static void be_set_msg_level(struct net_device *netdev, u32 level)
 {
 	struct be_adapter *adapter = netdev_priv(netdev);
 
-	if (lancer_chip(adapter)) {
-		dev_err(&adapter->pdev->dev, "Operation not supported\n");
-		return;
-	}
-
 	if (adapter->msg_enable == level)
 		return;
 
 	if ((level & NETIF_MSG_HW) != (adapter->msg_enable & NETIF_MSG_HW))
-		be_set_fw_log_level(adapter, level & NETIF_MSG_HW ?
-				    FW_LOG_LEVEL_DEFAULT : FW_LOG_LEVEL_FATAL);
+		if (BEx_chip(adapter))
+			be_cmd_set_fw_log_level(adapter, level & NETIF_MSG_HW ?
+						FW_LOG_LEVEL_DEFAULT :
+						FW_LOG_LEVEL_FATAL);
 	adapter->msg_enable = level;
 
 	return;
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index a37039d353c5..04ac9c6a0d39 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -121,12 +121,6 @@ static const char * const ue_status_hi_desc[] = {
 	"Unknown"
 };
 
-/* Is BE in a multi-channel mode */
-static inline bool be_is_mc(struct be_adapter *adapter) {
-	return (adapter->function_mode & FLEX10_MODE ||
-		adapter->function_mode & VNIC_MODE ||
-		adapter->function_mode & UMC_ENABLED);
-}
 
 static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
 {
@@ -258,6 +252,12 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
 	if (!is_valid_ether_addr(addr->sa_data))
 		return -EADDRNOTAVAIL;
 
+	/* Proceed further only if the user-provided MAC is different
+	 * from the active MAC
+	 */
+	if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
+		return 0;
+
 	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
 	 * privilege or if PF did not provision the new MAC address.
 	 * On BE3, this cmd will always fail if the VF doesn't have the
@@ -280,14 +280,15 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
 	/* Decide if the new MAC is successfully activated only after
 	 * querying the FW
 	 */
-	status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac);
+	status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
+				       adapter->if_handle, true, 0);
 	if (status)
 		goto err;
 
 	/* The MAC change did not happen, either due to lack of privilege
 	 * or PF didn't pre-provision.
 	 */
-	if (memcmp(addr->sa_data, mac, ETH_ALEN)) {
+	if (!ether_addr_equal(addr->sa_data, mac)) {
 		status = -EPERM;
 		goto err;
 	}
@@ -1096,8 +1097,6 @@ static int be_vid_config(struct be_adapter *adapter)
 				dev_info(&adapter->pdev->dev,
 					 "Disabling VLAN Promiscuous mode.\n");
 				adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
-				dev_info(&adapter->pdev->dev,
-					 "Re-Enabling HW VLAN filtering\n");
 			}
 		}
 	}
@@ -1105,12 +1104,12 @@ static int be_vid_config(struct be_adapter *adapter)
 	return status;
 
 set_vlan_promisc:
-	dev_warn(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
+	if (adapter->flags & BE_FLAGS_VLAN_PROMISC)
+		return 0;
 
 	status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON);
 	if (!status) {
 		dev_info(&adapter->pdev->dev, "Enable VLAN Promiscuous mode\n");
-		dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering\n");
 		adapter->flags |= BE_FLAGS_VLAN_PROMISC;
 	} else
 		dev_err(&adapter->pdev->dev,
@@ -1123,19 +1122,18 @@ static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
 	struct be_adapter *adapter = netdev_priv(netdev);
 	int status = 0;
 
-
 	/* Packets with VID 0 are always received by Lancer by default */
 	if (lancer_chip(adapter) && vid == 0)
 		goto ret;
 
 	adapter->vlan_tag[vid] = 1;
-	if (adapter->vlans_added <= (be_max_vlans(adapter) + 1))
-		status = be_vid_config(adapter);
+	adapter->vlans_added++;
 
-	if (!status)
-		adapter->vlans_added++;
-	else
+	status = be_vid_config(adapter);
+	if (status) {
+		adapter->vlans_added--;
 		adapter->vlan_tag[vid] = 0;
+	}
 ret:
 	return status;
 }
@@ -1150,9 +1148,7 @@ static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
 		goto ret;
 
 	adapter->vlan_tag[vid] = 0;
-	if (adapter->vlans_added <= be_max_vlans(adapter))
-		status = be_vid_config(adapter);
-
+	status = be_vid_config(adapter);
 	if (!status)
 		adapter->vlans_added--;
 	else
@@ -1442,12 +1438,12 @@ static inline bool csum_passed(struct be_rx_compl_info *rxcp)
 				(rxcp->ip_csum || rxcp->ipv6);
 }
 
-static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
-						u16 frag_idx)
+static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
 {
 	struct be_adapter *adapter = rxo->adapter;
 	struct be_rx_page_info *rx_page_info;
 	struct be_queue_info *rxq = &rxo->q;
+	u16 frag_idx = rxq->tail;
 
 	rx_page_info = &rxo->page_info_tbl[frag_idx];
 	BUG_ON(!rx_page_info->page);
@@ -1459,6 +1455,7 @@ static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
 		rx_page_info->last_page_user = false;
 	}
 
+	queue_tail_inc(rxq);
 	atomic_dec(&rxq->used);
 	return rx_page_info;
 }
@@ -1467,15 +1464,13 @@ static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
 static void be_rx_compl_discard(struct be_rx_obj *rxo,
 				struct be_rx_compl_info *rxcp)
 {
-	struct be_queue_info *rxq = &rxo->q;
 	struct be_rx_page_info *page_info;
 	u16 i, num_rcvd = rxcp->num_rcvd;
 
 	for (i = 0; i < num_rcvd; i++) {
-		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
+		page_info = get_rx_page_info(rxo);
 		put_page(page_info->page);
 		memset(page_info, 0, sizeof(*page_info));
-		index_inc(&rxcp->rxq_idx, rxq->len);
 	}
 }
 
@@ -1486,13 +1481,12 @@ static void be_rx_compl_discard(struct be_rx_obj *rxo,
 static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
 			     struct be_rx_compl_info *rxcp)
 {
-	struct be_queue_info *rxq = &rxo->q;
 	struct be_rx_page_info *page_info;
 	u16 i, j;
 	u16 hdr_len, curr_frag_len, remaining;
 	u8 *start;
 
-	page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
+	page_info = get_rx_page_info(rxo);
 	start = page_address(page_info->page) + page_info->page_offset;
 	prefetch(start);
 
@@ -1526,10 +1520,9 @@ static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
 	}
 
 	/* More frags present for this completion */
-	index_inc(&rxcp->rxq_idx, rxq->len);
 	remaining = rxcp->pkt_size - curr_frag_len;
 	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
-		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
+		page_info = get_rx_page_info(rxo);
 		curr_frag_len = min(remaining, rx_frag_size);
 
 		/* Coalesce all frags from the same physical page in one slot */
@@ -1550,7 +1543,6 @@ static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
 		skb->data_len += curr_frag_len;
 		skb->truesize += rx_frag_size;
 		remaining -= curr_frag_len;
-		index_inc(&rxcp->rxq_idx, rxq->len);
 		page_info->page = NULL;
 	}
 	BUG_ON(j > MAX_SKB_FRAGS);
@@ -1581,7 +1573,7 @@ static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
 	skb->protocol = eth_type_trans(skb, netdev);
 	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
 	if (netdev->features & NETIF_F_RXHASH)
-		skb->rxhash = rxcp->rss_hash;
+		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
 	skb_mark_napi_id(skb, napi);
 
 	if (rxcp->vlanf)
@@ -1598,7 +1590,6 @@ static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
 	struct be_adapter *adapter = rxo->adapter;
 	struct be_rx_page_info *page_info;
 	struct sk_buff *skb = NULL;
-	struct be_queue_info *rxq = &rxo->q;
 	u16 remaining, curr_frag_len;
 	u16 i, j;
 
@@ -1610,7 +1601,7 @@ static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
 
 	remaining = rxcp->pkt_size;
 	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
-		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
+		page_info = get_rx_page_info(rxo);
 
 		curr_frag_len = min(remaining, rx_frag_size);
 
@@ -1628,7 +1619,6 @@ static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
 		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
 		skb->truesize += rx_frag_size;
 		remaining -= curr_frag_len;
-		index_inc(&rxcp->rxq_idx, rxq->len);
 		memset(page_info, 0, sizeof(*page_info));
 	}
 	BUG_ON(j > MAX_SKB_FRAGS);
@@ -1639,7 +1629,7 @@ static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
 	skb->ip_summed = CHECKSUM_UNNECESSARY;
 	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
 	if (adapter->netdev->features & NETIF_F_RXHASH)
-		skb->rxhash = rxcp->rss_hash;
+		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
 	skb_mark_napi_id(skb, napi);
 
 	if (rxcp->vlanf)
@@ -1663,8 +1653,6 @@ static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
 		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
 	rxcp->ipv6 =
 		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
-	rxcp->rxq_idx =
-		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
 	rxcp->num_rcvd =
 		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
 	rxcp->pkt_type =
@@ -1695,8 +1683,6 @@ static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
 		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
 	rxcp->ipv6 =
 		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
-	rxcp->rxq_idx =
-		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
 	rxcp->num_rcvd =
 		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
 	rxcp->pkt_type =
@@ -1921,7 +1907,6 @@ static void be_rx_cq_clean(struct be_rx_obj *rxo)
 	struct be_rx_compl_info *rxcp;
 	struct be_adapter *adapter = rxo->adapter;
 	int flush_wait = 0;
-	u16 tail;
 
 	/* Consume pending rx completions.
 	 * Wait for the flush completion (identified by zero num_rcvd)
@@ -1954,9 +1939,8 @@ static void be_rx_cq_clean(struct be_rx_obj *rxo)
 	be_cq_notify(adapter, rx_cq->id, false, 0);
 
 	/* Then free posted rx buffers that were not used */
-	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
-	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
-		page_info = get_rx_page_info(rxo, tail);
+	while (atomic_read(&rxq->used) > 0) {
+		page_info = get_rx_page_info(rxo);
 		put_page(page_info->page);
 		memset(page_info, 0, sizeof(*page_info));
 	}
@@ -2891,14 +2875,11 @@ static int be_vfs_mac_query(struct be_adapter *adapter)
 	int status, vf;
 	u8 mac[ETH_ALEN];
 	struct be_vf_cfg *vf_cfg;
-	bool active = false;
 
 	for_all_vfs(adapter, vf_cfg, vf) {
-		be_cmd_get_mac_from_list(adapter, mac, &active,
-					 &vf_cfg->pmac_id, 0);
-
-		status = be_cmd_mac_addr_query(adapter, mac, false,
-					       vf_cfg->if_handle, 0);
+		status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
+					       mac, vf_cfg->if_handle,
+					       false, vf+1);
 		if (status)
 			return status;
 		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
@@ -3240,6 +3221,7 @@ static int be_get_resources(struct be_adapter *adapter)
 /* Routine to query per function resource limits */
 static int be_get_config(struct be_adapter *adapter)
 {
+	u16 profile_id;
 	int status;
 
 	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
@@ -3249,6 +3231,13 @@ static int be_get_config(struct be_adapter *adapter)
 	if (status)
 		return status;
 
+	if (be_physfn(adapter)) {
+		status = be_cmd_get_active_profile(adapter, &profile_id);
+		if (!status)
+			dev_info(&adapter->pdev->dev,
+				 "Using profile 0x%x\n", profile_id);
+	}
+
 	status = be_get_resources(adapter);
 	if (status)
 		return status;
@@ -3403,11 +3392,6 @@ static int be_setup(struct be_adapter *adapter)
 		goto err;
 
 	be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
-	/* In UMC mode FW does not return right privileges.
-	 * Override with correct privilege equivalent to PF.
-	 */
-	if (be_is_mc(adapter))
-		adapter->cmd_privileges = MAX_PRIVILEGES;
 
 	status = be_mac_setup(adapter);
 	if (status)
@@ -3426,6 +3410,8 @@ static int be_setup(struct be_adapter *adapter)
 
 	be_set_rx_mode(adapter->netdev);
 
+	be_cmd_get_acpi_wol_cap(adapter);
+
 	be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
 
 	if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
@@ -4295,74 +4281,22 @@ static void be_remove(struct pci_dev *pdev)
 	free_netdev(adapter->netdev);
 }
 
-bool be_is_wol_supported(struct be_adapter *adapter)
-{
-	return ((adapter->wol_cap & BE_WOL_CAP) &&
-		!be_is_wol_excluded(adapter)) ? true : false;
-}
-
-u32 be_get_fw_log_level(struct be_adapter *adapter)
-{
-	struct be_dma_mem extfat_cmd;
-	struct be_fat_conf_params *cfgs;
-	int status;
-	u32 level = 0;
-	int j;
-
-	if (lancer_chip(adapter))
-		return 0;
-
-	memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
-	extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
-	extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
-					     &extfat_cmd.dma);
-
-	if (!extfat_cmd.va) {
-		dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
-			__func__);
-		goto err;
-	}
-
-	status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
-	if (!status) {
-		cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
-						sizeof(struct be_cmd_resp_hdr));
-		for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
-			if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
-				level = cfgs->module[0].trace_lvl[j].dbg_lvl;
-		}
-	}
-	pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
-			    extfat_cmd.dma);
-err:
-	return level;
-}
-
 static int be_get_initial_config(struct be_adapter *adapter)
 {
-	int status;
-	u32 level;
+	int status, level;
 
 	status = be_cmd_get_cntl_attributes(adapter);
 	if (status)
 		return status;
 
-	status = be_cmd_get_acpi_wol_cap(adapter);
-	if (status) {
-		/* in case of a failure to get wol capabillities
-		 * check the exclusion list to determine WOL capability */
-		if (!be_is_wol_excluded(adapter))
-			adapter->wol_cap |= BE_WOL_CAP;
-	}
-
-	if (be_is_wol_supported(adapter))
-		adapter->wol = true;
-
 	/* Must be a power of 2 or else MODULO will BUG_ON */
 	adapter->be_get_temp_freq = 64;
 
-	level = be_get_fw_log_level(adapter);
-	adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
+	if (BEx_chip(adapter)) {
+		level = be_cmd_get_fw_log_level(adapter);
+		adapter->msg_enable =
+			level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
+	}
 
 	adapter->cfg_num_qs = netif_get_num_default_rss_queues();
 	return 0;
@@ -4625,7 +4559,7 @@ static int be_suspend(struct pci_dev *pdev, pm_message_t state)
 	struct be_adapter *adapter = pci_get_drvdata(pdev);
 	struct net_device *netdev =  adapter->netdev;
 
-	if (adapter->wol)
+	if (adapter->wol_en)
 		be_setup_wol(adapter, true);
 
 	be_intr_set(adapter, false);
@@ -4681,7 +4615,7 @@ static int be_resume(struct pci_dev *pdev)
 			      msecs_to_jiffies(1000));
 	netif_device_attach(netdev);
 
-	if (adapter->wol)
+	if (adapter->wol_en)
 		be_setup_wol(adapter, false);
 
 	return 0;
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 212f44b3a773..c11ecbc98149 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -24,7 +24,6 @@
 #include <linux/dma-mapping.h>
 #include <linux/etherdevice.h>
 #include <linux/ethtool.h>
-#include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
 #include <linux/module.h>
@@ -767,7 +766,7 @@ static void ftgmac100_free_buffers(struct ftgmac100 *priv)
 			continue;
 
 		dma_unmap_single(priv->dev, map, skb_headlen(skb), DMA_TO_DEVICE);
-		dev_kfree_skb(skb);
+		kfree_skb(skb);
 	}
 
 	dma_free_coherent(priv->dev, sizeof(struct ftgmac100_descs),
@@ -1149,7 +1148,7 @@ static int ftgmac100_hard_start_xmit(struct sk_buff *skb,
 			netdev_dbg(netdev, "tx packet too big\n");
 
 		netdev->stats.tx_dropped++;
-		dev_kfree_skb(skb);
+		kfree_skb(skb);
 		return NETDEV_TX_OK;
 	}
 
@@ -1160,7 +1159,7 @@ static int ftgmac100_hard_start_xmit(struct sk_buff *skb,
 			netdev_err(netdev, "map socket buffer failed\n");
 
 		netdev->stats.tx_dropped++;
-		dev_kfree_skb(skb);
+		kfree_skb(skb);
 		return NETDEV_TX_OK;
 	}
 
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 0120217a16dd..3b8d6d19ff05 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -339,7 +339,8 @@ struct fec_enet_private {
 
 void fec_ptp_init(struct platform_device *pdev);
 void fec_ptp_start_cyclecounter(struct net_device *ndev);
-int fec_ptp_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd);
+int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr);
+int fec_ptp_get(struct net_device *ndev, struct ifreq *ifr);
 
 /****************************************************************************/
 #endif /* FEC_H */
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 50bb71c663e2..d4782b42401b 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -29,7 +29,6 @@
 #include <linux/ioport.h>
 #include <linux/slab.h>
 #include <linux/interrupt.h>
-#include <linux/init.h>
 #include <linux/delay.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
@@ -1679,8 +1678,12 @@ static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
 	if (!phydev)
 		return -ENODEV;
 
-	if (cmd == SIOCSHWTSTAMP && fep->bufdesc_ex)
-		return fec_ptp_ioctl(ndev, rq, cmd);
+	if (fep->bufdesc_ex) {
+		if (cmd == SIOCSHWTSTAMP)
+			return fec_ptp_set(ndev, rq);
+		if (cmd == SIOCGHWTSTAMP)
+			return fec_ptp_get(ndev, rq);
+	}
 
 	return phy_mii_ioctl(phydev, rq, cmd);
 }
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index 5007e4f9fff9..89ccb5b08708 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -28,7 +28,6 @@
 #include <linux/slab.h>
 #include <linux/interrupt.h>
 #include <linux/pci.h>
-#include <linux/init.h>
 #include <linux/delay.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
@@ -274,7 +273,7 @@ static int fec_ptp_enable(struct ptp_clock_info *ptp,
  * @ifreq: ioctl data
  * @cmd: particular ioctl requested
  */
-int fec_ptp_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
+int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr)
 {
 	struct fec_enet_private *fep = netdev_priv(ndev);
 
@@ -321,6 +320,20 @@ int fec_ptp_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
 	    -EFAULT : 0;
 }
 
+int fec_ptp_get(struct net_device *ndev, struct ifreq *ifr)
+{
+	struct fec_enet_private *fep = netdev_priv(ndev);
+	struct hwtstamp_config config;
+
+	config.flags = 0;
+	config.tx_type = fep->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+	config.rx_filter = (fep->hwts_rx_en ?
+			    HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE);
+
+	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+		-EFAULT : 0;
+}
+
 /**
  * fec_time_keep - call timecounter_read every second to avoid timer overrun
  *                 because ENET just support 32bit counter, will timeout in 4s
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index 56f2f608a9f4..62f042d4aaa9 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -24,7 +24,6 @@
 #include <linux/ioport.h>
 #include <linux/slab.h>
 #include <linux/interrupt.h>
-#include <linux/init.h>
 #include <linux/delay.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c b/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c
index f8b92864fc52..f5383abbf399 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c
@@ -20,7 +20,6 @@
 #include <linux/errno.h>
 #include <linux/ioport.h>
 #include <linux/interrupt.h>
-#include <linux/init.h>
 #include <linux/delay.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
index a9a00f39521a..fc5413488496 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
@@ -20,7 +20,6 @@
 #include <linux/errno.h>
 #include <linux/ioport.h>
 #include <linux/interrupt.h>
-#include <linux/init.h>
 #include <linux/delay.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-scc.c b/drivers/net/ethernet/freescale/fs_enet/mac-scc.c
index d37cd4ebac65..b4bf02f57d43 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-scc.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-scc.c
@@ -20,7 +20,6 @@
 #include <linux/errno.h>
 #include <linux/ioport.h>
 #include <linux/interrupt.h>
-#include <linux/init.h>
 #include <linux/delay.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
index 67caaacd19ec..3d3fde66c2cc 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
@@ -15,7 +15,6 @@
 #include <linux/module.h>
 #include <linux/ioport.h>
 #include <linux/slab.h>
-#include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
index ac5d447ff8c4..7e69c983d12a 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
@@ -21,7 +21,6 @@
 #include <linux/ioport.h>
 #include <linux/slab.h>
 #include <linux/interrupt.h>
-#include <linux/init.h>
 #include <linux/delay.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
index c4f65067cf7c..583e71ab7f51 100644
--- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c
+++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
@@ -20,7 +20,6 @@
 #include <linux/string.h>
 #include <linux/errno.h>
 #include <linux/slab.h>
-#include <linux/init.h>
 #include <linux/delay.h>
 #include <linux/module.h>
 #include <linux/mii.h>
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index b14d7904a075..ad5a5aadc7e1 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -70,7 +70,6 @@
 #include <linux/unistd.h>
 #include <linux/slab.h>
 #include <linux/interrupt.h>
-#include <linux/init.h>
 #include <linux/delay.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
@@ -795,8 +794,7 @@ err_grp_init:
 	return err;
 }
 
-static int gfar_hwtstamp_ioctl(struct net_device *netdev,
-			       struct ifreq *ifr, int cmd)
+static int gfar_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
 {
 	struct hwtstamp_config config;
 	struct gfar_private *priv = netdev_priv(netdev);
@@ -845,7 +843,20 @@ static int gfar_hwtstamp_ioctl(struct net_device *netdev,
 		-EFAULT : 0;
 }
 
-/* Ioctl MII Interface */
+static int gfar_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr)
+{
+	struct hwtstamp_config config;
+	struct gfar_private *priv = netdev_priv(netdev);
+
+	config.flags = 0;
+	config.tx_type = priv->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+	config.rx_filter = (priv->hwts_rx_en ?
+			    HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE);
+
+	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+		-EFAULT : 0;
+}
+
 static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 {
 	struct gfar_private *priv = netdev_priv(dev);
@@ -854,7 +865,9 @@ static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 		return -EINVAL;
 
 	if (cmd == SIOCSHWTSTAMP)
-		return gfar_hwtstamp_ioctl(dev, rq, cmd);
+		return gfar_hwtstamp_set(dev, rq);
+	if (cmd == SIOCGHWTSTAMP)
+		return gfar_hwtstamp_get(dev, rq);
 
 	if (!priv->phydev)
 		return -ENODEV;
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index 114c58f9d8d2..52bb2b0195cc 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -29,7 +29,6 @@
 #include <linux/errno.h>
 #include <linux/slab.h>
 #include <linux/interrupt.h>
-#include <linux/init.h>
 #include <linux/delay.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index d3d7ede27ef1..63d234419cc1 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -22,7 +22,6 @@
 #include <linux/string.h>
 #include <linux/errno.h>
 #include <linux/interrupt.h>
-#include <linux/init.h>
 #include <linux/delay.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
@@ -889,11 +888,9 @@ static int gfar_set_hash_opts(struct gfar_private *priv,
 
 static int gfar_check_filer_hardware(struct gfar_private *priv)
 {
-	struct gfar __iomem *regs = NULL;
+	struct gfar __iomem *regs = priv->gfargrp[0].regs;
 	u32 i;
 
-	regs = priv->gfargrp[0].regs;
-
 	/* Check if we are in FIFO mode */
 	i = gfar_read(&regs->ecntrl);
 	i &= ECNTRL_FIFM;
@@ -927,7 +924,7 @@ static int gfar_check_filer_hardware(struct gfar_private *priv)
 	/* Sets the properties for arbitrary filer rule
 	 * to the first 4 Layer 4 Bytes
 	 */
-	regs->rbifx = 0xC0C1C2C3;
+	gfar_write(&regs->rbifx, 0xC0C1C2C3);
 	return 0;
 }
 
@@ -1055,10 +1052,18 @@ static void gfar_set_basic_ip(struct ethtool_tcpip4_spec *value,
 			      struct ethtool_tcpip4_spec *mask,
 			      struct filer_table *tab)
 {
-	gfar_set_attribute(value->ip4src, mask->ip4src, RQFCR_PID_SIA, tab);
-	gfar_set_attribute(value->ip4dst, mask->ip4dst, RQFCR_PID_DIA, tab);
-	gfar_set_attribute(value->pdst, mask->pdst, RQFCR_PID_DPT, tab);
-	gfar_set_attribute(value->psrc, mask->psrc, RQFCR_PID_SPT, tab);
+	gfar_set_attribute(be32_to_cpu(value->ip4src),
+			   be32_to_cpu(mask->ip4src),
+			   RQFCR_PID_SIA, tab);
+	gfar_set_attribute(be32_to_cpu(value->ip4dst),
+			   be32_to_cpu(mask->ip4dst),
+			   RQFCR_PID_DIA, tab);
+	gfar_set_attribute(be16_to_cpu(value->pdst),
+			   be16_to_cpu(mask->pdst),
+			   RQFCR_PID_DPT, tab);
+	gfar_set_attribute(be16_to_cpu(value->psrc),
+			   be16_to_cpu(mask->psrc),
+			   RQFCR_PID_SPT, tab);
 	gfar_set_attribute(value->tos, mask->tos, RQFCR_PID_TOS, tab);
 }
 
@@ -1067,12 +1072,17 @@ static void gfar_set_user_ip(struct ethtool_usrip4_spec *value,
 			     struct ethtool_usrip4_spec *mask,
 			     struct filer_table *tab)
 {
-	gfar_set_attribute(value->ip4src, mask->ip4src, RQFCR_PID_SIA, tab);
-	gfar_set_attribute(value->ip4dst, mask->ip4dst, RQFCR_PID_DIA, tab);
+	gfar_set_attribute(be32_to_cpu(value->ip4src),
+			   be32_to_cpu(mask->ip4src),
+			   RQFCR_PID_SIA, tab);
+	gfar_set_attribute(be32_to_cpu(value->ip4dst),
+			   be32_to_cpu(mask->ip4dst),
+			   RQFCR_PID_DIA, tab);
 	gfar_set_attribute(value->tos, mask->tos, RQFCR_PID_TOS, tab);
 	gfar_set_attribute(value->proto, mask->proto, RQFCR_PID_L4P, tab);
-	gfar_set_attribute(value->l4_4_bytes, mask->l4_4_bytes, RQFCR_PID_ARB,
-			   tab);
+	gfar_set_attribute(be32_to_cpu(value->l4_4_bytes),
+			   be32_to_cpu(mask->l4_4_bytes),
+			   RQFCR_PID_ARB, tab);
 
 }
 
@@ -1139,7 +1149,41 @@ static void gfar_set_ether(struct ethhdr *value, struct ethhdr *mask,
 		}
 	}
 
-	gfar_set_attribute(value->h_proto, mask->h_proto, RQFCR_PID_ETY, tab);
+	gfar_set_attribute(be16_to_cpu(value->h_proto),
+			   be16_to_cpu(mask->h_proto),
+			   RQFCR_PID_ETY, tab);
+}
+
+static inline u32 vlan_tci_vid(struct ethtool_rx_flow_spec *rule)
+{
+	return be16_to_cpu(rule->h_ext.vlan_tci) & VLAN_VID_MASK;
+}
+
+static inline u32 vlan_tci_vidm(struct ethtool_rx_flow_spec *rule)
+{
+	return be16_to_cpu(rule->m_ext.vlan_tci) & VLAN_VID_MASK;
+}
+
+static inline u32 vlan_tci_cfi(struct ethtool_rx_flow_spec *rule)
+{
+	return be16_to_cpu(rule->h_ext.vlan_tci) & VLAN_CFI_MASK;
+}
+
+static inline u32 vlan_tci_cfim(struct ethtool_rx_flow_spec *rule)
+{
+	return be16_to_cpu(rule->m_ext.vlan_tci) & VLAN_CFI_MASK;
+}
+
+static inline u32 vlan_tci_prio(struct ethtool_rx_flow_spec *rule)
+{
+	return (be16_to_cpu(rule->h_ext.vlan_tci) & VLAN_PRIO_MASK) >>
+		VLAN_PRIO_SHIFT;
+}
+
+static inline u32 vlan_tci_priom(struct ethtool_rx_flow_spec *rule)
+{
+	return (be16_to_cpu(rule->m_ext.vlan_tci) & VLAN_PRIO_MASK) >>
+		VLAN_PRIO_SHIFT;
 }
 
 /* Convert a rule to binary filter format of gianfar */
@@ -1153,22 +1197,21 @@ static int gfar_convert_to_filer(struct ethtool_rx_flow_spec *rule,
 	u32 old_index = tab->index;
 
 	/* Check if vlan is wanted */
-	if ((rule->flow_type & FLOW_EXT) && (rule->m_ext.vlan_tci != 0xFFFF)) {
+	if ((rule->flow_type & FLOW_EXT) &&
+	    (rule->m_ext.vlan_tci != cpu_to_be16(0xFFFF))) {
 		if (!rule->m_ext.vlan_tci)
-			rule->m_ext.vlan_tci = 0xFFFF;
+			rule->m_ext.vlan_tci = cpu_to_be16(0xFFFF);
 
 		vlan = RQFPR_VLN;
 		vlan_mask = RQFPR_VLN;
 
 		/* Separate the fields */
-		id = rule->h_ext.vlan_tci & VLAN_VID_MASK;
-		id_mask = rule->m_ext.vlan_tci & VLAN_VID_MASK;
-		cfi = rule->h_ext.vlan_tci & VLAN_CFI_MASK;
-		cfi_mask = rule->m_ext.vlan_tci & VLAN_CFI_MASK;
-		prio = (rule->h_ext.vlan_tci & VLAN_PRIO_MASK) >>
-		       VLAN_PRIO_SHIFT;
-		prio_mask = (rule->m_ext.vlan_tci & VLAN_PRIO_MASK) >>
-			    VLAN_PRIO_SHIFT;
+		id = vlan_tci_vid(rule);
+		id_mask = vlan_tci_vidm(rule);
+		cfi = vlan_tci_cfi(rule);
+		cfi_mask = vlan_tci_cfim(rule);
+		prio = vlan_tci_prio(rule);
+		prio_mask = vlan_tci_priom(rule);
 
 		if (cfi == VLAN_TAG_PRESENT && cfi_mask == VLAN_TAG_PRESENT) {
 			vlan |= RQFPR_CFI;
@@ -1666,10 +1709,10 @@ static void gfar_invert_masks(struct ethtool_rx_flow_spec *flow)
 	for (i = 0; i < sizeof(flow->m_u); i++)
 		flow->m_u.hdata[i] ^= 0xFF;
 
-	flow->m_ext.vlan_etype ^= 0xFFFF;
-	flow->m_ext.vlan_tci ^= 0xFFFF;
-	flow->m_ext.data[0] ^= ~0;
-	flow->m_ext.data[1] ^= ~0;
+	flow->m_ext.vlan_etype ^= cpu_to_be16(0xFFFF);
+	flow->m_ext.vlan_tci ^= cpu_to_be16(0xFFFF);
+	flow->m_ext.data[0] ^= cpu_to_be32(~0);
+	flow->m_ext.data[1] ^= cpu_to_be32(~0);
 }
 
 static int gfar_add_cls(struct gfar_private *priv,
diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c
index e006a09ba899..abc28da27042 100644
--- a/drivers/net/ethernet/freescale/gianfar_ptp.c
+++ b/drivers/net/ethernet/freescale/gianfar_ptp.c
@@ -22,7 +22,6 @@
 
 #include <linux/device.h>
 #include <linux/hrtimer.h>
-#include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
@@ -134,7 +133,7 @@ struct gianfar_ptp_registers {
 #define REG_SIZE	sizeof(struct gianfar_ptp_registers)
 
 struct etsects {
-	struct gianfar_ptp_registers *regs;
+	struct gianfar_ptp_registers __iomem *regs;
 	spinlock_t lock; /* protects regs */
 	struct ptp_clock *clock;
 	struct ptp_clock_info caps;
diff --git a/drivers/net/ethernet/freescale/gianfar_sysfs.c b/drivers/net/ethernet/freescale/gianfar_sysfs.c
index acb55af7e3f3..e02dd1378751 100644
--- a/drivers/net/ethernet/freescale/gianfar_sysfs.c
+++ b/drivers/net/ethernet/freescale/gianfar_sysfs.c
@@ -24,7 +24,6 @@
 #include <linux/string.h>
 #include <linux/errno.h>
 #include <linux/unistd.h>
-#include <linux/init.h>
 #include <linux/delay.h>
 #include <linux/etherdevice.h>
 #include <linux/spinlock.h>
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 5548b6d00c31..72291a8904a9 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -435,11 +435,6 @@ static void hw_add_addr_in_hash(struct ucc_geth_private *ugeth,
 		     QE_CR_PROTOCOL_ETHERNET, 0);
 }
 
-static inline int compare_addr(u8 **addr1, u8 **addr2)
-{
-	return memcmp(addr1, addr2, ETH_ALEN);
-}
-
 #ifdef DEBUG
 static void get_statistics(struct ucc_geth_private *ugeth,
 			   struct ucc_geth_tx_firmware_statistics *
diff --git a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
index e79aaf9ae52a..413329eff2ff 100644
--- a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
+++ b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
@@ -16,7 +16,6 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/init.h>
 #include <linux/errno.h>
 #include <linux/stddef.h>
 #include <linux/interrupt.h>
diff --git a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
index ef46b58cb4e9..7becab1aa3e4 100644
--- a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
+++ b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
@@ -35,7 +35,6 @@
 
 #include <linux/module.h>
 #include <linux/kernel.h>
-#include <linux/init.h>
 #include <linux/ptrace.h>
 #include <linux/slab.h>
 #include <linux/string.h>
diff --git a/drivers/net/ethernet/i825xx/lasi_82596.c b/drivers/net/ethernet/i825xx/lasi_82596.c
index f42f1b707733..d787fdd5db7b 100644
--- a/drivers/net/ethernet/i825xx/lasi_82596.c
+++ b/drivers/net/ethernet/i825xx/lasi_82596.c
@@ -79,7 +79,6 @@
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/skbuff.h>
-#include <linux/init.h>
 #include <linux/types.h>
 #include <linux/bitops.h>
 #include <linux/dma-mapping.h>
diff --git a/drivers/net/ethernet/i825xx/lib82596.c b/drivers/net/ethernet/i825xx/lib82596.c
index 861fa15e1e81..17fca323c143 100644
--- a/drivers/net/ethernet/i825xx/lib82596.c
+++ b/drivers/net/ethernet/i825xx/lib82596.c
@@ -78,7 +78,6 @@
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/skbuff.h>
-#include <linux/init.h>
 #include <linux/types.h>
 #include <linux/bitops.h>
 #include <linux/dma-mapping.h>
diff --git a/drivers/net/ethernet/i825xx/sni_82596.c b/drivers/net/ethernet/i825xx/sni_82596.c
index 4ceae9a30274..372fa8d1fda1 100644
--- a/drivers/net/ethernet/i825xx/sni_82596.c
+++ b/drivers/net/ethernet/i825xx/sni_82596.c
@@ -13,7 +13,6 @@
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/skbuff.h>
-#include <linux/init.h>
 #include <linux/types.h>
 #include <linux/bitops.h>
 #include <linux/platform_device.h>
diff --git a/drivers/net/ethernet/ibm/emac/core.h b/drivers/net/ethernet/ibm/emac/core.h
index 70074792bdef..67f342a9f65e 100644
--- a/drivers/net/ethernet/ibm/emac/core.h
+++ b/drivers/net/ethernet/ibm/emac/core.h
@@ -26,7 +26,6 @@
 #define __IBM_NEWEMAC_CORE_H
 
 #include <linux/module.h>
-#include <linux/init.h>
 #include <linux/list.h>
 #include <linux/kernel.h>
 #include <linux/interrupt.h>
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 952d795230a4..cde0fd941f0c 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -12,8 +12,7 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
  *
  * Copyright (C) IBM Corporation, 2003, 2010
  *
diff --git a/drivers/net/ethernet/ibm/ibmveth.h b/drivers/net/ethernet/ibm/ibmveth.h
index 84066bafe057..451ba7949e15 100644
--- a/drivers/net/ethernet/ibm/ibmveth.h
+++ b/drivers/net/ethernet/ibm/ibmveth.h
@@ -12,8 +12,7 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
  *
  * Copyright (C) IBM Corporation, 2003, 2010
  *
diff --git a/drivers/net/ethernet/icplus/ipg.h b/drivers/net/ethernet/icplus/ipg.h
index abb300a31912..a21e4f5702b5 100644
--- a/drivers/net/ethernet/icplus/ipg.h
+++ b/drivers/net/ethernet/icplus/ipg.h
@@ -18,7 +18,6 @@
 #include <linux/types.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
-#include <linux/init.h>
 #include <linux/skbuff.h>
 #include <asm/bitops.h>
 
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index 149ac85b5f9e..bb9f0ba9d164 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -220,12 +220,12 @@ config IXGBE_DCB
 	  If unsure, say N.
 
 config IXGBEVF
-	tristate "Intel(R) 82599 Virtual Function Ethernet support"
+	tristate "Intel(R) 10GbE PCI Express Virtual Function Ethernet support"
 	depends on PCI_MSI
 	---help---
-	  This driver supports Intel(R) 82599 virtual functions.  For more
-	  information on how to identify your adapter, go to the Adapter &
-	  Driver ID Guide at:
+	  This driver supports Intel(R) PCI Express virtual functions for the
+	  Intel(R) ixgbe driver.  For more information on how to identify your
+	  adapter, go to the Adapter & Driver ID Guide at:
 
 	  <http://support.intel.com/support/network/sb/CS-008441.htm>
 
@@ -243,6 +243,7 @@ config IXGBEVF
 
 config I40E
 	tristate "Intel(R) Ethernet Controller XL710 Family support"
+	select PTP_1588_CLOCK
 	depends on PCI
 	---help---
 	  This driver supports Intel(R) Ethernet Controller XL710 Family of
@@ -259,4 +260,44 @@ config I40E
 	  To compile this driver as a module, choose M here. The module
 	  will be called i40e.
 
+config I40E_VXLAN
+	bool "Virtual eXtensible Local Area Network Support"
+	default n
+	depends on I40E && VXLAN && !(I40E=y && VXLAN=m)
+	---help---
+	  This allows one to create VXLAN virtual interfaces that provide
+	  Layer 2 Networks over Layer 3 Networks. VXLAN is often used
+	  to tunnel virtual network infrastructure in virtualized environments.
+	  Say Y here if you want to use Virtual eXtensible Local Area Network
+	  (VXLAN) in the driver.
+
+config I40E_DCB
+	bool "Data Center Bridging (DCB) Support"
+	default n
+	depends on I40E && DCB
+	---help---
+	  Say Y here if you want to use Data Center Bridging (DCB) in the
+	  driver.
+
+	  If unsure, say N.
+
+config I40EVF
+	tristate "Intel(R) XL710 X710 Virtual Function Ethernet support"
+	depends on PCI_MSI
+	---help---
+	  This driver supports Intel(R) XL710 and X710 virtual functions.
+	  For more information on how to identify your adapter, go to the
+	  Adapter & Driver ID Guide at:
+
+	  <http://support.intel.com/support/network/sb/CS-008441.htm>
+
+	  For general information and support, go to the Intel support
+	  website at:
+
+	  <http://support.intel.com>
+
+	  To compile this driver as a module, choose M here. The module
+	  will be called i40evf.  MSI-X interrupt support is required
+	  for this driver to work correctly.
+
 endif # NET_VENDOR_INTEL
diff --git a/drivers/net/ethernet/intel/Makefile b/drivers/net/ethernet/intel/Makefile
index 5bae933efc7c..cdbbca8a3755 100644
--- a/drivers/net/ethernet/intel/Makefile
+++ b/drivers/net/ethernet/intel/Makefile
@@ -11,3 +11,4 @@ obj-$(CONFIG_IXGBE) += ixgbe/
 obj-$(CONFIG_IXGBEVF) += ixgbevf/
 obj-$(CONFIG_I40E) += i40e/
 obj-$(CONFIG_IXGB) += ixgb/
+obj-$(CONFIG_I40EVF) += i40evf/
diff --git a/drivers/net/ethernet/intel/e1000/e1000.h b/drivers/net/ethernet/intel/e1000/e1000.h
index f9313b36c887..10a0f221b183 100644
--- a/drivers/net/ethernet/intel/e1000/e1000.h
+++ b/drivers/net/ethernet/intel/e1000/e1000.h
@@ -36,7 +36,6 @@
 #include <linux/module.h>
 #include <linux/types.h>
 #include <asm/byteorder.h>
-#include <linux/init.h>
 #include <linux/mm.h>
 #include <linux/errno.h>
 #include <linux/ioport.h>
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 6d14eea17918..6d91933c4cdd 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -5790,7 +5790,7 @@ static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
  * specified. Matching the kind of event packet is not supported, with the
  * exception of "all V2 events regardless of level 2 or 4".
  **/
-static int e1000e_hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
+static int e1000e_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	struct hwtstamp_config config;
@@ -5825,6 +5825,14 @@ static int e1000e_hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
 			    sizeof(config)) ? -EFAULT : 0;
 }
 
+static int e1000e_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+
+	return copy_to_user(ifr->ifr_data, &adapter->hwtstamp_config,
+			    sizeof(adapter->hwtstamp_config)) ? -EFAULT : 0;
+}
+
 static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
 {
 	switch (cmd) {
@@ -5833,7 +5841,9 @@ static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
 	case SIOCSMIIREG:
 		return e1000_mii_ioctl(netdev, ifr, cmd);
 	case SIOCSHWTSTAMP:
-		return e1000e_hwtstamp_ioctl(netdev, ifr);
+		return e1000e_hwtstamp_set(netdev, ifr);
+	case SIOCGHWTSTAMP:
+		return e1000e_hwtstamp_get(netdev, ifr);
 	default:
 		return -EOPNOTSUPP;
 	}
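
The e1000e hunk above splits the old SIOCSHWTSTAMP-only handler into separate set and get paths so userspace can read back the current hardware timestamping configuration through the then-new SIOCGHWTSTAMP ioctl. As a rough illustration of how a userspace tool might exercise the new get path — the interface name "eth0", the error handling, and the availability of SIOCGHWTSTAMP in the installed headers are assumptions, not part of the patch — a minimal sketch:

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>
#include <linux/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&ifr, 0, sizeof(ifr));
	memset(&cfg, 0, sizeof(cfg));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* assumed interface name */
	ifr.ifr_data = (void *)&cfg;

	/* SIOCGHWTSTAMP reads the current config; SIOCSHWTSTAMP would write it */
	if (ioctl(fd, SIOCGHWTSTAMP, &ifr) == 0)
		printf("tx_type=%d rx_filter=%d\n", cfg.tx_type, cfg.rx_filter);

	close(fd);
	return 0;
}
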
diff --git a/drivers/net/ethernet/intel/i40e/Makefile b/drivers/net/ethernet/intel/i40e/Makefile
index 479b2c4e552d..d9eb80acac4f 100644
--- a/drivers/net/ethernet/intel/i40e/Makefile
+++ b/drivers/net/ethernet/intel/i40e/Makefile
@@ -1,7 +1,7 @@
 ################################################################################
 #
 # Intel Ethernet Controller XL710 Family Linux Driver
-# Copyright(c) 2013 Intel Corporation.
+# Copyright(c) 2013 - 2014 Intel Corporation.
 #
 # This program is free software; you can redistribute it and/or modify it
 # under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
 # FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 # more details.
 #
-# You should have received a copy of the GNU General Public License along with
-# this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+# You should have received a copy of the GNU General Public License along
+# with this program.  If not, see <http://www.gnu.org/licenses/>.
 #
 # The full GNU General Public License is included in this distribution in
 # the file called "COPYING".
@@ -41,4 +40,7 @@ i40e-objs := i40e_main.o \
 	i40e_debugfs.o	\
 	i40e_diag.o	\
 	i40e_txrx.o	\
+	i40e_ptp.o	\
 	i40e_virtchnl_pf.o
+
+i40e-$(CONFIG_I40E_DCB) += i40e_dcb.o i40e_dcb_nl.o
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 1ca9834cdfda..72dae4d97b43 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
  * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
  *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
@@ -29,7 +28,7 @@
 #define _I40E_H_
 
 #include <net/tcp.h>
-#include <linux/init.h>
+#include <net/udp.h>
 #include <linux/types.h>
 #include <linux/errno.h>
 #include <linux/module.h>
@@ -50,11 +49,15 @@
 #include <net/ip6_checksum.h>
 #include <linux/ethtool.h>
 #include <linux/if_vlan.h>
+#include <linux/clocksource.h>
+#include <linux/net_tstamp.h>
+#include <linux/ptp_clock_kernel.h>
 #include "i40e_type.h"
 #include "i40e_prototype.h"
 #include "i40e_virtchnl.h"
 #include "i40e_virtchnl_pf.h"
 #include "i40e_txrx.h"
+#include "i40e_dcb.h"
 
 /* Useful i40e defaults */
 #define I40E_BASE_PF_SEID     16
@@ -63,7 +66,7 @@
 #define I40E_MAX_VEB          16
 
 #define I40E_MAX_NUM_DESCRIPTORS      4096
-#define I40E_MAX_REGISTER     0x0038FFFF
+#define I40E_MAX_REGISTER     0x800000
 #define I40E_DEFAULT_NUM_DESCRIPTORS  512
 #define I40E_REQ_DESCRIPTOR_MULTIPLE  32
 #define I40E_MIN_NUM_DESCRIPTORS      64
@@ -72,6 +75,7 @@
 #define I40E_DEFAULT_QUEUES_PER_VMDQ  2 /* max 16 qps */
 #define I40E_DEFAULT_QUEUES_PER_VF    4
 #define I40E_DEFAULT_QUEUES_PER_TC    1 /* should be a power of 2 */
+#define I40E_MAX_QUEUES_PER_TC        64 /* should be a power of 2 */
 #define I40E_FDIR_RING                0
 #define I40E_FDIR_RING_COUNT          32
 #define I40E_MAX_AQ_BUF_SIZE          4096
@@ -81,11 +85,13 @@
 #define I40E_DEFAULT_MSG_ENABLE       4
 
 #define I40E_NVM_VERSION_LO_SHIFT  0
-#define I40E_NVM_VERSION_LO_MASK   (0xf << I40E_NVM_VERSION_LO_SHIFT)
-#define I40E_NVM_VERSION_MID_SHIFT 4
-#define I40E_NVM_VERSION_MID_MASK  (0xff << I40E_NVM_VERSION_MID_SHIFT)
-#define I40E_NVM_VERSION_HI_SHIFT  12
-#define I40E_NVM_VERSION_HI_MASK   (0xf << I40E_NVM_VERSION_HI_SHIFT)
+#define I40E_NVM_VERSION_LO_MASK   (0xff << I40E_NVM_VERSION_LO_SHIFT)
+#define I40E_NVM_VERSION_HI_SHIFT  8
+#define I40E_NVM_VERSION_HI_MASK   (0xff << I40E_NVM_VERSION_HI_SHIFT)
+
+/* The values in here are decimal coded as hex as is the case in the NVM map */
+#define I40E_CURRENT_NVM_VERSION_HI 0x2
+#define I40E_CURRENT_NVM_VERSION_LO 0x30
 
 /* magic for getting defines into strings */
 #define STRINGIFY(foo)  #foo
@@ -127,7 +133,9 @@ enum i40e_state_t {
 	__I40E_PF_RESET_REQUESTED,
 	__I40E_CORE_RESET_REQUESTED,
 	__I40E_GLOBAL_RESET_REQUESTED,
+	__I40E_EMP_RESET_REQUESTED,
 	__I40E_FILTER_OVERFLOW_PROMISC,
+	__I40E_SUSPENDED,
 };
 
 enum i40e_interrupt_policy {
@@ -157,6 +165,8 @@ struct i40e_fdir_data {
 	u8  *raw_packet;
 };
 
+#define I40E_ETH_P_LLDP			0x88cc
+
 #define I40E_DCB_PRIO_TYPE_STRICT	0
 #define I40E_DCB_PRIO_TYPE_ETS		1
 #define I40E_DCB_STRICT_PRIO_CREDITS	127
@@ -191,14 +201,20 @@ struct i40e_pf {
 	u16 num_vmdq_msix;         /* num queue vectors per vmdq pool */
 	u16 num_req_vfs;           /* num vfs requested for this vf */
 	u16 num_vf_qps;            /* num queue pairs per vf */
-	u16 num_tc_qps;            /* num queue pairs per TC */
 	u16 num_lan_qps;           /* num lan queues this pf has set up */
 	u16 num_lan_msix;          /* num queue vectors for the base pf vsi */
+	int queues_left;           /* queues left unclaimed */
 	u16 rss_size;              /* num queues in the RSS array */
 	u16 rss_size_max;          /* HW defined max RSS queues */
 	u16 fdir_pf_filter_count;  /* num of guaranteed filters for this PF */
 	u8 atr_sample_rate;
+	bool wol_en;
 
+#ifdef CONFIG_I40E_VXLAN
+	__be16  vxlan_ports[I40E_MAX_PF_UDP_OFFLOAD_PORTS];
+	u16 pending_vxlan_bitmap;
+
+#endif
 	enum i40e_interrupt_policy int_policy;
 	u16 rx_itr_default;
 	u16 tx_itr_default;
@@ -216,24 +232,24 @@ struct i40e_pf {
 #define I40E_FLAG_RX_1BUF_ENABLED              (u64)(1 << 4)
 #define I40E_FLAG_RX_PS_ENABLED                (u64)(1 << 5)
 #define I40E_FLAG_RSS_ENABLED                  (u64)(1 << 6)
-#define I40E_FLAG_MQ_ENABLED                   (u64)(1 << 7)
-#define I40E_FLAG_VMDQ_ENABLED                 (u64)(1 << 8)
-#define I40E_FLAG_FDIR_REQUIRES_REINIT         (u64)(1 << 9)
-#define I40E_FLAG_NEED_LINK_UPDATE             (u64)(1 << 10)
-#define I40E_FLAG_IN_NETPOLL                   (u64)(1 << 13)
-#define I40E_FLAG_16BYTE_RX_DESC_ENABLED       (u64)(1 << 14)
-#define I40E_FLAG_CLEAN_ADMINQ                 (u64)(1 << 15)
-#define I40E_FLAG_FILTER_SYNC                  (u64)(1 << 16)
-#define I40E_FLAG_PROCESS_MDD_EVENT            (u64)(1 << 18)
-#define I40E_FLAG_PROCESS_VFLR_EVENT           (u64)(1 << 19)
-#define I40E_FLAG_SRIOV_ENABLED                (u64)(1 << 20)
-#define I40E_FLAG_DCB_ENABLED                  (u64)(1 << 21)
-#define I40E_FLAG_FDIR_ENABLED                 (u64)(1 << 22)
-#define I40E_FLAG_FDIR_ATR_ENABLED             (u64)(1 << 23)
-#define I40E_FLAG_MFP_ENABLED                  (u64)(1 << 27)
-
-	u16 num_tx_queues;
-	u16 num_rx_queues;
+#define I40E_FLAG_VMDQ_ENABLED                 (u64)(1 << 7)
+#define I40E_FLAG_FDIR_REQUIRES_REINIT         (u64)(1 << 8)
+#define I40E_FLAG_NEED_LINK_UPDATE             (u64)(1 << 9)
+#define I40E_FLAG_IN_NETPOLL                   (u64)(1 << 12)
+#define I40E_FLAG_16BYTE_RX_DESC_ENABLED       (u64)(1 << 13)
+#define I40E_FLAG_CLEAN_ADMINQ                 (u64)(1 << 14)
+#define I40E_FLAG_FILTER_SYNC                  (u64)(1 << 15)
+#define I40E_FLAG_PROCESS_MDD_EVENT            (u64)(1 << 17)
+#define I40E_FLAG_PROCESS_VFLR_EVENT           (u64)(1 << 18)
+#define I40E_FLAG_SRIOV_ENABLED                (u64)(1 << 19)
+#define I40E_FLAG_DCB_ENABLED                  (u64)(1 << 20)
+#define I40E_FLAG_FD_SB_ENABLED                (u64)(1 << 21)
+#define I40E_FLAG_FD_ATR_ENABLED               (u64)(1 << 22)
+#define I40E_FLAG_PTP                          (u64)(1 << 25)
+#define I40E_FLAG_MFP_ENABLED                  (u64)(1 << 26)
+#ifdef CONFIG_I40E_VXLAN
+#define I40E_FLAG_VXLAN_FILTER_SYNC            (u64)(1 << 27)
+#endif
 
 	bool stat_offsets_loaded;
 	struct i40e_hw_port_stats stats;
@@ -247,6 +263,7 @@ struct i40e_pf {
 	u16 globr_count; /* Global reset count */
 	u16 empr_count; /* EMP reset count */
 	u16 pfr_count; /* PF reset count */
+	u16 sw_int_count; /* SW interrupt count */
 
 	struct mutex switch_mutex;
 	u16 lan_vsi;       /* our default LAN VSI */
@@ -270,6 +287,8 @@ struct i40e_pf {
 	struct dentry *i40e_dbg_pf;
 #endif /* CONFIG_DEBUG_FS */
 
+	u16 instance; /* A unique number per i40e_pf instance in the system */
+
 	/* sr-iov config info */
 	struct i40e_vf *vf;
 	int num_alloc_vfs;	/* actual number of VFs allocated */
@@ -287,6 +306,20 @@ struct i40e_pf {
 	u32	fcoe_hmc_filt_num;
 	u32	fcoe_hmc_cntx_num;
 	struct i40e_filter_control_settings filter_settings;
+
+	struct ptp_clock *ptp_clock;
+	struct ptp_clock_info ptp_caps;
+	struct sk_buff *ptp_tx_skb;
+	struct work_struct ptp_tx_work;
+	struct hwtstamp_config tstamp_config;
+	unsigned long ptp_tx_start;
+	unsigned long last_rx_ptp_check;
+	spinlock_t tmreg_lock; /* Used to protect the device time registers. */
+	u64 ptp_base_adj;
+	u32 tx_hwtstamp_timeouts;
+	u32 rx_hwtstamp_cleared;
+	bool ptp_tx;
+	bool ptp_rx;
 };
 
 struct i40e_mac_filter {
@@ -441,13 +474,11 @@ static inline char *i40e_fw_version_str(struct i40e_hw *hw)
 	static char buf[32];
 
 	snprintf(buf, sizeof(buf),
-		 "f%d.%d a%d.%d n%02d.%02d.%02d e%08x",
+		 "f%d.%d a%d.%d n%02x.%02x e%08x",
 		 hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
 		 hw->aq.api_maj_ver, hw->aq.api_min_ver,
 		 (hw->nvm.version & I40E_NVM_VERSION_HI_MASK)
 						>> I40E_NVM_VERSION_HI_SHIFT,
-		 (hw->nvm.version & I40E_NVM_VERSION_MID_MASK)
-						>> I40E_NVM_VERSION_MID_SHIFT,
 		 (hw->nvm.version & I40E_NVM_VERSION_LO_MASK)
 						>> I40E_NVM_VERSION_LO_SHIFT,
 		 hw->nvm.eetrack);
@@ -495,6 +526,7 @@ int i40e_up(struct i40e_vsi *vsi);
 void i40e_down(struct i40e_vsi *vsi);
 extern const char i40e_driver_name[];
 extern const char i40e_driver_version_str[];
+void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags);
 void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags);
 void i40e_update_stats(struct i40e_vsi *vsi);
 void i40e_update_eth_stats(struct i40e_vsi *vsi);
@@ -502,13 +534,6 @@ struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi);
 int i40e_fetch_switch_configuration(struct i40e_pf *pf,
 				    bool printconfig);
 
-/* needed by i40e_main.c */
-void i40e_add_fdir_filter(struct i40e_fdir_data fdir_data,
-			  struct i40e_ring *tx_ring);
-void i40e_add_remove_filter(struct i40e_fdir_data fdir_data,
-			    struct i40e_ring *tx_ring);
-void i40e_update_fdir_filter(struct i40e_fdir_data fdir_data,
-			     struct i40e_ring *tx_ring);
 int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data,
 			     struct i40e_pf *pf, bool add);
 
@@ -524,10 +549,13 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
 int i40e_vsi_release(struct i40e_vsi *vsi);
 struct i40e_vsi *i40e_vsi_lookup(struct i40e_pf *pf, enum i40e_vsi_type type,
 				 struct i40e_vsi *start_vsi);
+int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool enable);
+int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count);
 struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags, u16 uplink_seid,
 				u16 downlink_seid, u8 enabled_tc);
 void i40e_veb_release(struct i40e_veb *veb);
 
+int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc);
 i40e_status i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid);
 void i40e_vsi_remove_pvid(struct i40e_vsi *vsi);
 void i40e_vsi_reset_stats(struct i40e_vsi *vsi);
@@ -544,6 +572,7 @@ static inline void i40e_dbg_init(void) {}
 static inline void i40e_dbg_exit(void) {}
 #endif /* CONFIG_DEBUG_FS*/
 void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector);
+void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf);
 void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf);
 int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
 void i40e_vlan_stripping_disable(struct i40e_vsi *vsi);
@@ -555,5 +584,21 @@ bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi);
 struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr,
 				      bool is_vf, bool is_netdev);
 void i40e_vlan_stripping_enable(struct i40e_vsi *vsi);
-
+#ifdef CONFIG_I40E_DCB
+void i40e_dcbnl_flush_apps(struct i40e_pf *pf,
+			   struct i40e_dcbx_config *new_cfg);
+void i40e_dcbnl_set_all(struct i40e_vsi *vsi);
+void i40e_dcbnl_setup(struct i40e_vsi *vsi);
+bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
+			    struct i40e_dcbx_config *old_cfg,
+			    struct i40e_dcbx_config *new_cfg);
+#endif /* CONFIG_I40E_DCB */
+void i40e_ptp_rx_hang(struct i40e_vsi *vsi);
+void i40e_ptp_tx_hwtstamp(struct i40e_pf *pf);
+void i40e_ptp_rx_hwtstamp(struct i40e_pf *pf, struct sk_buff *skb, u8 index);
+void i40e_ptp_set_increment(struct i40e_pf *pf);
+int i40e_ptp_set_ts_config(struct i40e_pf *pf, struct ifreq *ifr);
+int i40e_ptp_get_ts_config(struct i40e_pf *pf, struct ifreq *ifr);
+void i40e_ptp_init(struct i40e_pf *pf);
+void i40e_ptp_stop(struct i40e_pf *pf);
 #endif /* _I40E_H_ */
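
The i40e.h changes above collapse the old three-field NVM version (4/8/4 bits) into two 8-bit fields and switch i40e_fw_version_str() to print them as hex ("n%02x.%02x"). A minimal sketch of the resulting decode, using only the masks and shifts defined in the hunk; the example version word 0x0230 is chosen to match the I40E_CURRENT_NVM_VERSION_HI/LO values the patch adds:

#include <stdio.h>

#define I40E_NVM_VERSION_LO_SHIFT  0
#define I40E_NVM_VERSION_LO_MASK   (0xff << I40E_NVM_VERSION_LO_SHIFT)
#define I40E_NVM_VERSION_HI_SHIFT  8
#define I40E_NVM_VERSION_HI_MASK   (0xff << I40E_NVM_VERSION_HI_SHIFT)

int main(void)
{
	unsigned short nvm_version = 0x0230;	/* example word: decodes as "n02.30" */
	unsigned int hi = (nvm_version & I40E_NVM_VERSION_HI_MASK) >> I40E_NVM_VERSION_HI_SHIFT;
	unsigned int lo = (nvm_version & I40E_NVM_VERSION_LO_MASK) >> I40E_NVM_VERSION_LO_SHIFT;

	printf("n%02x.%02x\n", hi, lo);	/* same layout as the new i40e_fw_version_str() */
	return 0;
}
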
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index cfef7fc32cdd..a50e6b3479ae 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
  * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
  *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
@@ -31,6 +30,8 @@
 #include "i40e_adminq.h"
 #include "i40e_prototype.h"
 
+static void i40e_resume_aq(struct i40e_hw *hw);
+
 /**
  *  i40e_adminq_init_regs - Initialize AdminQ registers
  *  @hw: pointer to the hardware structure
@@ -43,13 +44,17 @@ static void i40e_adminq_init_regs(struct i40e_hw *hw)
 	if (hw->mac.type == I40E_MAC_VF) {
 		hw->aq.asq.tail = I40E_VF_ATQT1;
 		hw->aq.asq.head = I40E_VF_ATQH1;
+		hw->aq.asq.len  = I40E_VF_ATQLEN1;
 		hw->aq.arq.tail = I40E_VF_ARQT1;
 		hw->aq.arq.head = I40E_VF_ARQH1;
+		hw->aq.arq.len  = I40E_VF_ARQLEN1;
 	} else {
 		hw->aq.asq.tail = I40E_PF_ATQT;
 		hw->aq.asq.head = I40E_PF_ATQH;
+		hw->aq.asq.len  = I40E_PF_ATQLEN;
 		hw->aq.arq.tail = I40E_PF_ARQT;
 		hw->aq.arq.head = I40E_PF_ARQH;
+		hw->aq.arq.len  = I40E_PF_ARQLEN;
 	}
 }
 
@@ -60,9 +65,8 @@ static void i40e_adminq_init_regs(struct i40e_hw *hw)
 static i40e_status i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
 {
 	i40e_status ret_code;
-	struct i40e_virt_mem mem;
 
-	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq_mem,
+	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
 					 i40e_mem_atq_ring,
 					 (hw->aq.num_asq_entries *
 					 sizeof(struct i40e_aq_desc)),
@@ -70,21 +74,14 @@ static i40e_status i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
 	if (ret_code)
 		return ret_code;
 
-	hw->aq.asq.desc = hw->aq.asq_mem.va;
-	hw->aq.asq.dma_addr = hw->aq.asq_mem.pa;
-
-	ret_code = i40e_allocate_virt_mem(hw, &mem,
+	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
 					  (hw->aq.num_asq_entries *
 					  sizeof(struct i40e_asq_cmd_details)));
 	if (ret_code) {
-		i40e_free_dma_mem(hw, &hw->aq.asq_mem);
-		hw->aq.asq_mem.va = NULL;
-		hw->aq.asq_mem.pa = 0;
+		i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
 		return ret_code;
 	}
 
-	hw->aq.asq.details = mem.va;
-
 	return ret_code;
 }
 
@@ -96,16 +93,11 @@ static i40e_status i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
 {
 	i40e_status ret_code;
 
-	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq_mem,
+	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
 					 i40e_mem_arq_ring,
 					 (hw->aq.num_arq_entries *
 					 sizeof(struct i40e_aq_desc)),
 					 I40E_ADMINQ_DESC_ALIGNMENT);
-	if (ret_code)
-		return ret_code;
-
-	hw->aq.arq.desc = hw->aq.arq_mem.va;
-	hw->aq.arq.dma_addr = hw->aq.arq_mem.pa;
 
 	return ret_code;
 }
@@ -119,14 +111,7 @@ static i40e_status i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
  **/
 static void i40e_free_adminq_asq(struct i40e_hw *hw)
 {
-	struct i40e_virt_mem mem;
-
-	i40e_free_dma_mem(hw, &hw->aq.asq_mem);
-	hw->aq.asq_mem.va = NULL;
-	hw->aq.asq_mem.pa = 0;
-	mem.va = hw->aq.asq.details;
-	i40e_free_virt_mem(hw, &mem);
-	hw->aq.asq.details = NULL;
+	i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
 }
 
 /**
@@ -138,20 +123,17 @@ static void i40e_free_adminq_asq(struct i40e_hw *hw)
  **/
 static void i40e_free_adminq_arq(struct i40e_hw *hw)
 {
-	i40e_free_dma_mem(hw, &hw->aq.arq_mem);
-	hw->aq.arq_mem.va = NULL;
-	hw->aq.arq_mem.pa = 0;
+	i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
 }
 
 /**
  *  i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
- *  @hw:     pointer to the hardware structure
+ *  @hw: pointer to the hardware structure
  **/
 static i40e_status i40e_alloc_arq_bufs(struct i40e_hw *hw)
 {
 	i40e_status ret_code;
 	struct i40e_aq_desc *desc;
-	struct i40e_virt_mem mem;
 	struct i40e_dma_mem *bi;
 	int i;
 
@@ -160,11 +142,11 @@ static i40e_status i40e_alloc_arq_bufs(struct i40e_hw *hw)
 	 */
 
 	/* buffer_info structures do not need alignment */
-	ret_code = i40e_allocate_virt_mem(hw, &mem, (hw->aq.num_arq_entries *
-					  sizeof(struct i40e_dma_mem)));
+	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
+		(hw->aq.num_arq_entries * sizeof(struct i40e_dma_mem)));
 	if (ret_code)
 		goto alloc_arq_bufs;
-	hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)mem.va;
+	hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va;
 
 	/* allocate the mapped buffers */
 	for (i = 0; i < hw->aq.num_arq_entries; i++) {
@@ -206,29 +188,27 @@ unwind_alloc_arq_bufs:
 	i--;
 	for (; i >= 0; i--)
 		i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
-	mem.va = hw->aq.arq.r.arq_bi;
-	i40e_free_virt_mem(hw, &mem);
+	i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
 
 	return ret_code;
 }
 
 /**
  *  i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue
- *  @hw:     pointer to the hardware structure
+ *  @hw: pointer to the hardware structure
  **/
 static i40e_status i40e_alloc_asq_bufs(struct i40e_hw *hw)
 {
 	i40e_status ret_code;
-	struct i40e_virt_mem mem;
 	struct i40e_dma_mem *bi;
 	int i;
 
 	/* No mapped memory needed yet, just the buffer info structures */
-	ret_code = i40e_allocate_virt_mem(hw, &mem, (hw->aq.num_asq_entries *
-					  sizeof(struct i40e_dma_mem)));
+	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
+		(hw->aq.num_asq_entries * sizeof(struct i40e_dma_mem)));
 	if (ret_code)
 		goto alloc_asq_bufs;
-	hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)mem.va;
+	hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)hw->aq.asq.dma_head.va;
 
 	/* allocate the mapped buffers */
 	for (i = 0; i < hw->aq.num_asq_entries; i++) {
@@ -248,35 +228,36 @@ unwind_alloc_asq_bufs:
 	i--;
 	for (; i >= 0; i--)
 		i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
-	mem.va = hw->aq.asq.r.asq_bi;
-	i40e_free_virt_mem(hw, &mem);
+	i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
 
 	return ret_code;
 }
 
 /**
  *  i40e_free_arq_bufs - Free receive queue buffer info elements
- *  @hw:     pointer to the hardware structure
+ *  @hw: pointer to the hardware structure
  **/
 static void i40e_free_arq_bufs(struct i40e_hw *hw)
 {
-	struct i40e_virt_mem mem;
 	int i;
 
+	/* free descriptors */
 	for (i = 0; i < hw->aq.num_arq_entries; i++)
 		i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
 
-	mem.va = hw->aq.arq.r.arq_bi;
-	i40e_free_virt_mem(hw, &mem);
+	/* free the descriptor memory */
+	i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
+
+	/* free the dma header */
+	i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
 }
 
 /**
  *  i40e_free_asq_bufs - Free send queue buffer info elements
- *  @hw:     pointer to the hardware structure
+ *  @hw: pointer to the hardware structure
  **/
 static void i40e_free_asq_bufs(struct i40e_hw *hw)
 {
-	struct i40e_virt_mem mem;
 	int i;
 
 	/* only unmap if the address is non-NULL */
@@ -284,14 +265,19 @@ static void i40e_free_asq_bufs(struct i40e_hw *hw)
 		if (hw->aq.asq.r.asq_bi[i].pa)
 			i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
 
-	/* now free the buffer info list */
-	mem.va = hw->aq.asq.r.asq_bi;
-	i40e_free_virt_mem(hw, &mem);
+	/* free the buffer info list */
+	i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf);
+
+	/* free the descriptor memory */
+	i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
+
+	/* free the dma header */
+	i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
 }
 
 /**
  *  i40e_config_asq_regs - configure ASQ registers
- *  @hw:     pointer to the hardware structure
+ *  @hw: pointer to the hardware structure
  *
  *  Configure base address and length registers for the transmit queue
  **/
@@ -299,14 +285,18 @@ static void i40e_config_asq_regs(struct i40e_hw *hw)
 {
 	if (hw->mac.type == I40E_MAC_VF) {
 		/* configure the transmit queue */
-		wr32(hw, I40E_VF_ATQBAH1, upper_32_bits(hw->aq.asq.dma_addr));
-		wr32(hw, I40E_VF_ATQBAL1, lower_32_bits(hw->aq.asq.dma_addr));
+		wr32(hw, I40E_VF_ATQBAH1,
+		    upper_32_bits(hw->aq.asq.desc_buf.pa));
+		wr32(hw, I40E_VF_ATQBAL1,
+		    lower_32_bits(hw->aq.asq.desc_buf.pa));
 		wr32(hw, I40E_VF_ATQLEN1, (hw->aq.num_asq_entries |
 					  I40E_VF_ATQLEN1_ATQENABLE_MASK));
 	} else {
 		/* configure the transmit queue */
-		wr32(hw, I40E_PF_ATQBAH, upper_32_bits(hw->aq.asq.dma_addr));
-		wr32(hw, I40E_PF_ATQBAL, lower_32_bits(hw->aq.asq.dma_addr));
+		wr32(hw, I40E_PF_ATQBAH,
+		    upper_32_bits(hw->aq.asq.desc_buf.pa));
+		wr32(hw, I40E_PF_ATQBAL,
+		    lower_32_bits(hw->aq.asq.desc_buf.pa));
 		wr32(hw, I40E_PF_ATQLEN, (hw->aq.num_asq_entries |
 					  I40E_PF_ATQLEN_ATQENABLE_MASK));
 	}
@@ -314,7 +304,7 @@ static void i40e_config_asq_regs(struct i40e_hw *hw)
 
 /**
  *  i40e_config_arq_regs - ARQ register configuration
- *  @hw:     pointer to the hardware structure
+ *  @hw: pointer to the hardware structure
  *
  * Configure base address and length registers for the receive (event queue)
  **/
@@ -322,14 +312,18 @@ static void i40e_config_arq_regs(struct i40e_hw *hw)
 {
 	if (hw->mac.type == I40E_MAC_VF) {
 		/* configure the receive queue */
-		wr32(hw, I40E_VF_ARQBAH1, upper_32_bits(hw->aq.arq.dma_addr));
-		wr32(hw, I40E_VF_ARQBAL1, lower_32_bits(hw->aq.arq.dma_addr));
+		wr32(hw, I40E_VF_ARQBAH1,
+		    upper_32_bits(hw->aq.arq.desc_buf.pa));
+		wr32(hw, I40E_VF_ARQBAL1,
+		    lower_32_bits(hw->aq.arq.desc_buf.pa));
 		wr32(hw, I40E_VF_ARQLEN1, (hw->aq.num_arq_entries |
 					  I40E_VF_ARQLEN1_ARQENABLE_MASK));
 	} else {
 		/* configure the receive queue */
-		wr32(hw, I40E_PF_ARQBAH, upper_32_bits(hw->aq.arq.dma_addr));
-		wr32(hw, I40E_PF_ARQBAL, lower_32_bits(hw->aq.arq.dma_addr));
+		wr32(hw, I40E_PF_ARQBAH,
+		    upper_32_bits(hw->aq.arq.desc_buf.pa));
+		wr32(hw, I40E_PF_ARQBAL,
+		    lower_32_bits(hw->aq.arq.desc_buf.pa));
 		wr32(hw, I40E_PF_ARQLEN, (hw->aq.num_arq_entries |
 					  I40E_PF_ARQLEN_ARQENABLE_MASK));
 	}
@@ -340,7 +334,7 @@ static void i40e_config_arq_regs(struct i40e_hw *hw)
 
 /**
  *  i40e_init_asq - main initialization routine for ASQ
- *  @hw:     pointer to the hardware structure
+ *  @hw: pointer to the hardware structure
  *
  *  This is the main initialization routine for the Admin Send Queue
  *  Prior to calling this function, drivers *MUST* set the following fields
@@ -397,7 +391,7 @@ init_adminq_exit:
 
 /**
  *  i40e_init_arq - initialize ARQ
- *  @hw:     pointer to the hardware structure
+ *  @hw: pointer to the hardware structure
  *
  *  The main initialization routine for the Admin Receive (Event) Queue.
  *  Prior to calling this function, drivers *MUST* set the following fields
@@ -454,7 +448,7 @@ init_adminq_exit:
 
 /**
  *  i40e_shutdown_asq - shutdown the ASQ
- *  @hw:     pointer to the hardware structure
+ *  @hw: pointer to the hardware structure
  *
  *  The main shutdown routine for the Admin Send Queue
  **/
@@ -466,10 +460,9 @@ static i40e_status i40e_shutdown_asq(struct i40e_hw *hw)
 		return I40E_ERR_NOT_READY;
 
 	/* Stop firmware AdminQ processing */
-	if (hw->mac.type == I40E_MAC_VF)
-		wr32(hw, I40E_VF_ATQLEN1, 0);
-	else
-		wr32(hw, I40E_PF_ATQLEN, 0);
+	wr32(hw, hw->aq.asq.head, 0);
+	wr32(hw, hw->aq.asq.tail, 0);
+	wr32(hw, hw->aq.asq.len, 0);
 
 	/* make sure lock is available */
 	mutex_lock(&hw->aq.asq_mutex);
@@ -478,8 +471,6 @@ static i40e_status i40e_shutdown_asq(struct i40e_hw *hw)
 
 	/* free ring buffers */
 	i40e_free_asq_bufs(hw);
-	/* free the ring descriptors */
-	i40e_free_adminq_asq(hw);
 
 	mutex_unlock(&hw->aq.asq_mutex);
 
@@ -488,7 +479,7 @@ static i40e_status i40e_shutdown_asq(struct i40e_hw *hw)
 
 /**
  *  i40e_shutdown_arq - shutdown ARQ
- *  @hw:     pointer to the hardware structure
+ *  @hw: pointer to the hardware structure
  *
  *  The main shutdown routine for the Admin Receive Queue
  **/
@@ -500,10 +491,9 @@ static i40e_status i40e_shutdown_arq(struct i40e_hw *hw)
 		return I40E_ERR_NOT_READY;
 
 	/* Stop firmware AdminQ processing */
-	if (hw->mac.type == I40E_MAC_VF)
-		wr32(hw, I40E_VF_ARQLEN1, 0);
-	else
-		wr32(hw, I40E_PF_ARQLEN, 0);
+	wr32(hw, hw->aq.arq.head, 0);
+	wr32(hw, hw->aq.arq.tail, 0);
+	wr32(hw, hw->aq.arq.len, 0);
 
 	/* make sure lock is available */
 	mutex_lock(&hw->aq.arq_mutex);
@@ -512,8 +502,6 @@ static i40e_status i40e_shutdown_arq(struct i40e_hw *hw)
 
 	/* free ring buffers */
 	i40e_free_arq_bufs(hw);
-	/* free the ring descriptors */
-	i40e_free_adminq_arq(hw);
 
 	mutex_unlock(&hw->aq.arq_mutex);
 
@@ -522,7 +510,7 @@ static i40e_status i40e_shutdown_arq(struct i40e_hw *hw)
 
 /**
  *  i40e_init_adminq - main initialization routine for Admin Queue
- *  @hw:     pointer to the hardware structure
+ *  @hw: pointer to the hardware structure
  *
  *  Prior to calling this function, drivers *MUST* set the following fields
  *  in the hw->aq structure:
@@ -533,8 +521,9 @@ static i40e_status i40e_shutdown_arq(struct i40e_hw *hw)
  **/
 i40e_status i40e_init_adminq(struct i40e_hw *hw)
 {
-	u16 eetrack_lo, eetrack_hi;
 	i40e_status ret_code;
+	u16 eetrack_lo, eetrack_hi;
+	int retry = 0;
 
 	/* verify input for valid configuration */
 	if ((hw->aq.num_arq_entries == 0) ||
@@ -562,23 +551,41 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw)
 	if (ret_code)
 		goto init_adminq_free_asq;
 
-	ret_code = i40e_aq_get_firmware_version(hw,
-				     &hw->aq.fw_maj_ver, &hw->aq.fw_min_ver,
-				     &hw->aq.api_maj_ver, &hw->aq.api_min_ver,
-				     NULL);
-	if (ret_code)
+	/* There are some cases where the firmware may not be quite ready
+	 * for AdminQ operations, so we retry the AdminQ setup a few times
+	 * if we see timeouts in this first AQ call.
+	 */
+	do {
+		ret_code = i40e_aq_get_firmware_version(hw,
+							&hw->aq.fw_maj_ver,
+							&hw->aq.fw_min_ver,
+							&hw->aq.api_maj_ver,
+							&hw->aq.api_min_ver,
+							NULL);
+		if (ret_code != I40E_ERR_ADMIN_QUEUE_TIMEOUT)
+			break;
+		retry++;
+		msleep(100);
+		i40e_resume_aq(hw);
+	} while (retry < 10);
+	if (ret_code != I40E_SUCCESS)
 		goto init_adminq_free_arq;
 
-	if (hw->aq.api_maj_ver != I40E_FW_API_VERSION_MAJOR ||
-	    hw->aq.api_min_ver != I40E_FW_API_VERSION_MINOR) {
-		ret_code = I40E_ERR_FIRMWARE_API_VERSION;
-		goto init_adminq_free_arq;
-	}
+	/* get the NVM version info */
 	i40e_read_nvm_word(hw, I40E_SR_NVM_IMAGE_VERSION, &hw->nvm.version);
 	i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_LO, &eetrack_lo);
 	i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
 	hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo;
 
+	if (hw->aq.api_maj_ver != I40E_FW_API_VERSION_MAJOR ||
+	    hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR) {
+		ret_code = I40E_ERR_FIRMWARE_API_VERSION;
+		goto init_adminq_free_arq;
+	}
+
+	/* pre-emptive resource lock release */
+	i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
+
 	ret_code = i40e_aq_set_hmc_resource_profile(hw,
 						    I40E_HMC_PROFILE_DEFAULT,
 						    0,
@@ -600,12 +607,15 @@ init_adminq_exit:
 
 /**
  *  i40e_shutdown_adminq - shutdown routine for the Admin Queue
- *  @hw:     pointer to the hardware structure
+ *  @hw: pointer to the hardware structure
  **/
 i40e_status i40e_shutdown_adminq(struct i40e_hw *hw)
 {
 	i40e_status ret_code = 0;
 
+	if (i40e_check_asq_alive(hw))
+		i40e_aq_queue_shutdown(hw, true);
+
 	i40e_shutdown_asq(hw);
 	i40e_shutdown_arq(hw);
 
@@ -616,7 +626,7 @@ i40e_status i40e_shutdown_adminq(struct i40e_hw *hw)
 
 /**
  *  i40e_clean_asq - cleans Admin send queue
- *  @asq: pointer to the adminq send ring
+ *  @hw: pointer to the hardware structure
  *
  *  returns the number of free desc
  **/
@@ -659,12 +669,12 @@ static u16 i40e_clean_asq(struct i40e_hw *hw)
  *  Returns true if the firmware has processed all descriptors on the
  *  admin send queue. Returns false if there are still requests pending.
  **/
-bool i40e_asq_done(struct i40e_hw *hw)
+static bool i40e_asq_done(struct i40e_hw *hw)
 {
 	/* AQ designers suggest use of head for better
 	 * timing reliability than DD bit
 	 */
-	return (rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use);
+	return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
 
 }
 
@@ -674,7 +684,7 @@ bool i40e_asq_done(struct i40e_hw *hw)
  *  @desc: prefilled descriptor describing the command (non DMA mem)
  *  @buff: buffer to use for indirect commands
  *  @buff_size: size of buffer for indirect commands
- *  @opaque: pointer to info to be used in async cleanup
+ *  @cmd_details: pointer to command details structure
  *
  *  This is the main send command driver routine for the Admin Queue send
  *  queue.  It runs the queue, cleans the queue, etc
@@ -854,7 +864,7 @@ void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
 	/* zero out the desc */
 	memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
 	desc->opcode = cpu_to_le16(opcode);
-	desc->flags = cpu_to_le16(I40E_AQ_FLAG_EI | I40E_AQ_FLAG_SI);
+	desc->flags = cpu_to_le16(I40E_AQ_FLAG_SI);
 }
 
 /**
@@ -912,7 +922,7 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
 			   "AQRX: Event received with error 0x%X.\n",
 			   hw->aq.arq_last_status);
 	} else {
-		memcpy(&e->desc, desc, sizeof(struct i40e_aq_desc));
+		e->desc = *desc;
 		datalen = le16_to_cpu(desc->datalen);
 		e->msg_size = min(datalen, e->msg_size);
 		if (e->msg_buf != NULL && (e->msg_size != 0))
@@ -925,6 +935,11 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
 	 * size
 	 */
 	bi = &hw->aq.arq.r.arq_bi[ntc];
+	memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
+
+	desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF);
+	if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
+		desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB);
 	desc->datalen = cpu_to_le16((u16)bi->size);
 	desc->params.external.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
 	desc->params.external.addr_low = cpu_to_le32(lower_32_bits(bi->pa));
@@ -947,36 +962,16 @@ clean_arq_element_out:
 	return ret_code;
 }
 
-void i40e_resume_aq(struct i40e_hw *hw)
+static void i40e_resume_aq(struct i40e_hw *hw)
 {
-	u32 reg = 0;
-
 	/* Registers are reset after PF reset */
 	hw->aq.asq.next_to_use = 0;
 	hw->aq.asq.next_to_clean = 0;
 
 	i40e_config_asq_regs(hw);
-	reg = hw->aq.num_asq_entries;
-
-	if (hw->mac.type == I40E_MAC_VF) {
-		reg |= I40E_VF_ATQLEN_ATQENABLE_MASK;
-		wr32(hw, I40E_VF_ATQLEN1, reg);
-	} else {
-		reg |= I40E_PF_ATQLEN_ATQENABLE_MASK;
-		wr32(hw, I40E_PF_ATQLEN, reg);
-	}
 
 	hw->aq.arq.next_to_use = 0;
 	hw->aq.arq.next_to_clean = 0;
 
 	i40e_config_arq_regs(hw);
-	reg = hw->aq.num_arq_entries;
-
-	if (hw->mac.type == I40E_MAC_VF) {
-		reg |= I40E_VF_ATQLEN_ATQENABLE_MASK;
-		wr32(hw, I40E_VF_ARQLEN1, reg);
-	} else {
-		reg |= I40E_PF_ATQLEN_ATQENABLE_MASK;
-		wr32(hw, I40E_PF_ARQLEN, reg);
-	}
 }
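
The i40e_adminq.c rework above wraps the first firmware-version query in a retry loop because the firmware may not be ready immediately after reset: on an AdminQ timeout the driver calls i40e_resume_aq() and tries again, up to ten times with 100 ms pauses. A userspace-flavoured sketch of that retry shape — the operation, its timeout code, and the resume hook are stand-ins for illustration, not the driver's real API:

#include <stdio.h>
#include <unistd.h>

#define ERR_TIMEOUT	(-1)
#define MAX_RETRIES	10

static int attempts;

/* stand-in for i40e_aq_get_firmware_version(): times out twice, then succeeds */
static int dummy_op(void)
{
	return (++attempts < 3) ? ERR_TIMEOUT : 0;
}

/* stand-in for i40e_resume_aq(): re-arm whatever state the retry needs */
static void dummy_resume(void)
{
}

int main(void)
{
	int ret, retry = 0;

	do {
		ret = dummy_op();
		if (ret != ERR_TIMEOUT)
			break;			/* success or a non-transient error */
		retry++;
		usleep(100 * 1000);		/* the driver uses msleep(100) */
		dummy_resume();
	} while (retry < MAX_RETRIES);

	printf("ret=%d after %d attempt(s)\n", ret, attempts);
	return ret ? 1 : 0;
}
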
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.h b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
index 22e5ed683e47..993f7685a911 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
  * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
  *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
@@ -32,20 +31,20 @@
 #include "i40e_adminq_cmd.h"
 
 #define I40E_ADMINQ_DESC(R, i)   \
-	(&(((struct i40e_aq_desc *)((R).desc))[i]))
+	(&(((struct i40e_aq_desc *)((R).desc_buf.va))[i]))
 
 #define I40E_ADMINQ_DESC_ALIGNMENT 4096
 
 struct i40e_adminq_ring {
-	void *desc;		/* Descriptor ring memory */
-	void *details;		/* ASQ details */
+	struct i40e_virt_mem dma_head;	/* space for dma structures */
+	struct i40e_dma_mem desc_buf;	/* descriptor ring memory */
+	struct i40e_virt_mem cmd_buf;	/* command buffer memory */
 
 	union {
 		struct i40e_dma_mem *asq_bi;
 		struct i40e_dma_mem *arq_bi;
 	} r;
 
-	u64 dma_addr;		/* Physical address of the ring */
 	u16 count;		/* Number of descriptors */
 	u16 rx_buf_len;		/* Admin Receive Queue buffer length */
 
@@ -56,6 +55,7 @@ struct i40e_adminq_ring {
 	/* used for queue tracking */
 	u32 head;
 	u32 tail;
+	u32 len;
 };
 
 /* ASQ transaction details */
@@ -69,7 +69,7 @@ struct i40e_asq_cmd_details {
 };
 
 #define I40E_ADMINQ_DETAILS(R, i)   \
-	(&(((struct i40e_asq_cmd_details *)((R).details))[i]))
+	(&(((struct i40e_asq_cmd_details *)((R).cmd_buf.va))[i]))
 
 /* ARQ event information */
 struct i40e_arq_event_info {
@@ -94,9 +94,6 @@ struct i40e_adminq_info {
 	struct mutex asq_mutex; /* Send queue lock */
 	struct mutex arq_mutex; /* Receive queue lock */
 
-	struct i40e_dma_mem asq_mem;    /* send queue dynamic memory */
-	struct i40e_dma_mem arq_mem;    /* receive queue dynamic memory */
-
 	/* last status values on send and receive queues */
 	enum i40e_admin_queue_err asq_last_status;
 	enum i40e_admin_queue_err arq_last_status;
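
With the i40e_adminq.h changes above, the descriptor ring and its bookkeeping move into dedicated i40e_dma_mem/i40e_virt_mem members, and I40E_ADMINQ_DESC() now indexes through desc_buf.va instead of a bare desc pointer. A stripped-down sketch of that indexing idiom, with simplified stand-in types (the real i40e structures carry more fields and use DMA-coherent allocations rather than calloc):

#include <stdio.h>
#include <stdlib.h>

/* simplified stand-ins for the driver's DMA/descriptor types */
struct dma_mem { void *va; unsigned long pa; };
struct aq_desc { unsigned short flags, opcode, datalen, retval; };
struct adminq_ring { struct dma_mem desc_buf; unsigned short count; };

/* same shape as I40E_ADMINQ_DESC(R, i): cast the ring memory and index it */
#define ADMINQ_DESC(R, i) (&(((struct aq_desc *)((R).desc_buf.va))[i]))

int main(void)
{
	struct adminq_ring asq = { .count = 4 };

	asq.desc_buf.va = calloc(asq.count, sizeof(struct aq_desc));
	if (!asq.desc_buf.va)
		return 1;

	ADMINQ_DESC(asq, 2)->opcode = 0x0001;	/* e.g. a get_version opcode */
	printf("desc[2].opcode = 0x%04x\n", ADMINQ_DESC(asq, 2)->opcode);

	free(asq.desc_buf.va);
	return 0;
}
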
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
index e61ebdd5a5f9..7b6374a8f8da 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
  * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
  *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
@@ -35,7 +34,7 @@
  */
 
 #define I40E_FW_API_VERSION_MAJOR  0x0001
-#define I40E_FW_API_VERSION_MINOR  0x0000
+#define I40E_FW_API_VERSION_MINOR  0x0001
 
 struct i40e_aq_desc {
 	__le16 flags;
@@ -137,10 +136,13 @@ enum i40e_admin_queue_opc {
 	i40e_aqc_opc_set_ns_proxy_entry     = 0x0105,
 
 	/* LAA */
-	i40e_aqc_opc_mng_laa                = 0x0106,
+	i40e_aqc_opc_mng_laa                = 0x0106,   /* AQ obsolete */
 	i40e_aqc_opc_mac_address_read       = 0x0107,
 	i40e_aqc_opc_mac_address_write      = 0x0108,
 
+	/* PXE */
+	i40e_aqc_opc_clear_pxe_mode         = 0x0110,
+
 	/* internal switch commands */
 	i40e_aqc_opc_get_switch_config         = 0x0200,
 	i40e_aqc_opc_add_statistics            = 0x0201,
@@ -317,13 +319,15 @@ struct i40e_aqc_get_version {
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_get_version);
 
-/* Send driver version (direct 0x0002) */
+/* Send driver version (indirect 0x0002) */
 struct i40e_aqc_driver_version {
 	u8     driver_major_ver;
 	u8     driver_minor_ver;
 	u8     driver_build_ver;
 	u8     driver_subbuild_ver;
-	u8     reserved[12];
+	u8     reserved[4];
+	__le32 address_high;
+	__le32 address_low;
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_driver_version);
@@ -479,7 +483,7 @@ struct i40e_aqc_mng_laa {
 	u8     reserved2[6];
 };
 
-/* Manage MAC Address Read Command (0x0107) */
+/* Manage MAC Address Read Command (indirect 0x0107) */
 struct i40e_aqc_mac_address_read {
 	__le16	command_flags;
 #define I40E_AQC_LAN_ADDR_VALID   0x10
@@ -517,6 +521,16 @@ struct i40e_aqc_mac_address_write {
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_write);
 
+/* PXE commands (0x011x) */
+
+/* Clear PXE Command and response  (direct 0x0110) */
+struct i40e_aqc_clear_pxe {
+	u8	rx_cnt;
+	u8	reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_clear_pxe);
+
 /* Switch configuration commands (0x02xx) */
 
 /* Used by many indirect commands that only pass an seid and a buffer in the
@@ -639,13 +653,15 @@ struct i40e_aqc_switch_resource_alloc_element_resp {
 	u8     reserved2[6];
 };
 
-/* Add VSI (indirect 0x210)
+/* Add VSI (indirect 0x0210)
  *    this indirect command uses struct i40e_aqc_vsi_properties_data
  *    as the indirect buffer (128 bytes)
  *
- * Update VSI (indirect 0x211) Get VSI (indirect 0x0212)
- *    use the generic i40e_aqc_switch_seid descriptor format
- *    use the same completion and data structure as Add VSI
+ * Update VSI (indirect 0x211)
+ *     uses the same data structure as Add VSI
+ *
+ * Get VSI (indirect 0x0212)
+ *     uses the same completion and data structure as Add VSI
  */
 struct i40e_aqc_add_get_update_vsi {
 	__le16 uplink_seid;
@@ -664,7 +680,6 @@ struct i40e_aqc_add_get_update_vsi {
 #define I40E_AQ_VSI_TYPE_PF             0x2
 #define I40E_AQ_VSI_TYPE_EMP_MNG        0x3
 #define I40E_AQ_VSI_FLAG_CASCADED_PV    0x4
-#define I40E_AQ_VSI_FLAG_CLOUD_VSI      0x8
 	__le32 addr_high;
 	__le32 addr_low;
 };
@@ -1026,7 +1041,9 @@ struct i40e_aqc_set_vsi_promiscuous_modes {
 #define I40E_AQC_SET_VSI_PROMISC_VLAN        0x10
 	__le16 seid;
 #define I40E_AQC_VSI_PROM_CMD_SEID_MASK      0x3FF
-	u8     reserved[10];
+	__le16 vlan_tag;
+#define I40E_AQC_SET_VSI_VLAN_VALID          0x8000
+	u8     reserved[8];
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_set_vsi_promiscuous_modes);
@@ -1179,33 +1196,46 @@ struct i40e_aqc_add_remove_cloud_filters_element_data {
 		} v4;
 		struct {
 			u8 data[16];
-			} v6;
-		} ipaddr;
+		} v6;
+	} ipaddr;
 	__le16 flags;
 #define I40E_AQC_ADD_CLOUD_FILTER_SHIFT                 0
 #define I40E_AQC_ADD_CLOUD_FILTER_MASK                  (0x3F << \
 					I40E_AQC_ADD_CLOUD_FILTER_SHIFT)
+/* 0x0000 reserved */
 #define I40E_AQC_ADD_CLOUD_FILTER_OIP                   0x0001
-#define I40E_AQC_ADD_CLOUD_FILTER_OIP_GRE               0x0002
+/* 0x0002 reserved */
 #define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN            0x0003
-#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_GRE        0x0004
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID     0x0004
+/* 0x0005 reserved */
 #define I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID           0x0006
-#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_VNL        0x0007
+/* 0x0007 reserved */
 /* 0x0008 reserved */
 #define I40E_AQC_ADD_CLOUD_FILTER_OMAC                  0x0009
 #define I40E_AQC_ADD_CLOUD_FILTER_IMAC                  0x000A
+#define I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC      0x000B
+#define I40E_AQC_ADD_CLOUD_FILTER_IIP                   0x000C
+
 #define I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE               0x0080
 #define I40E_AQC_ADD_CLOUD_VNK_SHIFT                    6
 #define I40E_AQC_ADD_CLOUD_VNK_MASK                     0x00C0
 #define I40E_AQC_ADD_CLOUD_FLAGS_IPV4                   0
 #define I40E_AQC_ADD_CLOUD_FLAGS_IPV6                   0x0100
-	__le32 key_low;
-	__le32 key_high;
+
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT               9
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK                0x1E00
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_XVLAN               0
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC          1
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NGE                 2
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_IP                  3
+
+	__le32 tenant_id;
+	u8     reserved[4];
 	__le16 queue_number;
 #define I40E_AQC_ADD_CLOUD_QUEUE_SHIFT                  0
 #define I40E_AQC_ADD_CLOUD_QUEUE_MASK                   (0x3F << \
 					I40E_AQC_ADD_CLOUD_QUEUE_SHIFT)
-	u8     reserved[14];
+	u8     reserved2[14];
 	/* response section */
 	u8     allocation_result;
 #define I40E_AQC_ADD_CLOUD_FILTER_SUCCESS         0x0
@@ -1548,7 +1578,7 @@ struct i40e_aqc_module_desc {
 
 struct i40e_aq_get_phy_abilities_resp {
 	__le32 phy_type;       /* bitmap using the above enum for offsets */
-	u8     link_speed;     /* bitmap using the above enum */
+	u8     link_speed;     /* bitmap using the above enum bit patterns */
 	u8     abilities;
 #define I40E_AQ_PHY_FLAG_PAUSE_TX         0x01
 #define I40E_AQ_PHY_FLAG_PAUSE_RX         0x02
@@ -1582,6 +1612,10 @@ struct i40e_aq_set_phy_config { /* same bits as above in all */
 	__le32 phy_type;
 	u8     link_speed;
 	u8     abilities;
+/* bits 0-2 use the values from get_phy_abilities_resp */
+#define I40E_AQ_PHY_ENABLE_LINK		0x08
+#define I40E_AQ_PHY_ENABLE_AN		0x10
+#define I40E_AQ_PHY_ENABLE_ATOMIC_LINK	0x20
 	__le16 eee_capability;
 	__le32 eeer;
 	u8     low_power_ctrl;
@@ -1914,22 +1948,33 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start);
 /* Add Udp Tunnel command and completion (direct 0x0B00) */
 struct i40e_aqc_add_udp_tunnel {
 	__le16 udp_port;
-	u8     header_len; /* in DWords, 1 to 15 */
-	u8     protocol_index;
-#define I40E_AQC_TUNNEL_TYPE_MAC    0x0
-#define I40E_AQC_TUNNEL_TYPE_UDP    0x1
-	u8     reserved[12];
+	u8     reserved0[3];
+	u8     protocol_type;
+#define I40E_AQC_TUNNEL_TYPE_VXLAN	0x00
+#define I40E_AQC_TUNNEL_TYPE_NGE	0x01
+#define I40E_AQC_TUNNEL_TYPE_TEREDO	0x10
+	u8     reserved1[10];
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel);
 
+struct i40e_aqc_add_udp_tunnel_completion {
+	__le16 udp_port;
+	u8     filter_entry_index;
+	u8     multiple_pfs;
+#define I40E_AQC_SINGLE_PF	0x0
+#define I40E_AQC_MULTIPLE_PFS	0x1
+	u8     total_filters;
+	u8     reserved[11];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel_completion);
+
 /* remove UDP Tunnel command (0x0B01) */
 struct i40e_aqc_remove_udp_tunnel {
 	u8     reserved[2];
 	u8     index; /* 0 to 15 */
-	u8     pf_filters;
-	u8     total_filters;
-	u8     reserved2[11];
+	u8     reserved2[13];
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_udp_tunnel);
@@ -1937,28 +1982,32 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_udp_tunnel);
 struct i40e_aqc_del_udp_tunnel_completion {
 	__le16 udp_port;
 	u8     index; /* 0 to 15 */
-	u8     multiple_entries;
-	u8     tunnels_used;
-	u8     reserved;
-	u8     tunnels_free;
-	u8     reserved1[9];
+	u8     multiple_pfs;
+	u8     total_filters_used;
+	u8     reserved1[11];
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion);
 
 /* tunnel key structure 0x0B10 */
+
 struct i40e_aqc_tunnel_key_structure {
-	__le16     key1_off;
-	__le16     key1_len;
-	__le16     key2_off;
-	__le16     key2_len;
-	__le16     flags;
+	u8	key1_off;
+	u8	key2_off;
+	u8	key1_len;  /* 0 to 15 */
+	u8	key2_len;  /* 0 to 15 */
+	u8	flags;
 #define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDE 0x01
 /* response flags */
 #define I40E_AQC_TUNNEL_KEY_STRUCT_SUCCESS    0x01
 #define I40E_AQC_TUNNEL_KEY_STRUCT_MODIFIED   0x02
 #define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDDEN 0x03
-	u8         resreved[6];
+	u8	network_key_index;
+#define I40E_AQC_NETWORK_KEY_INDEX_VXLAN		0x0
+#define I40E_AQC_NETWORK_KEY_INDEX_NGE			0x1
+#define I40E_AQC_NETWORK_KEY_INDEX_FLEX_MAC_IN_UDP	0x2
+#define I40E_AQC_NETWORK_KEY_INDEX_GRE			0x3
+	u8	reserved[10];
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_tunnel_key_structure);
@@ -2052,6 +2101,7 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_reg);
 #define I40E_AQ_CLUSTER_ID_DCB		8
 #define I40E_AQ_CLUSTER_ID_EMP_MEM	9
 #define I40E_AQ_CLUSTER_ID_PKT_BUF	10
+#define I40E_AQ_CLUSTER_ID_ALTRAM	11
 
 struct i40e_aqc_debug_dump_internals {
 	u8     cluster_id;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_alloc.h b/drivers/net/ethernet/intel/i40e/i40e_alloc.h
index 3b1cc214f9dc..926811ad44ac 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_alloc.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_alloc.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
  * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
  *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 1e4ea134975a..e7f38b57834d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
  * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
  *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
@@ -43,20 +42,20 @@ static i40e_status i40e_set_mac_type(struct i40e_hw *hw)
 
 	if (hw->vendor_id == PCI_VENDOR_ID_INTEL) {
 		switch (hw->device_id) {
-		case I40E_SFP_XL710_DEVICE_ID:
-		case I40E_SFP_X710_DEVICE_ID:
-		case I40E_QEMU_DEVICE_ID:
-		case I40E_KX_A_DEVICE_ID:
-		case I40E_KX_B_DEVICE_ID:
-		case I40E_KX_C_DEVICE_ID:
-		case I40E_KX_D_DEVICE_ID:
-		case I40E_QSFP_A_DEVICE_ID:
-		case I40E_QSFP_B_DEVICE_ID:
-		case I40E_QSFP_C_DEVICE_ID:
+		case I40E_DEV_ID_SFP_XL710:
+		case I40E_DEV_ID_SFP_X710:
+		case I40E_DEV_ID_QEMU:
+		case I40E_DEV_ID_KX_A:
+		case I40E_DEV_ID_KX_B:
+		case I40E_DEV_ID_KX_C:
+		case I40E_DEV_ID_KX_D:
+		case I40E_DEV_ID_QSFP_A:
+		case I40E_DEV_ID_QSFP_B:
+		case I40E_DEV_ID_QSFP_C:
 			hw->mac.type = I40E_MAC_XL710;
 			break;
-		case I40E_VF_DEVICE_ID:
-		case I40E_VF_HV_DEVICE_ID:
+		case I40E_DEV_ID_VF:
+		case I40E_DEV_ID_VF_HV:
 			hw->mac.type = I40E_MAC_VF;
 			break;
 		default:
@@ -75,7 +74,8 @@ static i40e_status i40e_set_mac_type(struct i40e_hw *hw)
 /**
  * i40e_debug_aq
  * @hw: debug mask related to admin queue
- * @cap: pointer to adminq command descriptor
+ * @mask: debug mask
+ * @desc: pointer to admin queue descriptor
  * @buffer: pointer to command buffer
  *
  * Dumps debug log about adminq command with descriptor contents.
@@ -126,6 +126,43 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
 }
 
 /**
+ * i40e_check_asq_alive
+ * @hw: pointer to the hw struct
+ *
+ * Returns true if Queue is enabled else false.
+ **/
+bool i40e_check_asq_alive(struct i40e_hw *hw)
+{
+	return !!(rd32(hw, hw->aq.asq.len) & I40E_PF_ATQLEN_ATQENABLE_MASK);
+}
+
+/**
+ * i40e_aq_queue_shutdown
+ * @hw: pointer to the hw struct
+ * @unloading: is the driver unloading itself
+ *
+ * Tell the Firmware that we're shutting down the AdminQ and whether
+ * or not the driver is unloading as well.
+ **/
+i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw,
+					     bool unloading)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_queue_shutdown *cmd =
+		(struct i40e_aqc_queue_shutdown *)&desc.params.raw;
+	i40e_status status;
+
+	i40e_fill_default_direct_cmd_desc(&desc,
+					  i40e_aqc_opc_queue_shutdown);
+
+	if (unloading)
+		cmd->driver_unloading = cpu_to_le32(I40E_AQ_DRIVER_UNLOADING);
+	status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
+
+	return status;
+}
+
+/**
  * i40e_init_shared_code - Initialize the shared code
  * @hw: pointer to hardware structure
  *
@@ -142,14 +179,6 @@ i40e_status i40e_init_shared_code(struct i40e_hw *hw)
 	i40e_status status = 0;
 	u32 reg;
 
-	hw->phy.get_link_info = true;
-
-	/* Determine port number */
-	reg = rd32(hw, I40E_PFGEN_PORTNUM);
-	reg = ((reg & I40E_PFGEN_PORTNUM_PORT_NUM_MASK) >>
-	       I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT);
-	hw->port = (u8)reg;
-
 	i40e_set_mac_type(hw);
 
 	switch (hw->mac.type) {
@@ -160,6 +189,21 @@ i40e_status i40e_init_shared_code(struct i40e_hw *hw)
 		break;
 	}
 
+	hw->phy.get_link_info = true;
+
+	/* Determine port number */
+	reg = rd32(hw, I40E_PFGEN_PORTNUM);
+	reg = ((reg & I40E_PFGEN_PORTNUM_PORT_NUM_MASK) >>
+	       I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT);
+	hw->port = (u8)reg;
+
+	/* Determine the PF number based on the PCI fn */
+	reg = rd32(hw, I40E_GLPCI_CAPSUP);
+	if (reg & I40E_GLPCI_CAPSUP_ARI_EN_MASK)
+		hw->pf_id = (u8)((hw->bus.device << 3) | hw->bus.func);
+	else
+		hw->pf_id = (u8)hw->bus.func;
+
 	status = i40e_init_nvm(hw);
 	return status;
 }
@@ -210,8 +254,11 @@ i40e_status i40e_aq_mac_address_write(struct i40e_hw *hw,
 	i40e_fill_default_direct_cmd_desc(&desc,
 					  i40e_aqc_opc_mac_address_write);
 	cmd_data->command_flags = cpu_to_le16(flags);
-	memcpy(&cmd_data->mac_sal, &mac_addr[0], 4);
-	memcpy(&cmd_data->mac_sah, &mac_addr[4], 2);
+	cmd_data->mac_sah = cpu_to_le16((u16)mac_addr[0] << 8 | mac_addr[1]);
+	cmd_data->mac_sal = cpu_to_le32(((u32)mac_addr[2] << 24) |
+					((u32)mac_addr[3] << 16) |
+					((u32)mac_addr[4] << 8) |
+					mac_addr[5]);
 
 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
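The packing above can be checked in isolation; the address below is an arbitrary example. mac_sah carries the first two octets and mac_sal the last four, most significant octet first, before the cpu_to_le* conversions.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t mac[6] = { 0x00, 0x1b, 0x21, 0xaa, 0xbb, 0xcc };
	uint16_t sah = (uint16_t)(((uint16_t)mac[0] << 8) | mac[1]);
	uint32_t sal = ((uint32_t)mac[2] << 24) | ((uint32_t)mac[3] << 16) |
		       ((uint32_t)mac[4] << 8) | mac[5];

	/* prints: sah=0x001b sal=0x21aabbcc */
	printf("sah=0x%04x sal=0x%08x\n", sah, sal);
	return 0;
}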
 
@@ -240,32 +287,53 @@ i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
 }
 
 /**
- * i40e_validate_mac_addr - Validate MAC address
- * @mac_addr: pointer to MAC address
- *
- * Tests a MAC address to ensure it is a valid Individual Address
+ * i40e_get_media_type - Gets media type
+ * @hw: pointer to the hardware structure
  **/
-i40e_status i40e_validate_mac_addr(u8 *mac_addr)
+static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
 {
-	i40e_status status = 0;
-
-	/* Make sure it is not a multicast address */
-	if (I40E_IS_MULTICAST(mac_addr)) {
-		hw_dbg(hw, "MAC address is multicast\n");
-		status = I40E_ERR_INVALID_MAC_ADDR;
-	/* Not a broadcast address */
-	} else if (I40E_IS_BROADCAST(mac_addr)) {
-		hw_dbg(hw, "MAC address is broadcast\n");
-		status = I40E_ERR_INVALID_MAC_ADDR;
-	/* Reject the zero address */
-	} else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
-		   mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) {
-		hw_dbg(hw, "MAC address is all zeros\n");
-		status = I40E_ERR_INVALID_MAC_ADDR;
+	enum i40e_media_type media;
+
+	switch (hw->phy.link_info.phy_type) {
+	case I40E_PHY_TYPE_10GBASE_SR:
+	case I40E_PHY_TYPE_10GBASE_LR:
+	case I40E_PHY_TYPE_40GBASE_SR4:
+	case I40E_PHY_TYPE_40GBASE_LR4:
+		media = I40E_MEDIA_TYPE_FIBER;
+		break;
+	case I40E_PHY_TYPE_100BASE_TX:
+	case I40E_PHY_TYPE_1000BASE_T:
+	case I40E_PHY_TYPE_10GBASE_T:
+		media = I40E_MEDIA_TYPE_BASET;
+		break;
+	case I40E_PHY_TYPE_10GBASE_CR1_CU:
+	case I40E_PHY_TYPE_40GBASE_CR4_CU:
+	case I40E_PHY_TYPE_10GBASE_CR1:
+	case I40E_PHY_TYPE_40GBASE_CR4:
+	case I40E_PHY_TYPE_10GBASE_SFPP_CU:
+		media = I40E_MEDIA_TYPE_DA;
+		break;
+	case I40E_PHY_TYPE_1000BASE_KX:
+	case I40E_PHY_TYPE_10GBASE_KX4:
+	case I40E_PHY_TYPE_10GBASE_KR:
+	case I40E_PHY_TYPE_40GBASE_KR4:
+		media = I40E_MEDIA_TYPE_BACKPLANE;
+		break;
+	case I40E_PHY_TYPE_SGMII:
+	case I40E_PHY_TYPE_XAUI:
+	case I40E_PHY_TYPE_XFI:
+	case I40E_PHY_TYPE_XLAUI:
+	case I40E_PHY_TYPE_XLPPI:
+	default:
+		media = I40E_MEDIA_TYPE_UNKNOWN;
+		break;
 	}
-	return status;
+
+	return media;
 }
 
+#define I40E_PF_RESET_WAIT_COUNT_A0	200
+#define I40E_PF_RESET_WAIT_COUNT	10
 /**
  * i40e_pf_reset - Reset the PF
  * @hw: pointer to the hardware structure
@@ -275,7 +343,8 @@ i40e_status i40e_validate_mac_addr(u8 *mac_addr)
  **/
 i40e_status i40e_pf_reset(struct i40e_hw *hw)
 {
-	u32 wait_cnt = 0;
+	u32 cnt = 0;
+	u32 cnt1 = 0;
 	u32 reg = 0;
 	u32 grst_del;
 
@@ -285,7 +354,7 @@ i40e_status i40e_pf_reset(struct i40e_hw *hw)
 	 */
 	grst_del = rd32(hw, I40E_GLGEN_RSTCTL) & I40E_GLGEN_RSTCTL_GRSTDEL_MASK
 			>> I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
-	for (wait_cnt = 0; wait_cnt < grst_del + 2; wait_cnt++) {
+	for (cnt = 0; cnt < grst_del + 2; cnt++) {
 		reg = rd32(hw, I40E_GLGEN_RSTAT);
 		if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
 			break;
@@ -296,17 +365,37 @@ i40e_status i40e_pf_reset(struct i40e_hw *hw)
 		return I40E_ERR_RESET_FAILED;
 	}
 
-	/* Determine the PF number based on the PCI fn */
-	hw->pf_id = (u8)hw->bus.func;
+	/* Now wait for the FW to be ready */
+	for (cnt1 = 0; cnt1 < I40E_PF_RESET_WAIT_COUNT; cnt1++) {
+		reg = rd32(hw, I40E_GLNVM_ULD);
+		reg &= (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
+			I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK);
+		if (reg == (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
+			    I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK)) {
+			hw_dbg(hw, "Core and Global modules ready %d\n", cnt1);
+			break;
+		}
+		usleep_range(10000, 20000);
+	}
+	if (!(reg & (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
+		     I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))) {
+		hw_dbg(hw, "wait for FW Reset complete timedout\n");
+		hw_dbg(hw, "I40E_GLNVM_ULD = 0x%x\n", reg);
+		return I40E_ERR_RESET_FAILED;
+	}
 
 	/* If there was a Global Reset in progress when we got here,
 	 * we don't need to do the PF Reset
 	 */
-	if (!wait_cnt) {
+	if (!cnt) {
+		if (hw->revision_id == 0)
+			cnt = I40E_PF_RESET_WAIT_COUNT_A0;
+		else
+			cnt = I40E_PF_RESET_WAIT_COUNT;
 		reg = rd32(hw, I40E_PFGEN_CTRL);
 		wr32(hw, I40E_PFGEN_CTRL,
 		     (reg | I40E_PFGEN_CTRL_PFSWR_MASK));
-		for (wait_cnt = 0; wait_cnt < 10; wait_cnt++) {
+		for (; cnt; cnt--) {
 			reg = rd32(hw, I40E_PFGEN_CTRL);
 			if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK))
 				break;
@@ -319,6 +408,7 @@ i40e_status i40e_pf_reset(struct i40e_hw *hw)
 	}
 
 	i40e_clear_pxe_mode(hw);
+
 	return 0;
 }
 
@@ -335,9 +425,47 @@ void i40e_clear_pxe_mode(struct i40e_hw *hw)
 
 	/* Clear single descriptor fetch/write-back mode */
 	reg = rd32(hw, I40E_GLLAN_RCTL_0);
-	wr32(hw, I40E_GLLAN_RCTL_0, (reg | I40E_GLLAN_RCTL_0_PXE_MODE_MASK));
+
+	if (hw->revision_id == 0) {
+		/* As a workaround, clear PXE_MODE instead of setting it */
+		wr32(hw, I40E_GLLAN_RCTL_0, (reg & (~I40E_GLLAN_RCTL_0_PXE_MODE_MASK)));
+	} else {
+		wr32(hw, I40E_GLLAN_RCTL_0, (reg | I40E_GLLAN_RCTL_0_PXE_MODE_MASK));
+	}
+}
+
+/**
+ * i40e_led_is_mine - helper to find matching led
+ * @hw: pointer to the hw struct
+ * @idx: index into GPIO registers
+ *
+ * returns: 0 if no match, otherwise the value of the GPIO_CTL register
+ */
+static u32 i40e_led_is_mine(struct i40e_hw *hw, int idx)
+{
+	u32 gpio_val = 0;
+	u32 port;
+
+	if (!hw->func_caps.led[idx])
+		return 0;
+
+	gpio_val = rd32(hw, I40E_GLGEN_GPIO_CTL(idx));
+	port = (gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK) >>
+		I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT;
+
+	/* if PRT_NUM_NA is 1 then this LED is not port specific, OR
+	 * if it is not our port then ignore
+	 */
+	if ((gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_MASK) ||
+	    (port != hw->port))
+		return 0;
+
+	return gpio_val;
 }
 
+#define I40E_LED0 22
+#define I40E_LINK_ACTIVITY 0xC
+
 /**
  * i40e_led_get - return current on/off mode
  * @hw: pointer to the hw struct
@@ -349,24 +477,20 @@ void i40e_clear_pxe_mode(struct i40e_hw *hw)
  **/
 u32 i40e_led_get(struct i40e_hw *hw)
 {
-	u32 gpio_val = 0;
 	u32 mode = 0;
-	u32 port;
 	int i;
 
-	for (i = 0; i < I40E_HW_CAP_MAX_GPIO; i++) {
-		if (!hw->func_caps.led[i])
-			continue;
-
-		gpio_val = rd32(hw, I40E_GLGEN_GPIO_CTL(i));
-		port = (gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK)
-			>> I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT;
+	/* as per the documentation GPIO 22-29 are the LED
+	 * GPIO pins named LED0..LED7
+	 */
+	for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) {
+		u32 gpio_val = i40e_led_is_mine(hw, i);
 
-		if (port != hw->port)
+		if (!gpio_val)
 			continue;
 
-		mode = (gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK)
-				>> I40E_GLGEN_GPIO_CTL_INT_MODE_SHIFT;
+		mode = (gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) >>
+			I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT;
 		break;
 	}
 
@@ -376,60 +500,45 @@ u32 i40e_led_get(struct i40e_hw *hw)
 /**
  * i40e_led_set - set new on/off mode
  * @hw: pointer to the hw struct
- * @mode: 0=off, else on (see EAS for mode details)
+ * @mode: 0=off, 0xf=on (else see manual for mode details)
+ * @blink: true if the LED should blink when on, false if steady
+ *
+ * If this function is used to turn on the blink, it should also
+ * be used to disable the blink when restoring the original state.
  **/
-void i40e_led_set(struct i40e_hw *hw, u32 mode)
+void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
 {
-	u32 gpio_val = 0;
-	u32 led_mode = 0;
-	u32 port;
 	int i;
 
-	for (i = 0; i < I40E_HW_CAP_MAX_GPIO; i++) {
-		if (!hw->func_caps.led[i])
-			continue;
+	if (mode & 0xfffffff0)
+		hw_dbg(hw, "invalid mode passed in %X\n", mode);
 
-		gpio_val = rd32(hw, I40E_GLGEN_GPIO_CTL(i));
-		port = (gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK)
-			>> I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT;
+	/* as per the documentation GPIO 22-29 are the LED
+	 * GPIO pins named LED0..LED7
+	 */
+	for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) {
+		u32 gpio_val = i40e_led_is_mine(hw, i);
 
-		if (port != hw->port)
+		if (!gpio_val)
 			continue;
 
-		led_mode = (mode << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) &
-			    I40E_GLGEN_GPIO_CTL_LED_MODE_MASK;
 		gpio_val &= ~I40E_GLGEN_GPIO_CTL_LED_MODE_MASK;
-		gpio_val |= led_mode;
+		/* this & is a bit of paranoia, but serves as a range check */
+		gpio_val |= ((mode << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) &
+			     I40E_GLGEN_GPIO_CTL_LED_MODE_MASK);
+
+		if (mode == I40E_LINK_ACTIVITY)
+			blink = false;
+
+		gpio_val |= (blink ? 1 : 0) <<
+			    I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT;
+
 		wr32(hw, I40E_GLGEN_GPIO_CTL(i), gpio_val);
+		break;
 	}
 }
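A hedged sketch of how the new blink argument might be used for an identify-style blink and restore, as the comment on i40e_led_set() requires; the helper below is illustrative and not part of this patch.

/* Illustrative identify-LED sequence, assuming a valid struct i40e_hw *hw */
static void example_identify_led(struct i40e_hw *hw, bool start)
{
	static u32 saved_mode;			/* sketch only; not re-entrant */

	if (start) {
		saved_mode = i40e_led_get(hw);
		i40e_led_set(hw, 0xf, true);		/* force on, blinking */
	} else {
		i40e_led_set(hw, saved_mode, false);	/* restore, no blink */
	}
}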
 
 /* Admin command wrappers */
-/**
- * i40e_aq_queue_shutdown
- * @hw: pointer to the hw struct
- * @unloading: is the driver unloading itself
- *
- * Tell the Firmware that we're shutting down the AdminQ and whether
- * or not the driver is unloading as well.
- **/
-i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw,
-					     bool unloading)
-{
-	struct i40e_aq_desc desc;
-	struct i40e_aqc_queue_shutdown *cmd =
-		(struct i40e_aqc_queue_shutdown *)&desc.params.raw;
-	i40e_status status;
-
-	i40e_fill_default_direct_cmd_desc(&desc,
-					  i40e_aqc_opc_queue_shutdown);
-
-	if (unloading)
-		cmd->driver_unloading = cpu_to_le32(I40E_AQ_DRIVER_UNLOADING);
-	status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
-
-	return status;
-}
 
 /**
  * i40e_aq_set_link_restart_an
@@ -490,15 +599,16 @@ i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
 		goto aq_get_link_info_exit;
 
 	/* save off old link status information */
-	memcpy(&hw->phy.link_info_old, hw_link_info,
-	       sizeof(struct i40e_link_status));
+	hw->phy.link_info_old = *hw_link_info;
 
 	/* update link status */
 	hw_link_info->phy_type = (enum i40e_aq_phy_type)resp->phy_type;
+	hw->phy.media_type = i40e_get_media_type(hw);
 	hw_link_info->link_speed = (enum i40e_aq_link_speed)resp->link_speed;
 	hw_link_info->link_info = resp->link_info;
 	hw_link_info->an_info = resp->an_info;
 	hw_link_info->ext_info = resp->ext_info;
+	hw_link_info->loopback = resp->loopback;
 
 	if (resp->command_flags & cpu_to_le16(I40E_AQ_LSE_ENABLE))
 		hw_link_info->lse_enable = true;
@@ -519,7 +629,7 @@ aq_get_link_info_exit:
 /**
  * i40e_aq_add_vsi
  * @hw: pointer to the hw struct
- * @vsi: pointer to a vsi context struct
+ * @vsi_ctx: pointer to a vsi context struct
  * @cmd_details: pointer to command details structure or NULL
  *
  * Add a VSI context to the hardware.
@@ -571,7 +681,8 @@ aq_add_vsi_exit:
  * @cmd_details: pointer to command details structure or NULL
  **/
 i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
-				u16 seid, bool set, struct i40e_asq_cmd_details *cmd_details)
+				u16 seid, bool set,
+				struct i40e_asq_cmd_details *cmd_details)
 {
 	struct i40e_aq_desc desc;
 	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
@@ -665,7 +776,7 @@ i40e_status i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
 /**
  * i40e_get_vsi_params - get VSI configuration info
  * @hw: pointer to the hw struct
- * @vsi: pointer to a vsi context struct
+ * @vsi_ctx: pointer to a vsi context struct
  * @cmd_details: pointer to command details structure or NULL
  **/
 i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw,
@@ -673,8 +784,8 @@ i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw,
 				struct i40e_asq_cmd_details *cmd_details)
 {
 	struct i40e_aq_desc desc;
-	struct i40e_aqc_switch_seid *cmd =
-		(struct i40e_aqc_switch_seid *)&desc.params.raw;
+	struct i40e_aqc_add_get_update_vsi *cmd =
+		(struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
 	struct i40e_aqc_add_get_update_vsi_completion *resp =
 		(struct i40e_aqc_add_get_update_vsi_completion *)
 		&desc.params.raw;
@@ -683,7 +794,7 @@ i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw,
 	i40e_fill_default_direct_cmd_desc(&desc,
 					  i40e_aqc_opc_get_vsi_parameters);
 
-	cmd->seid = cpu_to_le16(vsi_ctx->seid);
+	cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid);
 
 	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
 	if (sizeof(vsi_ctx->info) > I40E_AQ_LARGE_BUF)
@@ -707,7 +818,7 @@ aq_get_vsi_params_exit:
 /**
  * i40e_aq_update_vsi_params
  * @hw: pointer to the hw struct
- * @vsi: pointer to a vsi context struct
+ * @vsi_ctx: pointer to a vsi context struct
  * @cmd_details: pointer to command details structure or NULL
  *
  * Update a VSI context.
@@ -717,13 +828,13 @@ i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw,
 				struct i40e_asq_cmd_details *cmd_details)
 {
 	struct i40e_aq_desc desc;
-	struct i40e_aqc_switch_seid *cmd =
-		(struct i40e_aqc_switch_seid *)&desc.params.raw;
+	struct i40e_aqc_add_get_update_vsi *cmd =
+		(struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
 	i40e_status status;
 
 	i40e_fill_default_direct_cmd_desc(&desc,
 					  i40e_aqc_opc_update_vsi_parameters);
-	cmd->seid = cpu_to_le16(vsi_ctx->seid);
+	cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid);
 
 	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
 	if (sizeof(vsi_ctx->info) > I40E_AQ_LARGE_BUF)
@@ -810,7 +921,6 @@ i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw,
 /**
  * i40e_aq_send_driver_version
  * @hw: pointer to the hw struct
- * @event: driver event: driver ok, start or stop
  * @dv: driver's major, minor version
  * @cmd_details: pointer to command details structure or NULL
  *
@@ -873,6 +983,7 @@ i40e_get_link_status_exit:
  * @downlink_seid: the VSI SEID
  * @enabled_tc: bitmap of TCs to be enabled
  * @default_port: true for default port VSI, false for control port
+ * @enable_l2_filtering: true to add L2 filter table rules to regular forwarding rules for cloud support
  * @veb_seid: pointer to where to put the resulting VEB SEID
  * @cmd_details: pointer to command details structure or NULL
  *
@@ -881,7 +992,8 @@ i40e_get_link_status_exit:
  **/
 i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
 				u16 downlink_seid, u8 enabled_tc,
-				bool default_port, u16 *veb_seid,
+				bool default_port, bool enable_l2_filtering,
+				u16 *veb_seid,
 				struct i40e_asq_cmd_details *cmd_details)
 {
 	struct i40e_aq_desc desc;
@@ -907,6 +1019,10 @@ i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
 		veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT;
 	else
 		veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DATA;
+
+	if (enable_l2_filtering)
+		veb_flags |= I40E_AQC_ADD_VEB_ENABLE_L2_FILTER;
+
 	cmd->veb_flags = cpu_to_le16(veb_flags);
 
 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
@@ -922,10 +1038,10 @@ i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
  * @hw: pointer to the hw struct
  * @veb_seid: the SEID of the VEB to query
  * @switch_id: the uplink switch id
- * @floating_veb: set to true if the VEB is floating
+ * @floating: set to true if the VEB is floating
  * @statistic_index: index of the stats counter block for this VEB
  * @vebs_used: number of VEB's used by function
- * @vebs_unallocated: total VEB's not reserved by any function
+ * @vebs_free: total VEBs not reserved by any function
  * @cmd_details: pointer to command details structure or NULL
  *
  * This retrieves the parameters for a particular VEB, specified by
@@ -1059,89 +1175,11 @@ i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid,
 }
 
 /**
- * i40e_aq_add_vlan - Add VLAN ids to the HW filtering
- * @hw: pointer to the hw struct
- * @seid: VSI for the vlan filters
- * @v_list: list of vlan filters to be added
- * @count: length of the list
- * @cmd_details: pointer to command details structure or NULL
- **/
-i40e_status i40e_aq_add_vlan(struct i40e_hw *hw, u16 seid,
-			struct i40e_aqc_add_remove_vlan_element_data *v_list,
-			u8 count, struct i40e_asq_cmd_details *cmd_details)
-{
-	struct i40e_aq_desc desc;
-	struct i40e_aqc_macvlan *cmd =
-		(struct i40e_aqc_macvlan *)&desc.params.raw;
-	i40e_status status;
-	u16 buf_size;
-
-	if (count == 0 || !v_list || !hw)
-		return I40E_ERR_PARAM;
-
-	buf_size = count * sizeof(struct i40e_aqc_add_remove_vlan_element_data);
-
-	/* prep the rest of the request */
-	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_vlan);
-	cmd->num_addresses = cpu_to_le16(count);
-	cmd->seid[0] = cpu_to_le16(seid | I40E_AQC_MACVLAN_CMD_SEID_VALID);
-	cmd->seid[1] = 0;
-	cmd->seid[2] = 0;
-
-	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
-	if (buf_size > I40E_AQ_LARGE_BUF)
-		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
-
-	status = i40e_asq_send_command(hw, &desc, v_list, buf_size,
-				       cmd_details);
-
-	return status;
-}
-
-/**
- * i40e_aq_remove_vlan - Remove VLANs from the HW filtering
- * @hw: pointer to the hw struct
- * @seid: VSI for the vlan filters
- * @v_list: list of macvlans to be removed
- * @count: length of the list
- * @cmd_details: pointer to command details structure or NULL
- **/
-i40e_status i40e_aq_remove_vlan(struct i40e_hw *hw, u16 seid,
-			struct i40e_aqc_add_remove_vlan_element_data *v_list,
-			u8 count, struct i40e_asq_cmd_details *cmd_details)
-{
-	struct i40e_aq_desc desc;
-	struct i40e_aqc_macvlan *cmd =
-		(struct i40e_aqc_macvlan *)&desc.params.raw;
-	i40e_status status;
-	u16 buf_size;
-
-	if (count == 0 || !v_list || !hw)
-		return I40E_ERR_PARAM;
-
-	buf_size = count * sizeof(struct i40e_aqc_add_remove_vlan_element_data);
-
-	/* prep the rest of the request */
-	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_vlan);
-	cmd->num_addresses = cpu_to_le16(count);
-	cmd->seid[0] = cpu_to_le16(seid | I40E_AQC_MACVLAN_CMD_SEID_VALID);
-	cmd->seid[1] = 0;
-	cmd->seid[2] = 0;
-
-	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
-	if (buf_size > I40E_AQ_LARGE_BUF)
-		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
-
-	status = i40e_asq_send_command(hw, &desc, v_list, buf_size,
-				       cmd_details);
-
-	return status;
-}
-
-/**
  * i40e_aq_send_msg_to_vf
  * @hw: pointer to the hardware structure
  * @vfid: vf id to send msg
+ * @v_opcode: opcodes for VF-PF communication
+ * @v_retval: return error code
  * @msg: pointer to the msg buffer
  * @msglen: msg length
  * @cmd_details: pointer to command details
@@ -1519,8 +1557,8 @@ i40e_status i40e_aq_discover_capabilities(struct i40e_hw *hw,
 				struct i40e_asq_cmd_details *cmd_details)
 {
 	struct i40e_aqc_list_capabilites *cmd;
-	i40e_status status = 0;
 	struct i40e_aq_desc desc;
+	i40e_status status = 0;
 
 	cmd = (struct i40e_aqc_list_capabilites *)&desc.params.raw;
 
@@ -1681,6 +1719,63 @@ i40e_status i40e_aq_start_lldp(struct i40e_hw *hw,
 }
 
 /**
+ * i40e_aq_add_udp_tunnel
+ * @hw: pointer to the hw struct
+ * @udp_port: the UDP port to add
+ * @header_len: length of the tunneling header in DWords
+ * @protocol_index: protocol index type
+ * @filter_index: pointer to filter index
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
+				u16 udp_port, u8 header_len,
+				u8 protocol_index, u8 *filter_index,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_add_udp_tunnel *cmd =
+		(struct i40e_aqc_add_udp_tunnel *)&desc.params.raw;
+	struct i40e_aqc_del_udp_tunnel_completion *resp =
+		(struct i40e_aqc_del_udp_tunnel_completion *)&desc.params.raw;
+	i40e_status status;
+
+	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_udp_tunnel);
+
+	cmd->udp_port = cpu_to_le16(udp_port);
+	cmd->protocol_type = protocol_index;
+
+	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+	if (!status)
+		*filter_index = resp->index;
+
+	return status;
+}
+
+/**
+ * i40e_aq_del_udp_tunnel
+ * @hw: pointer to the hw struct
+ * @index: filter index
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_remove_udp_tunnel *cmd =
+		(struct i40e_aqc_remove_udp_tunnel *)&desc.params.raw;
+	i40e_status status;
+
+	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_del_udp_tunnel);
+
+	cmd->index = index;
+
+	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+	return status;
+}
+
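A minimal pairing of the two wrappers above; the port is the IANA VXLAN port and the protocol index is left to the caller, since its encoding is not shown in this hunk. The helper name is illustrative only.

/* Illustrative add/remove pairing, assuming a valid struct i40e_hw *hw */
static i40e_status example_udp_tunnel(struct i40e_hw *hw, u8 proto_idx)
{
	u8 filter_index;
	i40e_status status;

	/* 4789 is the IANA-assigned VXLAN port; proto_idx is caller supplied */
	status = i40e_aq_add_udp_tunnel(hw, 4789, 0, proto_idx,
					&filter_index, NULL);
	if (status)
		return status;

	/* ... later, tear the filter down again using the returned index ... */
	return i40e_aq_del_udp_tunnel(hw, filter_index, NULL);
}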
+/**
  * i40e_aq_delete_element - Delete switch element
  * @hw: pointer to the hw struct
  * @seid: the SEID to delete from the switch
@@ -1709,6 +1804,28 @@ i40e_status i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
 }
 
 /**
+ * i40e_aq_dcb_updated - DCB Updated Command
+ * @hw: pointer to the hw struct
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * EMP will return when the shared RPB settings have been
+ * recomputed and modified. The retval field in the descriptor
+ * will be set to 0 when RPB is modified.
+ **/
+i40e_status i40e_aq_dcb_updated(struct i40e_hw *hw,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	i40e_status status;
+
+	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_dcb_updated);
+
+	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+	return status;
+}
+
+/**
  * i40e_aq_tx_sched_cmd - generic Tx scheduler AQ command handler
  * @hw: pointer to the hw struct
  * @seid: seid for the physical port/switching component/vsi
@@ -1787,6 +1904,40 @@ i40e_status i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw,
 }
 
 /**
+ * i40e_aq_config_switch_comp_ets - Enable/Disable/Modify ETS on the port
+ * @hw: pointer to the hw struct
+ * @seid: seid of the switching component connected to Physical Port
+ * @ets_data: Buffer holding ETS parameters
+ * @opcode: Tx scheduler AQ command opcode
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_config_switch_comp_ets(struct i40e_hw *hw,
+		u16 seid,
+		struct i40e_aqc_configure_switching_comp_ets_data *ets_data,
+		enum i40e_admin_queue_opc opcode,
+		struct i40e_asq_cmd_details *cmd_details)
+{
+	return i40e_aq_tx_sched_cmd(hw, seid, (void *)ets_data,
+				    sizeof(*ets_data), opcode, cmd_details);
+}
+
+/**
+ * i40e_aq_config_switch_comp_bw_config - Config Switch comp BW Alloc per TC
+ * @hw: pointer to the hw struct
+ * @seid: seid of the switching component
+ * @bw_data: Buffer holding enabled TCs, relative/absolute TC BW limit/credits
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_config_switch_comp_bw_config(struct i40e_hw *hw,
+	u16 seid,
+	struct i40e_aqc_configure_switching_comp_bw_config_data *bw_data,
+	struct i40e_asq_cmd_details *cmd_details)
+{
+	return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
+			    i40e_aqc_opc_configure_switching_comp_bw_config,
+			    cmd_details);
+}
+
+/**
  * i40e_aq_query_vsi_bw_config - Query VSI BW configuration
  * @hw: pointer to the hw struct
  * @seid: seid of the VSI
@@ -2039,3 +2190,110 @@ i40e_status i40e_set_filter_control(struct i40e_hw *hw,
 
 	return 0;
 }
+
+/**
+ * i40e_aq_add_rem_control_packet_filter - Add or Remove Control Packet Filter
+ * @hw: pointer to the hw struct
+ * @mac_addr: MAC address to use in the filter
+ * @ethtype: Ethertype to use in the filter
+ * @flags: Flags that needs to be applied to the filter
+ * @vsi_seid: seid of the control VSI
+ * @queue: VSI queue number to send the packet to
+ * @is_add: Add control packet filter if True else remove
+ * @stats: Structure to hold information on control filter counts
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * This command will add or remove a control packet filter for a control VSI.
+ * In return it will update the control filter usage counts in the
+ * stats member.
+ **/
+i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
+				u8 *mac_addr, u16 ethtype, u16 flags,
+				u16 vsi_seid, u16 queue, bool is_add,
+				struct i40e_control_filter_stats *stats,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_add_remove_control_packet_filter *cmd =
+		(struct i40e_aqc_add_remove_control_packet_filter *)
+		&desc.params.raw;
+	struct i40e_aqc_add_remove_control_packet_filter_completion *resp =
+		(struct i40e_aqc_add_remove_control_packet_filter_completion *)
+		&desc.params.raw;
+	i40e_status status;
+
+	if (vsi_seid == 0)
+		return I40E_ERR_PARAM;
+
+	if (is_add) {
+		i40e_fill_default_direct_cmd_desc(&desc,
+				i40e_aqc_opc_add_control_packet_filter);
+		cmd->queue = cpu_to_le16(queue);
+	} else {
+		i40e_fill_default_direct_cmd_desc(&desc,
+				i40e_aqc_opc_remove_control_packet_filter);
+	}
+
+	if (mac_addr)
+		memcpy(cmd->mac, mac_addr, ETH_ALEN);
+
+	cmd->etype = cpu_to_le16(ethtype);
+	cmd->flags = cpu_to_le16(flags);
+	cmd->seid = cpu_to_le16(vsi_seid);
+
+	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+	if (!status && stats) {
+		stats->mac_etype_used = le16_to_cpu(resp->mac_etype_used);
+		stats->etype_used = le16_to_cpu(resp->etype_used);
+		stats->mac_etype_free = le16_to_cpu(resp->mac_etype_free);
+		stats->etype_free = le16_to_cpu(resp->etype_free);
+	}
+
+	return status;
+}
+
+/**
+ * i40e_set_pci_config_data - store PCI bus info
+ * @hw: pointer to hardware structure
+ * @link_status: the link status word from PCI config space
+ *
+ * Stores the PCI bus info (speed, width, type) within the i40e_hw structure
+ **/
+void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status)
+{
+	hw->bus.type = i40e_bus_type_pci_express;
+
+	switch (link_status & PCI_EXP_LNKSTA_NLW) {
+	case PCI_EXP_LNKSTA_NLW_X1:
+		hw->bus.width = i40e_bus_width_pcie_x1;
+		break;
+	case PCI_EXP_LNKSTA_NLW_X2:
+		hw->bus.width = i40e_bus_width_pcie_x2;
+		break;
+	case PCI_EXP_LNKSTA_NLW_X4:
+		hw->bus.width = i40e_bus_width_pcie_x4;
+		break;
+	case PCI_EXP_LNKSTA_NLW_X8:
+		hw->bus.width = i40e_bus_width_pcie_x8;
+		break;
+	default:
+		hw->bus.width = i40e_bus_width_unknown;
+		break;
+	}
+
+	switch (link_status & PCI_EXP_LNKSTA_CLS) {
+	case PCI_EXP_LNKSTA_CLS_2_5GB:
+		hw->bus.speed = i40e_bus_speed_2500;
+		break;
+	case PCI_EXP_LNKSTA_CLS_5_0GB:
+		hw->bus.speed = i40e_bus_speed_5000;
+		break;
+	case PCI_EXP_LNKSTA_CLS_8_0GB:
+		hw->bus.speed = i40e_bus_speed_8000;
+		break;
+	default:
+		hw->bus.speed = i40e_bus_speed_unknown;
+		break;
+	}
+}
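One plausible call-site shape for the helper above, sketched under the assumption that the link status word is read with the standard pcie_capability_read_word() accessor (requires <linux/pci.h>); this patch does not show the actual caller.

/* Illustrative call-site sketch; pdev and hw are assumed to be valid */
static void example_cache_bus_info(struct i40e_hw *hw, struct pci_dev *pdev)
{
	u16 link_status = 0;

	pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &link_status);
	i40e_set_pci_config_data(hw, link_status);

	if (hw->bus.width == i40e_bus_width_pcie_x8 &&
	    hw->bus.speed == i40e_bus_speed_8000)
		hw_dbg(hw, "running at full x8 Gen3 width/speed\n");
}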
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
new file mode 100644
index 000000000000..50730141bb7b
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
@@ -0,0 +1,469 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#include "i40e_adminq.h"
+#include "i40e_prototype.h"
+#include "i40e_dcb.h"
+
+/**
+ * i40e_get_dcbx_status
+ * @hw: pointer to the hw struct
+ * @status: Embedded DCBX Engine Status
+ *
+ * Get the DCBX status from the Firmware
+ **/
+i40e_status i40e_get_dcbx_status(struct i40e_hw *hw, u16 *status)
+{
+	u32 reg;
+
+	if (!status)
+		return I40E_ERR_PARAM;
+
+	reg = rd32(hw, I40E_PRTDCB_GENS);
+	*status = (u16)((reg & I40E_PRTDCB_GENS_DCBX_STATUS_MASK) >>
+			I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT);
+
+	return 0;
+}
+
+/**
+ * i40e_parse_ieee_etscfg_tlv
+ * @tlv: IEEE 802.1Qaz ETS CFG TLV
+ * @dcbcfg: Local store to update ETS CFG data
+ *
+ * Parses IEEE 802.1Qaz ETS CFG TLV
+ **/
+static void i40e_parse_ieee_etscfg_tlv(struct i40e_lldp_org_tlv *tlv,
+				       struct i40e_dcbx_config *dcbcfg)
+{
+	struct i40e_ieee_ets_config *etscfg;
+	u8 *buf = tlv->tlvinfo;
+	u16 offset = 0;
+	u8 priority;
+	int i;
+
+	/* First Octet post subtype
+	 * --------------------------
+	 * |will-|CBS  | Re-  | Max |
+	 * |ing  |     |served| TCs |
+	 * --------------------------
+	 * |1bit | 1bit|3 bits|3bits|
+	 */
+	etscfg = &dcbcfg->etscfg;
+	etscfg->willing = (u8)((buf[offset] & I40E_IEEE_ETS_WILLING_MASK) >>
+			       I40E_IEEE_ETS_WILLING_SHIFT);
+	etscfg->cbs = (u8)((buf[offset] & I40E_IEEE_ETS_CBS_MASK) >>
+			   I40E_IEEE_ETS_CBS_SHIFT);
+	etscfg->maxtcs = (u8)((buf[offset] & I40E_IEEE_ETS_MAXTC_MASK) >>
+			      I40E_IEEE_ETS_MAXTC_SHIFT);
+
+	/* Move offset to Priority Assignment Table */
+	offset++;
+
+	/* Priority Assignment Table (4 octets)
+	 * Octets:|    1    |    2    |    3    |    4    |
+	 *        -----------------------------------------
+	 *        |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7|
+	 *        -----------------------------------------
+	 *   Bits:|7  4|3  0|7  4|3  0|7  4|3  0|7  4|3  0|
+	 *        -----------------------------------------
+	 */
+	for (i = 0; i < 4; i++) {
+		priority = (u8)((buf[offset] & I40E_IEEE_ETS_PRIO_1_MASK) >>
+				I40E_IEEE_ETS_PRIO_1_SHIFT);
+		etscfg->prioritytable[i * 2] =  priority;
+		priority = (u8)((buf[offset] & I40E_IEEE_ETS_PRIO_0_MASK) >>
+				I40E_IEEE_ETS_PRIO_0_SHIFT);
+		etscfg->prioritytable[i * 2 + 1] = priority;
+		offset++;
+	}
+
+	/* TC Bandwidth Table (8 octets)
+	 * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+	 *        ---------------------------------
+	 *        |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+	 *        ---------------------------------
+	 */
+	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+		etscfg->tcbwtable[i] = buf[offset++];
+
+	/* TSA Assignment Table (8 octets)
+	 * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+	 *        ---------------------------------
+	 *        |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+	 *        ---------------------------------
+	 */
+	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+		etscfg->tsatable[i] = buf[offset++];
+}
+
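To make the nibble handling above concrete, here is a standalone decode of one Priority Assignment Table octet with the same 3-bit fields; the octet value is arbitrary test data.

#include <stdint.h>
#include <stdio.h>

#define ETS_PRIO_0_MASK  0x07	/* low nibble, bits 2:0 */
#define ETS_PRIO_1_MASK  0x70	/* high nibble, bits 6:4 */
#define ETS_PRIO_1_SHIFT 4

int main(void)
{
	uint8_t octet = 0x21;	/* one octet of the 4-octet table */
	uint8_t first  = (octet & ETS_PRIO_1_MASK) >> ETS_PRIO_1_SHIFT;
	uint8_t second = octet & ETS_PRIO_0_MASK;

	/* prints: table[2i]=2 table[2i+1]=1 */
	printf("table[2i]=%u table[2i+1]=%u\n", first, second);
	return 0;
}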
+/**
+ * i40e_parse_ieee_etsrec_tlv
+ * @tlv: IEEE 802.1Qaz ETS REC TLV
+ * @dcbcfg: Local store to update ETS REC data
+ *
+ * Parses IEEE 802.1Qaz ETS REC TLV
+ **/
+static void i40e_parse_ieee_etsrec_tlv(struct i40e_lldp_org_tlv *tlv,
+				       struct i40e_dcbx_config *dcbcfg)
+{
+	u8 *buf = tlv->tlvinfo;
+	u16 offset = 0;
+	u8 priority;
+	int i;
+
+	/* Move offset to priority table */
+	offset++;
+
+	/* Priority Assignment Table (4 octets)
+	 * Octets:|    1    |    2    |    3    |    4    |
+	 *        -----------------------------------------
+	 *        |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7|
+	 *        -----------------------------------------
+	 *   Bits:|7  4|3  0|7  4|3  0|7  4|3  0|7  4|3  0|
+	 *        -----------------------------------------
+	 */
+	for (i = 0; i < 4; i++) {
+		priority = (u8)((buf[offset] & I40E_IEEE_ETS_PRIO_1_MASK) >>
+				I40E_IEEE_ETS_PRIO_1_SHIFT);
+		dcbcfg->etsrec.prioritytable[i*2] =  priority;
+		priority = (u8)((buf[offset] & I40E_IEEE_ETS_PRIO_0_MASK) >>
+				I40E_IEEE_ETS_PRIO_0_SHIFT);
+		dcbcfg->etsrec.prioritytable[i*2 + 1] = priority;
+		offset++;
+	}
+
+	/* TC Bandwidth Table (8 octets)
+	 * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+	 *        ---------------------------------
+	 *        |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+	 *        ---------------------------------
+	 */
+	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+		dcbcfg->etsrec.tcbwtable[i] = buf[offset++];
+
+	/* TSA Assignment Table (8 octets)
+	 * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+	 *        ---------------------------------
+	 *        |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+	 *        ---------------------------------
+	 */
+	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+		dcbcfg->etsrec.tsatable[i] = buf[offset++];
+}
+
+/**
+ * i40e_parse_ieee_pfccfg_tlv
+ * @tlv: IEEE 802.1Qaz PFC CFG TLV
+ * @dcbcfg: Local store to update PFC CFG data
+ *
+ * Parses IEEE 802.1Qaz PFC CFG TLV
+ **/
+static void i40e_parse_ieee_pfccfg_tlv(struct i40e_lldp_org_tlv *tlv,
+				       struct i40e_dcbx_config *dcbcfg)
+{
+	u8 *buf = tlv->tlvinfo;
+
+	/* ----------------------------------------
+	 * |will-|MBC  | Re-  | PFC |  PFC Enable  |
+	 * |ing  |     |served| cap |              |
+	 * -----------------------------------------
+	 * |1bit | 1bit|2 bits|4bits| 1 octet      |
+	 */
+	dcbcfg->pfc.willing = (u8)((buf[0] & I40E_IEEE_PFC_WILLING_MASK) >>
+				   I40E_IEEE_PFC_WILLING_SHIFT);
+	dcbcfg->pfc.mbc = (u8)((buf[0] & I40E_IEEE_PFC_MBC_MASK) >>
+			       I40E_IEEE_PFC_MBC_SHIFT);
+	dcbcfg->pfc.pfccap = (u8)((buf[0] & I40E_IEEE_PFC_CAP_MASK) >>
+				  I40E_IEEE_PFC_CAP_SHIFT);
+	dcbcfg->pfc.pfcenable = buf[1];
+}
+
+/**
+ * i40e_parse_ieee_app_tlv
+ * @tlv: IEEE 802.1Qaz APP TLV
+ * @dcbcfg: Local store to update APP PRIO data
+ *
+ * Parses IEEE 802.1Qaz APP PRIO TLV
+ **/
+static void i40e_parse_ieee_app_tlv(struct i40e_lldp_org_tlv *tlv,
+				    struct i40e_dcbx_config *dcbcfg)
+{
+	u16 typelength;
+	u16 offset = 0;
+	u16 length;
+	int i = 0;
+	u8 *buf;
+
+	typelength = ntohs(tlv->typelength);
+	length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >>
+		       I40E_LLDP_TLV_LEN_SHIFT);
+	buf = tlv->tlvinfo;
+
+	/* The App priority table starts 5 octets after TLV header */
+	length -= (sizeof(tlv->ouisubtype) + 1);
+
+	/* Move offset to App Priority Table */
+	offset++;
+
+	/* Application Priority Table (3 octets)
+	 * Octets:|         1          |    2    |    3    |
+	 *        -----------------------------------------
+	 *        |Priority|Rsrvd| Sel |    Protocol ID    |
+	 *        -----------------------------------------
+	 *   Bits:|23    21|20 19|18 16|15                0|
+	 *        -----------------------------------------
+	 */
+	while (offset < length) {
+		dcbcfg->app[i].priority = (u8)((buf[offset] &
+						I40E_IEEE_APP_PRIO_MASK) >>
+					       I40E_IEEE_APP_PRIO_SHIFT);
+		dcbcfg->app[i].selector = (u8)((buf[offset] &
+						I40E_IEEE_APP_SEL_MASK) >>
+					       I40E_IEEE_APP_SEL_SHIFT);
+		dcbcfg->app[i].protocolid = (buf[offset + 1] << 0x8) |
+					     buf[offset + 2];
+		/* Move to next app */
+		offset += 3;
+		i++;
+		if (i >= I40E_DCBX_MAX_APPS)
+			break;
+	}
+
+	dcbcfg->numapps = i;
+}
+
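As a concrete instance of the 3-octet layout above, an FCoE-style entry (used here only as a familiar example, not something this patch special-cases): priority 3, selector 1 and protocol ID 0x8906 arrive as the octets 0x61 0x89 0x06.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t app[3] = { 0x61, 0x89, 0x06 };		/* example entry */
	uint8_t  priority = (app[0] & 0xe0) >> 5;	/* bits 23:21 */
	uint8_t  selector = app[0] & 0x07;		/* bits 18:16 */
	uint16_t protocol = (uint16_t)((app[1] << 8) | app[2]);

	/* prints: prio=3 sel=1 proto=0x8906 */
	printf("prio=%u sel=%u proto=0x%04x\n", priority, selector, protocol);
	return 0;
}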
+/**
+ * i40e_parse_ieee_tlv
+ * @tlv: IEEE 802.1Qaz TLV
+ * @dcbcfg: Local store to update DCBX config data
+ *
+ * Get the TLV subtype and send it to parsing function
+ * based on the subtype value
+ **/
+static void i40e_parse_ieee_tlv(struct i40e_lldp_org_tlv *tlv,
+				struct i40e_dcbx_config *dcbcfg)
+{
+	u32 ouisubtype;
+	u8 subtype;
+
+	ouisubtype = ntohl(tlv->ouisubtype);
+	subtype = (u8)((ouisubtype & I40E_LLDP_TLV_SUBTYPE_MASK) >>
+		       I40E_LLDP_TLV_SUBTYPE_SHIFT);
+	switch (subtype) {
+	case I40E_IEEE_SUBTYPE_ETS_CFG:
+		i40e_parse_ieee_etscfg_tlv(tlv, dcbcfg);
+		break;
+	case I40E_IEEE_SUBTYPE_ETS_REC:
+		i40e_parse_ieee_etsrec_tlv(tlv, dcbcfg);
+		break;
+	case I40E_IEEE_SUBTYPE_PFC_CFG:
+		i40e_parse_ieee_pfccfg_tlv(tlv, dcbcfg);
+		break;
+	case I40E_IEEE_SUBTYPE_APP_PRI:
+		i40e_parse_ieee_app_tlv(tlv, dcbcfg);
+		break;
+	default:
+		break;
+	}
+}
+
+/**
+ * i40e_parse_org_tlv
+ * @tlv: Organization specific TLV
+ * @dcbcfg: Local store to update DCBX config data
+ *
+ * Currently only the IEEE 802.1Qaz TLV is supported, all others
+ * are ignored
+ **/
+static void i40e_parse_org_tlv(struct i40e_lldp_org_tlv *tlv,
+			       struct i40e_dcbx_config *dcbcfg)
+{
+	u32 ouisubtype;
+	u32 oui;
+
+	ouisubtype = ntohl(tlv->ouisubtype);
+	oui = (u32)((ouisubtype & I40E_LLDP_TLV_OUI_MASK) >>
+		    I40E_LLDP_TLV_OUI_SHIFT);
+	switch (oui) {
+	case I40E_IEEE_8021QAZ_OUI:
+		i40e_parse_ieee_tlv(tlv, dcbcfg);
+		break;
+	default:
+		break;
+	}
+}
+
+/**
+ * i40e_lldp_to_dcb_config
+ * @lldpmib: LLDPDU to be parsed
+ * @dcbcfg: store for LLDPDU data
+ *
+ * Parse DCB configuration from the LLDPDU
+ **/
+i40e_status i40e_lldp_to_dcb_config(u8 *lldpmib,
+				    struct i40e_dcbx_config *dcbcfg)
+{
+	i40e_status ret = 0;
+	struct i40e_lldp_org_tlv *tlv;
+	u16 type;
+	u16 length;
+	u16 typelength;
+
+	if (!lldpmib || !dcbcfg)
+		return I40E_ERR_PARAM;
+
+	/* set to the start of LLDPDU */
+	lldpmib += ETH_HLEN;
+	tlv = (struct i40e_lldp_org_tlv *)lldpmib;
+	while (tlv) {
+		typelength = ntohs(tlv->typelength);
+		type = (u16)((typelength & I40E_LLDP_TLV_TYPE_MASK) >>
+			     I40E_LLDP_TLV_TYPE_SHIFT);
+		length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >>
+			       I40E_LLDP_TLV_LEN_SHIFT);
+
+		if (type == I40E_TLV_TYPE_END)
+			break; /* END TLV break out */
+
+		switch (type) {
+		case I40E_TLV_TYPE_ORG:
+			i40e_parse_org_tlv(tlv, dcbcfg);
+			break;
+		default:
+			break;
+		}
+
+		/* Move to next TLV */
+		tlv = (struct i40e_lldp_org_tlv *)((char *)tlv +
+						    sizeof(tlv->typelength) +
+						    length);
+	}
+
+	return ret;
+}
+
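A quick standalone check of the type/length split used in the walk above; the example word is arbitrary and already in host byte order.

#include <stdint.h>
#include <stdio.h>

#define TLV_TYPE_SHIFT 9
#define TLV_TYPE_MASK  (0x7f << TLV_TYPE_SHIFT)
#define TLV_LEN_MASK   0x01ff

int main(void)
{
	uint16_t typelength = 0xfe18;
	uint16_t type = (typelength & TLV_TYPE_MASK) >> TLV_TYPE_SHIFT;
	uint16_t len  = typelength & TLV_LEN_MASK;

	/* prints: type=127 len=24 (an organization specific TLV) */
	printf("type=%u len=%u\n", type, len);
	return 0;
}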
+/**
+ * i40e_aq_get_dcb_config
+ * @hw: pointer to the hw struct
+ * @mib_type: mib type for the query
+ * @bridgetype: bridge type for the query (remote)
+ * @dcbcfg: store for LLDPDU data
+ *
+ * Query DCB configuration from the Firmware
+ **/
+i40e_status i40e_aq_get_dcb_config(struct i40e_hw *hw, u8 mib_type,
+				   u8 bridgetype,
+				   struct i40e_dcbx_config *dcbcfg)
+{
+	i40e_status ret = 0;
+	struct i40e_virt_mem mem;
+	u8 *lldpmib;
+
+	/* Allocate the LLDPDU */
+	ret = i40e_allocate_virt_mem(hw, &mem, I40E_LLDPDU_SIZE);
+	if (ret)
+		return ret;
+
+	lldpmib = (u8 *)mem.va;
+	ret = i40e_aq_get_lldp_mib(hw, bridgetype, mib_type,
+				   (void *)lldpmib, I40E_LLDPDU_SIZE,
+				   NULL, NULL, NULL);
+	if (ret)
+		goto free_mem;
+
+	/* Parse LLDP MIB to get dcb configuration */
+	ret = i40e_lldp_to_dcb_config(lldpmib, dcbcfg);
+
+free_mem:
+	i40e_free_virt_mem(hw, &mem);
+	return ret;
+}
+
+/**
+ * i40e_get_dcb_config
+ * @hw: pointer to the hw struct
+ *
+ * Get DCB configuration from the Firmware
+ **/
+i40e_status i40e_get_dcb_config(struct i40e_hw *hw)
+{
+	i40e_status ret = 0;
+
+	/* Get Local DCB Config */
+	ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_LOCAL, 0,
+				     &hw->local_dcbx_config);
+	if (ret)
+		goto out;
+
+	/* Get Remote DCB Config */
+	ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
+				     I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
+				     &hw->remote_dcbx_config);
+out:
+	return ret;
+}
+
+/**
+ * i40e_init_dcb
+ * @hw: pointer to the hw struct
+ *
+ * Update DCB configuration from the Firmware
+ **/
+i40e_status i40e_init_dcb(struct i40e_hw *hw)
+{
+	i40e_status ret = 0;
+
+	if (!hw->func_caps.dcb)
+		return ret;
+
+	/* Get DCBX status */
+	ret = i40e_get_dcbx_status(hw, &hw->dcbx_status);
+	if (ret)
+		return ret;
+
+	/* Check the DCBX Status */
+	switch (hw->dcbx_status) {
+	case I40E_DCBX_STATUS_DONE:
+	case I40E_DCBX_STATUS_IN_PROGRESS:
+		/* Get current DCBX configuration */
+		ret = i40e_get_dcb_config(hw);
+		break;
+	case I40E_DCBX_STATUS_DISABLED:
+		return ret;
+	case I40E_DCBX_STATUS_NOT_STARTED:
+	case I40E_DCBX_STATUS_MULTIPLE_PEERS:
+	default:
+		break;
+	}
+
+	/* Configure the LLDP MIB change event */
+	ret = i40e_aq_cfg_lldp_mib_change_event(hw, true, NULL);
+	if (ret)
+		return ret;
+
+	return ret;
+}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.h b/drivers/net/ethernet/intel/i40e/i40e_dcb.h
new file mode 100644
index 000000000000..34cf1c30c7ff
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.h
@@ -0,0 +1,107 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_DCB_H_
+#define _I40E_DCB_H_
+
+#include "i40e_type.h"
+
+#define I40E_DCBX_STATUS_NOT_STARTED	0
+#define I40E_DCBX_STATUS_IN_PROGRESS	1
+#define I40E_DCBX_STATUS_DONE		2
+#define I40E_DCBX_STATUS_MULTIPLE_PEERS	3
+#define I40E_DCBX_STATUS_DISABLED	7
+
+#define I40E_TLV_TYPE_END		0
+#define I40E_TLV_TYPE_ORG		127
+
+#define I40E_IEEE_8021QAZ_OUI		0x0080C2
+#define I40E_IEEE_SUBTYPE_ETS_CFG	9
+#define I40E_IEEE_SUBTYPE_ETS_REC	10
+#define I40E_IEEE_SUBTYPE_PFC_CFG	11
+#define I40E_IEEE_SUBTYPE_APP_PRI	12
+
+/* Defines for LLDP TLV header */
+#define I40E_LLDP_TLV_LEN_SHIFT		0
+#define I40E_LLDP_TLV_LEN_MASK		(0x01FF << I40E_LLDP_TLV_LEN_SHIFT)
+#define I40E_LLDP_TLV_TYPE_SHIFT	9
+#define I40E_LLDP_TLV_TYPE_MASK		(0x7F << I40E_LLDP_TLV_TYPE_SHIFT)
+#define I40E_LLDP_TLV_SUBTYPE_SHIFT	0
+#define I40E_LLDP_TLV_SUBTYPE_MASK	(0xFF << I40E_LLDP_TLV_SUBTYPE_SHIFT)
+#define I40E_LLDP_TLV_OUI_SHIFT		8
+#define I40E_LLDP_TLV_OUI_MASK		(0xFFFFFF << I40E_LLDP_TLV_OUI_SHIFT)
+
+/* Defines for IEEE ETS TLV */
+#define I40E_IEEE_ETS_MAXTC_SHIFT	0
+#define I40E_IEEE_ETS_MAXTC_MASK	(0x7 << I40E_IEEE_ETS_MAXTC_SHIFT)
+#define I40E_IEEE_ETS_CBS_SHIFT		6
+#define I40E_IEEE_ETS_CBS_MASK		(0x1 << I40E_IEEE_ETS_CBS_SHIFT)
+#define I40E_IEEE_ETS_WILLING_SHIFT	7
+#define I40E_IEEE_ETS_WILLING_MASK	(0x1 << I40E_IEEE_ETS_WILLING_SHIFT)
+#define I40E_IEEE_ETS_PRIO_0_SHIFT	0
+#define I40E_IEEE_ETS_PRIO_0_MASK	(0x7 << I40E_IEEE_ETS_PRIO_0_SHIFT)
+#define I40E_IEEE_ETS_PRIO_1_SHIFT	4
+#define I40E_IEEE_ETS_PRIO_1_MASK	(0x7 << I40E_IEEE_ETS_PRIO_1_SHIFT)
+
+/* Defines for IEEE TSA types */
+#define I40E_IEEE_TSA_STRICT		0
+#define I40E_IEEE_TSA_ETS		2
+
+/* Defines for IEEE PFC TLV */
+#define I40E_IEEE_PFC_CAP_SHIFT		0
+#define I40E_IEEE_PFC_CAP_MASK		(0xF << I40E_IEEE_PFC_CAP_SHIFT)
+#define I40E_IEEE_PFC_MBC_SHIFT		6
+#define I40E_IEEE_PFC_MBC_MASK		(0x1 << I40E_IEEE_PFC_MBC_SHIFT)
+#define I40E_IEEE_PFC_WILLING_SHIFT	7
+#define I40E_IEEE_PFC_WILLING_MASK	(0x1 << I40E_IEEE_PFC_WILLING_SHIFT)
+
+/* Defines for IEEE APP TLV */
+#define I40E_IEEE_APP_SEL_SHIFT		0
+#define I40E_IEEE_APP_SEL_MASK		(0x7 << I40E_IEEE_APP_SEL_SHIFT)
+#define I40E_IEEE_APP_PRIO_SHIFT	5
+#define I40E_IEEE_APP_PRIO_MASK		(0x7 << I40E_IEEE_APP_PRIO_SHIFT)
+
+#pragma pack(1)
+
+/* IEEE 802.1AB LLDP Organization specific TLV */
+struct i40e_lldp_org_tlv {
+	__be16 typelength;
+	__be32 ouisubtype;
+	u8 tlvinfo[1];
+};
+#pragma pack()
+
+i40e_status i40e_get_dcbx_status(struct i40e_hw *hw,
+					   u16 *status);
+i40e_status i40e_lldp_to_dcb_config(u8 *lldpmib,
+					      struct i40e_dcbx_config *dcbcfg);
+i40e_status i40e_aq_get_dcb_config(struct i40e_hw *hw, u8 mib_type,
+					     u8 bridgetype,
+					     struct i40e_dcbx_config *dcbcfg);
+i40e_status i40e_get_dcb_config(struct i40e_hw *hw);
+i40e_status i40e_init_dcb(struct i40e_hw *hw);
+#endif /* _I40E_DCB_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
new file mode 100644
index 000000000000..6e8103abfd0d
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
@@ -0,0 +1,316 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifdef CONFIG_I40E_DCB
+#include "i40e.h"
+#include <net/dcbnl.h>
+
+/**
+ * i40e_get_pfc_delay - retrieve PFC Link Delay
+ * @hw: pointer to hardware struct
+ * @delay: holds the PFC Link delay value
+ *
+ * Returns PFC Link Delay from the PRTDCB_GENC.PFCLDA
+ **/
+static void i40e_get_pfc_delay(struct i40e_hw *hw, u16 *delay)
+{
+	u32 val;
+
+	val = rd32(hw, I40E_PRTDCB_GENC);
+	*delay = (u16)((val & I40E_PRTDCB_GENC_PFCLDA_MASK) >>
+		       I40E_PRTDCB_GENC_PFCLDA_SHIFT);
+}
+
+/**
+ * i40e_dcbnl_ieee_getets - retrieve local IEEE ETS configuration
+ * @netdev: the corresponding netdev
+ * @ets: structure to hold the ETS information
+ *
+ * Returns local IEEE ETS configuration
+ **/
+static int i40e_dcbnl_ieee_getets(struct net_device *dev,
+				  struct ieee_ets *ets)
+{
+	struct i40e_pf *pf = i40e_netdev_to_pf(dev);
+	struct i40e_dcbx_config *dcbxcfg;
+	struct i40e_hw *hw = &pf->hw;
+
+	if (!(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
+		return -EINVAL;
+
+	dcbxcfg = &hw->local_dcbx_config;
+	ets->willing = dcbxcfg->etscfg.willing;
+	ets->ets_cap = dcbxcfg->etscfg.maxtcs;
+	ets->cbs = dcbxcfg->etscfg.cbs;
+	memcpy(ets->tc_tx_bw, dcbxcfg->etscfg.tcbwtable,
+		sizeof(ets->tc_tx_bw));
+	memcpy(ets->tc_rx_bw, dcbxcfg->etscfg.tcbwtable,
+		sizeof(ets->tc_rx_bw));
+	memcpy(ets->tc_tsa, dcbxcfg->etscfg.tsatable,
+		sizeof(ets->tc_tsa));
+	memcpy(ets->prio_tc, dcbxcfg->etscfg.prioritytable,
+		sizeof(ets->prio_tc));
+	memcpy(ets->tc_reco_bw, dcbxcfg->etsrec.tcbwtable,
+		sizeof(ets->tc_reco_bw));
+	memcpy(ets->tc_reco_tsa, dcbxcfg->etsrec.tsatable,
+		sizeof(ets->tc_reco_tsa));
+	memcpy(ets->reco_prio_tc, dcbxcfg->etsrec.prioritytable,
+		sizeof(ets->reco_prio_tc));
+
+	return 0;
+}
+
+/**
+ * i40e_dcbnl_ieee_getpfc - retrieve local IEEE PFC configuration
+ * @netdev: the corresponding netdev
+ * @pfc: structure to hold the PFC information
+ *
+ * Returns local IEEE PFC configuration
+ **/
+static int i40e_dcbnl_ieee_getpfc(struct net_device *dev,
+				  struct ieee_pfc *pfc)
+{
+	struct i40e_pf *pf = i40e_netdev_to_pf(dev);
+	struct i40e_dcbx_config *dcbxcfg;
+	struct i40e_hw *hw = &pf->hw;
+	int i;
+
+	if (!(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
+		return -EINVAL;
+
+	dcbxcfg = &hw->local_dcbx_config;
+	pfc->pfc_cap = dcbxcfg->pfc.pfccap;
+	pfc->pfc_en = dcbxcfg->pfc.pfcenable;
+	pfc->mbc = dcbxcfg->pfc.mbc;
+	i40e_get_pfc_delay(hw, &pfc->delay);
+
+	/* Get Requests/Indications */
+	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+		pfc->requests[i] = pf->stats.priority_xoff_tx[i];
+		pfc->indications[i] = pf->stats.priority_xoff_rx[i];
+	}
+
+	return 0;
+}
+
+/**
+ * i40e_dcbnl_getdcbx - retrieve current DCBx capability
+ * @netdev: the corresponding netdev
+ *
+ * Returns DCBx capability features
+ **/
+static u8 i40e_dcbnl_getdcbx(struct net_device *dev)
+{
+	struct i40e_pf *pf = i40e_netdev_to_pf(dev);
+
+	return pf->dcbx_cap;
+}
+
+/**
+ * i40e_dcbnl_get_perm_hw_addr - MAC address used by DCBx
+ * @netdev: the corresponding netdev
+ *
+ * Returns the SAN MAC address used for LLDP exchange
+ **/
+static void i40e_dcbnl_get_perm_hw_addr(struct net_device *dev,
+					u8 *perm_addr)
+{
+	struct i40e_pf *pf = i40e_netdev_to_pf(dev);
+	int i, j;
+
+	memset(perm_addr, 0xff, MAX_ADDR_LEN);
+
+	for (i = 0; i < dev->addr_len; i++)
+		perm_addr[i] = pf->hw.mac.perm_addr[i];
+
+	for (j = 0; j < dev->addr_len; j++, i++)
+		perm_addr[i] = pf->hw.mac.san_addr[j];
+}
+
+static const struct dcbnl_rtnl_ops dcbnl_ops = {
+	.ieee_getets	= i40e_dcbnl_ieee_getets,
+	.ieee_getpfc	= i40e_dcbnl_ieee_getpfc,
+	.getdcbx	= i40e_dcbnl_getdcbx,
+	.getpermhwaddr  = i40e_dcbnl_get_perm_hw_addr,
+};
+
+/**
+ * i40e_dcbnl_set_all - set all the apps and ieee data from DCBx config
+ * @vsi: the corresponding vsi
+ *
+ * Set up all the IEEE APPs in the DCBNL App Table and generate event for
+ * other settings
+ **/
+void i40e_dcbnl_set_all(struct i40e_vsi *vsi)
+{
+	struct net_device *dev = vsi->netdev;
+	struct i40e_pf *pf = i40e_netdev_to_pf(dev);
+	struct i40e_dcbx_config *dcbxcfg;
+	struct i40e_hw *hw = &pf->hw;
+	struct dcb_app sapp;
+	u8 prio, tc_map;
+	int i;
+
+	/* DCB not enabled */
+	if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
+		return;
+
+	dcbxcfg = &hw->local_dcbx_config;
+
+	/* Set up all the App TLVs if DCBx is negotiated */
+	for (i = 0; i < dcbxcfg->numapps; i++) {
+		prio = dcbxcfg->app[i].priority;
+		tc_map = (1 << dcbxcfg->etscfg.prioritytable[prio]);
+
+		/* Add APP only if the TC is enabled for this VSI */
+		if (tc_map & vsi->tc_config.enabled_tc) {
+			sapp.selector = dcbxcfg->app[i].selector;
+			sapp.protocol = dcbxcfg->app[i].protocolid;
+			sapp.priority = prio;
+			dcb_ieee_setapp(dev, &sapp);
+		}
+	}
+
+	/* Notify user-space of the changes */
+	dcbnl_ieee_notify(dev, RTM_SETDCB, DCB_CMD_IEEE_SET, 0, 0);
+}
+
+/**
+ * i40e_dcbnl_vsi_del_app - Delete APP for given VSI
+ * @vsi: the corresponding vsi
+ * @app: APP to delete
+ *
+ * Delete given APP from the DCBNL APP table for given
+ * VSI
+ **/
+static int i40e_dcbnl_vsi_del_app(struct i40e_vsi *vsi,
+				  struct i40e_ieee_app_priority_table *app)
+{
+	struct net_device *dev = vsi->netdev;
+	struct dcb_app sapp;
+
+	if (!dev)
+		return -EINVAL;
+
+	sapp.selector = app->selector;
+	sapp.protocol = app->protocolid;
+	sapp.priority = app->priority;
+	return dcb_ieee_delapp(dev, &sapp);
+}
+
+/**
+ * i40e_dcbnl_del_app - Delete APP on all VSIs
+ * @pf: the corresponding pf
+ * @app: APP to delete
+ *
+ * Delete given APP from all the VSIs for given PF
+ **/
+static void i40e_dcbnl_del_app(struct i40e_pf *pf,
+			      struct i40e_ieee_app_priority_table *app)
+{
+	int v, err;
+	for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+		if (pf->vsi[v] && pf->vsi[v]->netdev) {
+			err = i40e_dcbnl_vsi_del_app(pf->vsi[v], app);
+			if (err)
+				dev_info(&pf->pdev->dev, "%s: Failed deleting app for VSI seid=%d err=%d sel=%d proto=0x%x prio=%d\n",
+					 __func__, pf->vsi[v]->seid,
+					 err, app->selector,
+					 app->protocolid, app->priority);
+		}
+	}
+}
+
+/**
+ * i40e_dcbnl_find_app - Search APP in given DCB config
+ * @cfg: DCBX configuration data
+ * @app: APP to search for
+ *
+ * Find given APP in the DCB configuration
+ **/
+static bool i40e_dcbnl_find_app(struct i40e_dcbx_config *cfg,
+				struct i40e_ieee_app_priority_table *app)
+{
+	int i;
+
+	for (i = 0; i < cfg->numapps; i++) {
+		if (app->selector == cfg->app[i].selector &&
+		    app->protocolid == cfg->app[i].protocolid &&
+		    app->priority == cfg->app[i].priority)
+			return true;
+	}
+
+	return false;
+}
+
+/**
+ * i40e_dcbnl_flush_apps - Delete all removed APPs
+ * @pf: the corresponding pf
+ * @new_cfg: new DCBX configuration data
+ *
+ * Find and delete all APPs that are not present in the passed
+ * DCB configuration
+ **/
+void i40e_dcbnl_flush_apps(struct i40e_pf *pf,
+			   struct i40e_dcbx_config *new_cfg)
+{
+	struct i40e_ieee_app_priority_table app;
+	struct i40e_dcbx_config *dcbxcfg;
+	struct i40e_hw *hw = &pf->hw;
+	int i;
+
+	dcbxcfg = &hw->local_dcbx_config;
+	for (i = 0; i < dcbxcfg->numapps; i++) {
+		app = dcbxcfg->app[i];
+		/* The APP is not available anymore, delete it */
+		if (!i40e_dcbnl_find_app(new_cfg, &app))
+			i40e_dcbnl_del_app(pf, &app);
+	}
+}
+
+/**
+ * i40e_dcbnl_setup - DCBNL setup
+ * @vsi: the corresponding vsi
+ *
+ * Set up DCBNL ops and initial APP TLVs
+ **/
+void i40e_dcbnl_setup(struct i40e_vsi *vsi)
+{
+	struct net_device *dev = vsi->netdev;
+	struct i40e_pf *pf = i40e_netdev_to_pf(dev);
+
+	/* DCB not enabled */
+	if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
+		return;
+
+	/* Do not setup DCB NL ops for MFP mode */
+	if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
+		dev->dcbnl_ops = &dcbnl_ops;
+
+	/* Set initial IEEE DCB settings */
+	i40e_dcbnl_set_all(vsi);
+}
+#endif /* CONFIG_I40E_DCB */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index ef4cb1cf31f2..da22c3fa2c00 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
  * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
  *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
@@ -192,12 +191,12 @@ static ssize_t i40e_dbg_dump_write(struct file *filp,
 
 			len = (sizeof(struct i40e_aq_desc)
 					* pf->hw.aq.num_asq_entries);
-			memcpy(p, pf->hw.aq.asq.desc, len);
+			memcpy(p, pf->hw.aq.asq.desc_buf.va, len);
 			p += len;
 
 			len = (sizeof(struct i40e_aq_desc)
 					* pf->hw.aq.num_arq_entries);
-			memcpy(p, pf->hw.aq.arq.desc, len);
+			memcpy(p, pf->hw.aq.arq.desc_buf.va, len);
 			p += len;
 
 			i40e_dbg_dump_data_len = buflen;
@@ -362,7 +361,7 @@ static ssize_t i40e_dbg_command_read(struct file *filp, char __user *buffer,
 }
 
 /**
- * i40e_dbg_dump_vsi_seid - handles dump vsi seid write into pokem datum
+ * i40e_dbg_dump_vsi_seid - handles dump vsi seid write into command datum
  * @pf: the i40e_pf created in command write
  * @seid: the seid the user put in
  **/
@@ -516,10 +515,10 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
 			 rx_ring->stats.bytes,
 			 rx_ring->rx_stats.non_eop_descs);
 		dev_info(&pf->pdev->dev,
-			 "    rx_rings[%i]: rx_stats: alloc_rx_page_failed = %lld, alloc_rx_buff_failed = %lld\n",
+			 "    rx_rings[%i]: rx_stats: alloc_page_failed = %lld, alloc_buff_failed = %lld\n",
 			 i,
-			 rx_ring->rx_stats.alloc_rx_page_failed,
-			rx_ring->rx_stats.alloc_rx_buff_failed);
+			 rx_ring->rx_stats.alloc_page_failed,
+			 rx_ring->rx_stats.alloc_buff_failed);
 		dev_info(&pf->pdev->dev,
 			 "    rx_rings[%i]: size = %i, dma = 0x%08lx\n",
 			 i, rx_ring->size,
@@ -533,6 +532,7 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
 		struct i40e_ring *tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
 		if (!tx_ring)
 			continue;
+
 		dev_info(&pf->pdev->dev,
 			 "    tx_rings[%i]: desc = %p\n",
 			 i, tx_ring->desc);
@@ -707,8 +707,13 @@ static void i40e_dbg_dump_aq_desc(struct i40e_pf *pf)
 {
 	struct i40e_adminq_ring *ring;
 	struct i40e_hw *hw = &pf->hw;
+	char hdr[32];
 	int i;
 
+	snprintf(hdr, sizeof(hdr), "%s %s:         ",
+		 dev_driver_string(&pf->pdev->dev),
+		 dev_name(&pf->pdev->dev));
+
 	/* first the send (command) ring, then the receive (event) ring */
 	dev_info(&pf->pdev->dev, "AdminQ Tx Ring\n");
 	ring = &(hw->aq.asq);
@@ -718,14 +723,8 @@ static void i40e_dbg_dump_aq_desc(struct i40e_pf *pf)
 			 "   at[%02d] flags=0x%04x op=0x%04x dlen=0x%04x ret=0x%04x cookie_h=0x%08x cookie_l=0x%08x\n",
 			 i, d->flags, d->opcode, d->datalen, d->retval,
 			 d->cookie_high, d->cookie_low);
-		dev_info(&pf->pdev->dev,
-			 "            %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
-			 d->params.raw[0], d->params.raw[1], d->params.raw[2],
-			 d->params.raw[3], d->params.raw[4], d->params.raw[5],
-			 d->params.raw[6], d->params.raw[7], d->params.raw[8],
-			 d->params.raw[9], d->params.raw[10], d->params.raw[11],
-			 d->params.raw[12], d->params.raw[13],
-			 d->params.raw[14], d->params.raw[15]);
+		print_hex_dump(KERN_INFO, hdr, DUMP_PREFIX_NONE,
+			       16, 1, d->params.raw, 16, 0);
 	}
 
 	dev_info(&pf->pdev->dev, "AdminQ Rx Ring\n");
@@ -736,14 +735,8 @@ static void i40e_dbg_dump_aq_desc(struct i40e_pf *pf)
 			 "   ar[%02d] flags=0x%04x op=0x%04x dlen=0x%04x ret=0x%04x cookie_h=0x%08x cookie_l=0x%08x\n",
 			 i, d->flags, d->opcode, d->datalen, d->retval,
 			 d->cookie_high, d->cookie_low);
-		dev_info(&pf->pdev->dev,
-			 "            %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
-			 d->params.raw[0], d->params.raw[1], d->params.raw[2],
-			 d->params.raw[3], d->params.raw[4], d->params.raw[5],
-			 d->params.raw[6], d->params.raw[7], d->params.raw[8],
-			 d->params.raw[9], d->params.raw[10], d->params.raw[11],
-			 d->params.raw[12], d->params.raw[13],
-			 d->params.raw[14], d->params.raw[15]);
+		print_hex_dump(KERN_INFO, hdr, DUMP_PREFIX_NONE,
+			       16, 1, d->params.raw, 16, 0);
 	}
 }
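
The two hunks above drop the hand-rolled sixteen-argument dev_info() hex lines in favour of print_hex_dump(), using a prefix assembled from the driver and device names so each row keeps the same "driver pci-id:" tag that dev_info() would have added. As a rough userspace illustration of the resulting layout (this is not the kernel helper; the prefix and buffer contents are invented):

#include <stdio.h>

/* Print "len" bytes as rows of 16, each row carrying a fixed prefix,
 * roughly what print_hex_dump(.., DUMP_PREFIX_NONE, 16, 1, buf, len, 0)
 * emits with the ASCII column disabled. */
static void hex_dump_rows(const char *prefix, const unsigned char *buf,
                          size_t len)
{
    size_t i;

    for (i = 0; i < len; i++) {
        if ((i % 16) == 0)
            printf("%s%s", i ? "\n" : "", prefix);
        printf("%02x ", buf[i]);
    }
    printf("\n");
}

int main(void)
{
    unsigned char raw[16] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
                              0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f };

    /* Stand-in for the "driver dev_name:   " header built with snprintf(). */
    hex_dump_rows("i40e 0000:01:00.0:         ", raw, sizeof(raw));
    return 0;
}
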
 
@@ -759,27 +752,25 @@ static void i40e_dbg_dump_aq_desc(struct i40e_pf *pf)
 static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
 			       struct i40e_pf *pf, bool is_rx_ring)
 {
-	union i40e_rx_desc *ds;
+	struct i40e_tx_desc *txd;
+	union i40e_rx_desc *rxd;
 	struct i40e_ring ring;
 	struct i40e_vsi *vsi;
 	int i;
 
 	vsi = i40e_dbg_find_vsi(pf, vsi_seid);
 	if (!vsi) {
-		dev_info(&pf->pdev->dev,
-			 "vsi %d not found\n", vsi_seid);
-		if (is_rx_ring)
-			dev_info(&pf->pdev->dev, "dump desc rx <vsi_seid> <ring_id> [<desc_n>]\n");
-		else
-			dev_info(&pf->pdev->dev, "dump desc tx <vsi_seid> <ring_id> [<desc_n>]\n");
+		dev_info(&pf->pdev->dev, "vsi %d not found\n", vsi_seid);
 		return;
 	}
 	if (ring_id >= vsi->num_queue_pairs || ring_id < 0) {
 		dev_info(&pf->pdev->dev, "ring %d not found\n", ring_id);
-		if (is_rx_ring)
-			dev_info(&pf->pdev->dev, "dump desc rx <vsi_seid> <ring_id> [<desc_n>]\n");
-		else
-			dev_info(&pf->pdev->dev, "dump desc tx <vsi_seid> <ring_id> [<desc_n>]\n");
+		return;
+	}
+	if (!vsi->tx_rings || !vsi->tx_rings[0]->desc) {
+		dev_info(&pf->pdev->dev,
+			 "descriptor rings have not been allocated for vsi %d\n",
+			 vsi_seid);
 		return;
 	}
 	if (is_rx_ring)
@@ -790,22 +781,27 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
 		dev_info(&pf->pdev->dev, "vsi = %02i %s ring = %02i\n",
 			 vsi_seid, is_rx_ring ? "rx" : "tx", ring_id);
 		for (i = 0; i < ring.count; i++) {
-			if (is_rx_ring)
-				ds = I40E_RX_DESC(&ring, i);
-			else
-				ds = (union i40e_rx_desc *)
-					I40E_TX_DESC(&ring, i);
-			if ((sizeof(union i40e_rx_desc) ==
-			    sizeof(union i40e_16byte_rx_desc)) || (!is_rx_ring))
+			if (!is_rx_ring) {
+				txd = I40E_TX_DESC(&ring, i);
 				dev_info(&pf->pdev->dev,
-					 "   d[%03i] = 0x%016llx 0x%016llx\n", i,
-					 ds->read.pkt_addr, ds->read.hdr_addr);
-			else
+					 "   d[%03i] = 0x%016llx 0x%016llx\n",
+					 i, txd->buffer_addr,
+					 txd->cmd_type_offset_bsz);
+			} else if (sizeof(union i40e_rx_desc) ==
+				   sizeof(union i40e_16byte_rx_desc)) {
+				rxd = I40E_RX_DESC(&ring, i);
+				dev_info(&pf->pdev->dev,
+					 "   d[%03i] = 0x%016llx 0x%016llx\n",
+					 i, rxd->read.pkt_addr,
+					 rxd->read.hdr_addr);
+			} else {
+				rxd = I40E_RX_DESC(&ring, i);
 				dev_info(&pf->pdev->dev,
 					 "   d[%03i] = 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n",
-					 i, ds->read.pkt_addr,
-					 ds->read.hdr_addr,
-					 ds->read.rsvd1, ds->read.rsvd2);
+					 i, rxd->read.pkt_addr,
+					 rxd->read.hdr_addr,
+					 rxd->read.rsvd1, rxd->read.rsvd2);
+			}
 		}
 	} else if (cnt == 3) {
 		if (desc_n >= ring.count || desc_n < 0) {
@@ -813,27 +809,29 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
 				 "descriptor %d not found\n", desc_n);
 			return;
 		}
-		if (is_rx_ring)
-			ds = I40E_RX_DESC(&ring, desc_n);
-		else
-			ds = (union i40e_rx_desc *)I40E_TX_DESC(&ring, desc_n);
-		if ((sizeof(union i40e_rx_desc) ==
-		    sizeof(union i40e_16byte_rx_desc)) || (!is_rx_ring))
+		if (!is_rx_ring) {
+			txd = I40E_TX_DESC(&ring, desc_n);
 			dev_info(&pf->pdev->dev,
-				 "vsi = %02i %s ring = %02i d[%03i] = 0x%016llx 0x%016llx\n",
-				 vsi_seid, is_rx_ring ? "rx" : "tx", ring_id,
-				 desc_n, ds->read.pkt_addr, ds->read.hdr_addr);
-		else
+				 "vsi = %02i tx ring = %02i d[%03i] = 0x%016llx 0x%016llx\n",
+				 vsi_seid, ring_id, desc_n,
+				 txd->buffer_addr, txd->cmd_type_offset_bsz);
+		} else if (sizeof(union i40e_rx_desc) ==
+			   sizeof(union i40e_16byte_rx_desc)) {
+			rxd = I40E_RX_DESC(&ring, desc_n);
+			dev_info(&pf->pdev->dev,
+				 "vsi = %02i rx ring = %02i d[%03i] = 0x%016llx 0x%016llx\n",
+				 vsi_seid, ring_id, desc_n,
+				 rxd->read.pkt_addr, rxd->read.hdr_addr);
+		} else {
+			rxd = I40E_RX_DESC(&ring, desc_n);
 			dev_info(&pf->pdev->dev,
 				 "vsi = %02i rx ring = %02i d[%03i] = 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n",
-				 vsi_seid, ring_id,
-				 desc_n, ds->read.pkt_addr, ds->read.hdr_addr,
-				 ds->read.rsvd1, ds->read.rsvd2);
+				 vsi_seid, ring_id, desc_n,
+				 rxd->read.pkt_addr, rxd->read.hdr_addr,
+				 rxd->read.rsvd1, rxd->read.rsvd2);
+		}
 	} else {
-		if (is_rx_ring)
-			dev_info(&pf->pdev->dev, "dump desc rx <vsi_seid> <ring_id> [<desc_n>]\n");
-		else
-			dev_info(&pf->pdev->dev, "dump desc tx <vsi_seid> <ring_id> [<desc_n>]\n");
+		dev_info(&pf->pdev->dev, "dump desc rx/tx <vsi_seid> <ring_id> [<desc_n>]\n");
 	}
 }
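
The restructured dump above separates Tx descriptors (buffer address plus the packed cmd/type/offset/bsz quadword) from Rx descriptors, and for Rx prints either two or four quadwords depending on whether the build uses 16-byte or 32-byte Rx descriptors; a compile-time sizeof() comparison makes the choice. The structures below are simplified stand-ins, not the driver's real unions, but they show why the two layouts lead to different dump widths:

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the two Rx descriptor read layouts. */
struct rx_desc_16 {        /* 16-byte descriptor: address pair only */
    uint64_t pkt_addr;
    uint64_t hdr_addr;
};

struct rx_desc_32 {        /* 32-byte descriptor: two extra quadwords */
    uint64_t pkt_addr;
    uint64_t hdr_addr;
    uint64_t rsvd1;
    uint64_t rsvd2;
};

int main(void)
{
    printf("16-byte layout: %zu bytes -> dump prints 2 quadwords per entry\n",
           sizeof(struct rx_desc_16));
    printf("32-byte layout: %zu bytes -> dump prints 4 quadwords per entry\n",
           sizeof(struct rx_desc_32));
    return 0;
}
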
 
@@ -979,8 +977,7 @@ static void i40e_dbg_dump_veb_seid(struct i40e_pf *pf, int seid)
 
 	veb = i40e_dbg_find_veb(pf, seid);
 	if (!veb) {
-		dev_info(&pf->pdev->dev,
-			 "%d: can't find veb\n", seid);
+		dev_info(&pf->pdev->dev, "can't find veb %d\n", seid);
 		return;
 	}
 	dev_info(&pf->pdev->dev,
@@ -1006,6 +1003,22 @@ static void i40e_dbg_dump_veb_all(struct i40e_pf *pf)
 	}
 }
 
+/**
+ * i40e_dbg_cmd_fd_ctrl - Enable/disable FD sideband/ATR
+ * @pf: the pf that would be altered
+ * @flag: flag that needs enabling or disabling
+ * @enable: Enable/disable FD SD/ATR
+ **/
+static void i40e_dbg_cmd_fd_ctrl(struct i40e_pf *pf, u64 flag, bool enable)
+{
+	if (enable)
+		pf->flags |= flag;
+	else
+		pf->flags &= ~flag;
+	dev_info(&pf->pdev->dev, "requesting a pf reset\n");
+	i40e_do_reset_safe(pf, (1 << __I40E_PF_RESET_REQUESTED));
+}
+
 #define I40E_MAX_DEBUG_OUT_BUFFER (4096*4)
 /**
  * i40e_dbg_command_write - write into command datum
@@ -1022,8 +1035,6 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
 	char *cmd_buf, *cmd_buf_tmp;
 	int bytes_not_copied;
 	struct i40e_vsi *vsi;
-	u8 *print_buf_start;
-	u8 *print_buf;
 	int vsi_seid;
 	int veb_seid;
 	int cnt;
@@ -1048,11 +1059,6 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
 		count = cmd_buf_tmp - cmd_buf + 1;
 	}
 
-	print_buf_start = kzalloc(I40E_MAX_DEBUG_OUT_BUFFER, GFP_KERNEL);
-	if (!print_buf_start)
-		goto command_write_done;
-	print_buf = print_buf_start;
-
 	if (strncmp(cmd_buf, "add vsi", 7) == 0) {
 		vsi_seid = -1;
 		cnt = sscanf(&cmd_buf[7], "%i", &vsi_seid);
@@ -1104,7 +1110,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
 		vsi = i40e_dbg_find_vsi(pf, vsi_seid);
 		if (!vsi) {
 			dev_info(&pf->pdev->dev,
-				 "add relay: vsi VSI %d not found\n", vsi_seid);
+				 "add relay: VSI %d not found\n", vsi_seid);
 			goto command_write_done;
 		}
 
@@ -1462,20 +1468,24 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
 		}
 	} else if (strncmp(cmd_buf, "pfr", 3) == 0) {
 		dev_info(&pf->pdev->dev, "forcing PFR\n");
-		i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
+		i40e_do_reset_safe(pf, (1 << __I40E_PF_RESET_REQUESTED));
 
 	} else if (strncmp(cmd_buf, "corer", 5) == 0) {
 		dev_info(&pf->pdev->dev, "forcing CoreR\n");
-		i40e_do_reset(pf, (1 << __I40E_CORE_RESET_REQUESTED));
+		i40e_do_reset_safe(pf, (1 << __I40E_CORE_RESET_REQUESTED));
 
 	} else if (strncmp(cmd_buf, "globr", 5) == 0) {
 		dev_info(&pf->pdev->dev, "forcing GlobR\n");
-		i40e_do_reset(pf, (1 << __I40E_GLOBAL_RESET_REQUESTED));
+		i40e_do_reset_safe(pf, (1 << __I40E_GLOBAL_RESET_REQUESTED));
+
+	} else if (strncmp(cmd_buf, "empr", 4) == 0) {
+		dev_info(&pf->pdev->dev, "forcing EMPR\n");
+		i40e_do_reset_safe(pf, (1 << __I40E_EMP_RESET_REQUESTED));
 
 	} else if (strncmp(cmd_buf, "read", 4) == 0) {
 		u32 address;
 		u32 value;
-		cnt = sscanf(&cmd_buf[4], "%x", &address);
+		cnt = sscanf(&cmd_buf[4], "%i", &address);
 		if (cnt != 1) {
 			dev_info(&pf->pdev->dev, "read <reg>\n");
 			goto command_write_done;
@@ -1494,7 +1504,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
 
 	} else if (strncmp(cmd_buf, "write", 5) == 0) {
 		u32 address, value;
-		cnt = sscanf(&cmd_buf[5], "%x %x", &address, &value);
+		cnt = sscanf(&cmd_buf[5], "%i %i", &address, &value);
 		if (cnt != 2) {
 			dev_info(&pf->pdev->dev, "write <reg> <value>\n");
 			goto command_write_done;
@@ -1512,7 +1522,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
 			 address, value);
 	} else if (strncmp(cmd_buf, "clear_stats", 11) == 0) {
 		if (strncmp(&cmd_buf[12], "vsi", 3) == 0) {
-			cnt = sscanf(&cmd_buf[15], "%d", &vsi_seid);
+			cnt = sscanf(&cmd_buf[15], "%i", &vsi_seid);
 			if (cnt == 0) {
 				int i;
 				for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
@@ -1539,6 +1549,118 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
 		} else {
 			dev_info(&pf->pdev->dev, "clear_stats vsi [seid] or clear_stats pf\n");
 		}
+	} else if (strncmp(cmd_buf, "send aq_cmd", 11) == 0) {
+		struct i40e_aq_desc *desc;
+		i40e_status ret;
+
+		desc = kzalloc(sizeof(struct i40e_aq_desc), GFP_KERNEL);
+		if (!desc)
+			goto command_write_done;
+		cnt = sscanf(&cmd_buf[11],
+			     "%hx %hx %hx %hx %x %x %x %x %x %x",
+			     &desc->flags,
+			     &desc->opcode, &desc->datalen, &desc->retval,
+			     &desc->cookie_high, &desc->cookie_low,
+			     &desc->params.internal.param0,
+			     &desc->params.internal.param1,
+			     &desc->params.internal.param2,
+			     &desc->params.internal.param3);
+		if (cnt != 10) {
+			dev_info(&pf->pdev->dev,
+				 "send aq_cmd: bad command string, cnt=%d\n",
+				 cnt);
+			kfree(desc);
+			desc = NULL;
+			goto command_write_done;
+		}
+		ret = i40e_asq_send_command(&pf->hw, desc, NULL, 0, NULL);
+		if (!ret) {
+			dev_info(&pf->pdev->dev, "AQ command sent Status : Success\n");
+		} else if (ret == I40E_ERR_ADMIN_QUEUE_ERROR) {
+			dev_info(&pf->pdev->dev,
+				 "AQ command send failed Opcode %x AQ Error: %d\n",
+				 desc->opcode, pf->hw.aq.asq_last_status);
+		} else {
+			dev_info(&pf->pdev->dev,
+				 "AQ command send failed Opcode %x Status: %d\n",
+				 desc->opcode, ret);
+		}
+		dev_info(&pf->pdev->dev,
+			 "AQ desc WB 0x%04x 0x%04x 0x%04x 0x%04x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
+			 desc->flags, desc->opcode, desc->datalen, desc->retval,
+			 desc->cookie_high, desc->cookie_low,
+			 desc->params.internal.param0,
+			 desc->params.internal.param1,
+			 desc->params.internal.param2,
+			 desc->params.internal.param3);
+		kfree(desc);
+		desc = NULL;
+	} else if (strncmp(cmd_buf, "send indirect aq_cmd", 20) == 0) {
+		struct i40e_aq_desc *desc;
+		i40e_status ret;
+		u16 buffer_len;
+		u8 *buff;
+
+		desc = kzalloc(sizeof(struct i40e_aq_desc), GFP_KERNEL);
+		if (!desc)
+			goto command_write_done;
+		cnt = sscanf(&cmd_buf[20],
+			     "%hx %hx %hx %hx %x %x %x %x %x %x %hd",
+			     &desc->flags,
+			     &desc->opcode, &desc->datalen, &desc->retval,
+			     &desc->cookie_high, &desc->cookie_low,
+			     &desc->params.internal.param0,
+			     &desc->params.internal.param1,
+			     &desc->params.internal.param2,
+			     &desc->params.internal.param3,
+			     &buffer_len);
+		if (cnt != 11) {
+			dev_info(&pf->pdev->dev,
+				 "send indirect aq_cmd: bad command string, cnt=%d\n",
+				 cnt);
+			kfree(desc);
+			desc = NULL;
+			goto command_write_done;
+		}
+		/* Just stub a buffer big enough in case user messed up */
+		if (buffer_len == 0)
+			buffer_len = 1280;
+
+		buff = kzalloc(buffer_len, GFP_KERNEL);
+		if (!buff) {
+			kfree(desc);
+			desc = NULL;
+			goto command_write_done;
+		}
+		desc->flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+		ret = i40e_asq_send_command(&pf->hw, desc, buff,
+					    buffer_len, NULL);
+		if (!ret) {
+			dev_info(&pf->pdev->dev, "AQ command sent Status : Success\n");
+		} else if (ret == I40E_ERR_ADMIN_QUEUE_ERROR) {
+			dev_info(&pf->pdev->dev,
+				 "AQ command send failed Opcode %x AQ Error: %d\n",
+				 desc->opcode, pf->hw.aq.asq_last_status);
+		} else {
+			dev_info(&pf->pdev->dev,
+				 "AQ command send failed Opcode %x Status: %d\n",
+				 desc->opcode, ret);
+		}
+		dev_info(&pf->pdev->dev,
+			 "AQ desc WB 0x%04x 0x%04x 0x%04x 0x%04x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
+			 desc->flags, desc->opcode, desc->datalen, desc->retval,
+			 desc->cookie_high, desc->cookie_low,
+			 desc->params.internal.param0,
+			 desc->params.internal.param1,
+			 desc->params.internal.param2,
+			 desc->params.internal.param3);
+		print_hex_dump(KERN_INFO, "AQ buffer WB: ",
+			       DUMP_PREFIX_OFFSET, 16, 1,
+			       buff, buffer_len, true);
+		kfree(buff);
+		buff = NULL;
+		kfree(desc);
+		desc = NULL;
 	} else if ((strncmp(cmd_buf, "add fd_filter", 13) == 0) ||
 		   (strncmp(cmd_buf, "rem fd_filter", 13) == 0)) {
 		struct i40e_fdir_data fd_data;
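
The "send aq_cmd" and "send indirect aq_cmd" handlers added in the hunk above expect the descriptor as a fixed sequence of hex fields parsed with sscanf(): four 16-bit values (flags, opcode, datalen, retval) followed by six 32-bit values, with a trailing buffer length for the indirect form, and anything other than a full conversion count is rejected. A standalone sketch of the same parse (the field names mirror the descriptor; the sample string is invented):

#include <stdio.h>

struct aq_desc_fields {
    unsigned short flags, opcode, datalen, retval;
    unsigned int cookie_high, cookie_low;
    unsigned int param0, param1, param2, param3;
};

int main(void)
{
    /* Example command tail, i.e. everything after "send aq_cmd". */
    const char *args = "0x0200 0x0001 0x0000 0x0000 0x0 0x0 0x0 0x0 0x0 0x0";
    struct aq_desc_fields d;
    int cnt;

    cnt = sscanf(args, "%hx %hx %hx %hx %x %x %x %x %x %x",
                 &d.flags, &d.opcode, &d.datalen, &d.retval,
                 &d.cookie_high, &d.cookie_low,
                 &d.param0, &d.param1, &d.param2, &d.param3);
    if (cnt != 10) {
        fprintf(stderr, "bad command string, cnt=%d\n", cnt);
        return 1;
    }
    printf("opcode=0x%04hx flags=0x%04hx datalen=%hu\n",
           d.opcode, d.flags, d.datalen);
    return 0;
}

The indirect variant adds a "%hd" buffer length, sets I40E_AQ_FLAG_BUF before sending, and falls back to a 1280-byte scratch buffer when the user passes zero.
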
@@ -1564,7 +1686,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
 		if (strncmp(cmd_buf, "add", 3) == 0)
 			add = true;
 		cnt = sscanf(&cmd_buf[13],
-			     "%hx %2hhx %2hhx %hx %2hhx %2hhx %hx %x %hd %512s",
+			     "%hx %2hhx %2hhx %hx %2hhx %2hhx %hx %x %hd %511s",
 			     &fd_data.q_index,
 			     &fd_data.flex_off, &fd_data.pctype,
 			     &fd_data.dest_vsi, &fd_data.dest_ctl,
@@ -1588,19 +1710,15 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
 		packet_len = min_t(u16,
 				   packet_len, I40E_FDIR_MAX_RAW_PACKET_LOOKUP);
 
-		dev_info(&pf->pdev->dev, "FD raw packet:\n");
 		for (i = 0; i < packet_len; i++) {
 			sscanf(&asc_packet[j], "%2hhx ",
 			       &fd_data.raw_packet[i]);
 			j += 3;
-			snprintf(print_buf, 3, "%02x ", fd_data.raw_packet[i]);
-			print_buf += 3;
-			if ((i % 16) == 15) {
-				snprintf(print_buf, 1, "\n");
-				print_buf++;
-			}
 		}
-		dev_info(&pf->pdev->dev, "%s\n", print_buf_start);
+		dev_info(&pf->pdev->dev, "FD raw packet dump\n");
+		print_hex_dump(KERN_INFO, "FD raw packet: ",
+			       DUMP_PREFIX_OFFSET, 16, 1,
+			       fd_data.raw_packet, packet_len, true);
 		ret = i40e_program_fdir_filter(&fd_data, pf, add);
 		if (!ret) {
 			dev_info(&pf->pdev->dev, "Filter command send Status : Success\n");
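
The filter path above now converts the ASCII packet dump with sscanf("%2hhx") at a fixed stride of three characters per byte and leaves the pretty-printing to print_hex_dump() instead of assembling the string by hand. The conversion on its own looks like this (sample input invented):

#include <stdio.h>

int main(void)
{
    const char *asc_packet = "00 1b 21 3c 9d f8 08 00 45 00";
    unsigned char raw[16];
    int i, j = 0, k, n = 10;

    /* Each byte is two hex digits plus one separator: advance by 3. */
    for (i = 0; i < n; i++) {
        if (sscanf(&asc_packet[j], "%2hhx", &raw[i]) != 1)
            break;
        j += 3;
    }

    for (k = 0; k < i; k++)
        printf("%02x ", raw[k]);
    printf("\n");
    return 0;
}
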
@@ -1612,6 +1730,14 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
 		fd_data.raw_packet = NULL;
 		kfree(asc_packet);
 		asc_packet = NULL;
+	} else if (strncmp(cmd_buf, "fd-atr off", 10) == 0) {
+		i40e_dbg_cmd_fd_ctrl(pf, I40E_FLAG_FD_ATR_ENABLED, false);
+	} else if (strncmp(cmd_buf, "fd-atr on", 9) == 0) {
+		i40e_dbg_cmd_fd_ctrl(pf, I40E_FLAG_FD_ATR_ENABLED, true);
+	} else if (strncmp(cmd_buf, "fd-sb off", 9) == 0) {
+		i40e_dbg_cmd_fd_ctrl(pf, I40E_FLAG_FD_SB_ENABLED, false);
+	} else if (strncmp(cmd_buf, "fd-sb on", 8) == 0) {
+		i40e_dbg_cmd_fd_ctrl(pf, I40E_FLAG_FD_SB_ENABLED, true);
 	} else if (strncmp(cmd_buf, "lldp", 4) == 0) {
 		if (strncmp(&cmd_buf[5], "stop", 4) == 0) {
 			int ret;
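
The four fd-atr/fd-sb branches above let ATR and sideband Flow Director be toggled at runtime; each one flips the corresponding flag in pf->flags and requests a safe PF reset so the change takes effect. From userspace that is a single write of the command string to the debugfs command node. The path below follows the usual i40e debugfs layout but should be treated as an assumption, and the PCI address is a placeholder:

#include <stdio.h>
#include <string.h>

int main(void)
{
    /* Assumed location: /sys/kernel/debug/i40e/<pci-address>/command */
    const char *node = "/sys/kernel/debug/i40e/0000:01:00.0/command";
    const char *cmd = "fd-atr off\n";
    FILE *f = fopen(node, "w");

    if (!f) {
        perror("fopen");
        return 1;
    }
    if (fwrite(cmd, 1, strlen(cmd), f) != strlen(cmd))
        perror("fwrite");
    fclose(f);
    return 0;
}
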
@@ -1622,8 +1748,35 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
 					 pf->hw.aq.asq_last_status);
 				goto command_write_done;
 			}
+			ret = i40e_aq_add_rem_control_packet_filter(&pf->hw,
+						pf->hw.mac.addr,
+						I40E_ETH_P_LLDP, 0,
+						pf->vsi[pf->lan_vsi]->seid,
+						0, true, NULL, NULL);
+			if (ret) {
+				dev_info(&pf->pdev->dev,
+					"%s: Add Control Packet Filter AQ command failed =0x%x\n",
+					__func__, pf->hw.aq.asq_last_status);
+				goto command_write_done;
+			}
+#ifdef CONFIG_I40E_DCB
+			pf->dcbx_cap = DCB_CAP_DCBX_HOST |
+				       DCB_CAP_DCBX_VER_IEEE;
+#endif /* CONFIG_I40E_DCB */
 		} else if (strncmp(&cmd_buf[5], "start", 5) == 0) {
 			int ret;
+			ret = i40e_aq_add_rem_control_packet_filter(&pf->hw,
+						pf->hw.mac.addr,
+						I40E_ETH_P_LLDP, 0,
+						pf->vsi[pf->lan_vsi]->seid,
+						0, false, NULL, NULL);
+			if (ret) {
+				dev_info(&pf->pdev->dev,
+					"%s: Remove Control Packet Filter AQ command failed =0x%x\n",
+					__func__, pf->hw.aq.asq_last_status);
+				/* Continue and start FW LLDP anyways */
+			}
+
 			ret = i40e_aq_start_lldp(&pf->hw, NULL);
 			if (ret) {
 				dev_info(&pf->pdev->dev,
@@ -1631,10 +1784,14 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
 					 pf->hw.aq.asq_last_status);
 				goto command_write_done;
 			}
+#ifdef CONFIG_I40E_DCB
+			pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
+				       DCB_CAP_DCBX_VER_IEEE;
+#endif /* CONFIG_I40E_DCB */
 		} else if (strncmp(&cmd_buf[5],
 			   "get local", 9) == 0) {
 			u16 llen, rlen;
-			int ret, i;
+			int ret;
 			u8 *buff;
 			buff = kzalloc(I40E_LLDPDU_SIZE, GFP_KERNEL);
 			if (!buff)
@@ -1652,22 +1809,15 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
 				buff = NULL;
 				goto command_write_done;
 			}
-			dev_info(&pf->pdev->dev,
-				 "Get LLDP MIB (local) AQ buffer written back:\n");
-			for (i = 0; i < I40E_LLDPDU_SIZE; i++) {
-				snprintf(print_buf, 3, "%02x ", buff[i]);
-				print_buf += 3;
-				if ((i % 16) == 15) {
-					snprintf(print_buf, 1, "\n");
-					print_buf++;
-				}
-			}
-			dev_info(&pf->pdev->dev, "%s\n", print_buf_start);
+			dev_info(&pf->pdev->dev, "LLDP MIB (local)\n");
+			print_hex_dump(KERN_INFO, "LLDP MIB (local): ",
+				       DUMP_PREFIX_OFFSET, 16, 1,
+				       buff, I40E_LLDPDU_SIZE, true);
 			kfree(buff);
 			buff = NULL;
 		} else if (strncmp(&cmd_buf[5], "get remote", 10) == 0) {
 			u16 llen, rlen;
-			int ret, i;
+			int ret;
 			u8 *buff;
 			buff = kzalloc(I40E_LLDPDU_SIZE, GFP_KERNEL);
 			if (!buff)
@@ -1686,17 +1836,10 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
 				buff = NULL;
 				goto command_write_done;
 			}
-			dev_info(&pf->pdev->dev,
-				 "Get LLDP MIB (remote) AQ buffer written back:\n");
-			for (i = 0; i < I40E_LLDPDU_SIZE; i++) {
-				snprintf(print_buf, 3, "%02x ", buff[i]);
-				print_buf += 3;
-				if ((i % 16) == 15) {
-					snprintf(print_buf, 1, "\n");
-					print_buf++;
-				}
-			}
-			dev_info(&pf->pdev->dev, "%s\n", print_buf_start);
+			dev_info(&pf->pdev->dev, "LLDP MIB (remote)\n");
+			print_hex_dump(KERN_INFO, "LLDP MIB (remote): ",
+				       DUMP_PREFIX_OFFSET, 16, 1,
+				       buff, I40E_LLDPDU_SIZE, true);
 			kfree(buff);
 			buff = NULL;
 		} else if (strncmp(&cmd_buf[5], "event on", 8) == 0) {
@@ -1721,7 +1864,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
 			}
 		}
 	} else if (strncmp(cmd_buf, "nvm read", 8) == 0) {
-		u16 buffer_len, i, bytes;
+		u16 buffer_len, bytes;
 		u16 module;
 		u32 offset;
 		u16 *buff;
@@ -1775,16 +1918,10 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
 			dev_info(&pf->pdev->dev,
 				 "Read NVM module=0x%x offset=0x%x words=%d\n",
 				 module, offset, buffer_len);
-			for (i = 0; i < buffer_len; i++) {
-				if ((i % 16) == 0) {
-					snprintf(print_buf, 11, "\n0x%08x: ",
-						 offset + i);
-					print_buf += 11;
-				}
-				snprintf(print_buf, 5, "%04x ", buff[i]);
-				print_buf += 5;
-			}
-			dev_info(&pf->pdev->dev, "%s\n", print_buf_start);
+			if (bytes)
+				print_hex_dump(KERN_INFO, "NVM Dump: ",
+					DUMP_PREFIX_OFFSET, 16, 2,
+					buff, bytes, true);
 		}
 		kfree(buff);
 		buff = NULL;
@@ -1814,8 +1951,14 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
 		dev_info(&pf->pdev->dev, "  pfr\n");
 		dev_info(&pf->pdev->dev, "  corer\n");
 		dev_info(&pf->pdev->dev, "  globr\n");
+		dev_info(&pf->pdev->dev, "  send aq_cmd <flags> <opcode> <datalen> <retval> <cookie_h> <cookie_l> <param0> <param1> <param2> <param3>\n");
+		dev_info(&pf->pdev->dev, "  send indirect aq_cmd <flags> <opcode> <datalen> <retval> <cookie_h> <cookie_l> <param0> <param1> <param2> <param3> <buffer_len>\n");
 		dev_info(&pf->pdev->dev, "  add fd_filter <dest q_index> <flex_off> <pctype> <dest_vsi> <dest_ctl> <fd_status> <cnt_index> <fd_id> <packet_len> <packet>\n");
 		dev_info(&pf->pdev->dev, "  rem fd_filter <dest q_index> <flex_off> <pctype> <dest_vsi> <dest_ctl> <fd_status> <cnt_index> <fd_id> <packet_len> <packet>\n");
+		dev_info(&pf->pdev->dev, "  fd-atr off\n");
+		dev_info(&pf->pdev->dev, "  fd-atr on\n");
+		dev_info(&pf->pdev->dev, "  fd-sb off\n");
+		dev_info(&pf->pdev->dev, "  fd-sb on\n");
 		dev_info(&pf->pdev->dev, "  lldp start\n");
 		dev_info(&pf->pdev->dev, "  lldp stop\n");
 		dev_info(&pf->pdev->dev, "  lldp get local\n");
@@ -1828,9 +1971,6 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
 command_write_done:
 	kfree(cmd_buf);
 	cmd_buf = NULL;
-	kfree(print_buf_start);
-	print_buf = NULL;
-	print_buf_start = NULL;
 	return count;
 }
 
diff --git a/drivers/net/ethernet/intel/i40e/i40e_diag.c b/drivers/net/ethernet/intel/i40e/i40e_diag.c
index de255143bde6..b2380daef8c1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_diag.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_diag.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
  * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
  *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
@@ -68,16 +67,16 @@ static i40e_status i40e_diag_reg_pattern_test(struct i40e_hw *hw,
 
 struct i40e_diag_reg_test_info i40e_reg_list[] = {
 	/* offset               mask         elements   stride */
-	{I40E_QTX_CTL(0),       0x0000FFBF,  64, I40E_QTX_CTL(1) - I40E_QTX_CTL(0)},
+	{I40E_QTX_CTL(0),       0x0000FFBF,   4, I40E_QTX_CTL(1) - I40E_QTX_CTL(0)},
 	{I40E_PFINT_ITR0(0),    0x00000FFF,   3, I40E_PFINT_ITR0(1) - I40E_PFINT_ITR0(0)},
-	{I40E_PFINT_ITRN(0, 0), 0x00000FFF,  64, I40E_PFINT_ITRN(0, 1) - I40E_PFINT_ITRN(0, 0)},
-	{I40E_PFINT_ITRN(1, 0), 0x00000FFF,  64, I40E_PFINT_ITRN(1, 1) - I40E_PFINT_ITRN(1, 0)},
-	{I40E_PFINT_ITRN(2, 0), 0x00000FFF,  64, I40E_PFINT_ITRN(2, 1) - I40E_PFINT_ITRN(2, 0)},
+	{I40E_PFINT_ITRN(0, 0), 0x00000FFF,   8, I40E_PFINT_ITRN(0, 1) - I40E_PFINT_ITRN(0, 0)},
+	{I40E_PFINT_ITRN(1, 0), 0x00000FFF,   8, I40E_PFINT_ITRN(1, 1) - I40E_PFINT_ITRN(1, 0)},
+	{I40E_PFINT_ITRN(2, 0), 0x00000FFF,   8, I40E_PFINT_ITRN(2, 1) - I40E_PFINT_ITRN(2, 0)},
 	{I40E_PFINT_STAT_CTL0,  0x0000000C,   1, 0},
 	{I40E_PFINT_LNKLST0,    0x00001FFF,   1, 0},
-	{I40E_PFINT_LNKLSTN(0), 0x000007FF, 511, I40E_PFINT_LNKLSTN(1) - I40E_PFINT_LNKLSTN(0)},
-	{I40E_QINT_TQCTL(0),    0x000000FF, I40E_QINT_TQCTL_MAX_INDEX + 1, I40E_QINT_TQCTL(1) - I40E_QINT_TQCTL(0)},
-	{I40E_QINT_RQCTL(0),    0x000000FF, I40E_QINT_RQCTL_MAX_INDEX + 1, I40E_QINT_RQCTL(1) - I40E_QINT_RQCTL(0)},
+	{I40E_PFINT_LNKLSTN(0), 0x000007FF,  64, I40E_PFINT_LNKLSTN(1) - I40E_PFINT_LNKLSTN(0)},
+	{I40E_QINT_TQCTL(0),    0x000000FF,  64, I40E_QINT_TQCTL(1) - I40E_QINT_TQCTL(0)},
+	{I40E_QINT_RQCTL(0),    0x000000FF,  64, I40E_QINT_RQCTL(1) - I40E_QINT_RQCTL(0)},
 	{I40E_PFINT_ICR0_ENA,   0xF7F20000,   1, 0},
 	{ 0 }
 };
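
This table is walked by i40e_diag_reg_test(), which runs a masked write/read-back pattern check on every element of each {offset, mask, elements, stride} row; the patch trims several element counts, presumably so the diagnostic stays within the register ranges a single PF can safely touch. A rough userspace model of that style of test, over a simulated register file and an invented table (not the driver's implementation):

#include <stdio.h>
#include <stdint.h>

#define NUM_REGS 256

static uint32_t regs[NUM_REGS];        /* simulated register file */

struct reg_test_info {
    uint32_t offset;    /* index of the first register */
    uint32_t mask;      /* writable bits */
    uint32_t elements;  /* how many copies of the register */
    uint32_t stride;    /* index distance between copies */
};

static int reg_pattern_test(uint32_t reg, uint32_t mask)
{
    static const uint32_t patterns[] = {
        0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF
    };
    size_t i;

    for (i = 0; i < sizeof(patterns) / sizeof(patterns[0]); i++) {
        uint32_t pat = patterns[i] & mask;

        regs[reg] = pat;                        /* wr32() in the driver */
        if ((regs[reg] & mask) != pat) {        /* rd32() and compare */
            fprintf(stderr, "reg 0x%x failed pattern 0x%08x\n", reg, pat);
            return -1;
        }
    }
    return 0;
}

int main(void)
{
    static const struct reg_test_info reg_list[] = {
        { 0x10, 0x0000FFBF, 4, 1 },    /* invented rows */
        { 0x40, 0x00000FFF, 8, 2 },
        { 0 }
    };
    uint32_t i, e;

    for (i = 0; reg_list[i].mask; i++)
        for (e = 0; e < reg_list[i].elements; e++)
            if (reg_pattern_test(reg_list[i].offset + e * reg_list[i].stride,
                                 reg_list[i].mask))
                return 1;
    printf("register pattern test passed\n");
    return 0;
}
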
@@ -119,7 +118,7 @@ i40e_status i40e_diag_eeprom_test(struct i40e_hw *hw)
 
 	/* read NVM control word and if NVM valid, validate EEPROM checksum*/
 	ret_code = i40e_read_nvm_word(hw, I40E_SR_NVM_CONTROL_WORD, &reg_val);
-	if ((!ret_code) &&
+	if (!ret_code &&
 	    ((reg_val & I40E_SR_CONTROL_WORD_1_MASK) ==
 	     (0x01 << I40E_SR_CONTROL_WORD_1_SHIFT))) {
 		ret_code = i40e_validate_nvm_checksum(hw, NULL);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_diag.h b/drivers/net/ethernet/intel/i40e/i40e_diag.h
index 3d98277f4526..0b5911652084 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_diag.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_diag.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
  * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
  *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
@@ -31,10 +30,10 @@
 #include "i40e_type.h"
 
 enum i40e_lb_mode {
-	I40E_LB_MODE_NONE = 0,
-	I40E_LB_MODE_PHY_LOCAL,
-	I40E_LB_MODE_PHY_REMOTE,
-	I40E_LB_MODE_MAC_LOCAL,
+	I40E_LB_MODE_NONE       = 0x0,
+	I40E_LB_MODE_PHY_LOCAL  = I40E_AQ_LB_PHY_LOCAL,
+	I40E_LB_MODE_PHY_REMOTE = I40E_AQ_LB_PHY_REMOTE,
+	I40E_LB_MODE_MAC_LOCAL  = I40E_AQ_LB_MAC_LOCAL,
 };
 
 struct i40e_diag_reg_test_info {
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 1b86138fa9e1..b1d7d8c5cb9b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
  * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
  *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
@@ -109,6 +108,8 @@ static struct i40e_stats i40e_gstrings_stats[] = {
 	I40E_PF_STAT("rx_oversize", stats.rx_oversize),
 	I40E_PF_STAT("rx_jabber", stats.rx_jabber),
 	I40E_PF_STAT("VF_admin_queue_requests", vf_aq_requests),
+	I40E_PF_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts),
+	I40E_PF_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared),
 };
 
 #define I40E_QUEUE_STATS_LEN(n) \
@@ -193,28 +194,48 @@ static int i40e_get_settings(struct net_device *netdev,
 		ecmd->supported = SUPPORTED_10000baseKR_Full;
 		ecmd->advertising = ADVERTISED_10000baseKR_Full;
 		break;
-	case I40E_PHY_TYPE_10GBASE_T:
 	default:
-		ecmd->supported = SUPPORTED_10000baseT_Full;
-		ecmd->advertising = ADVERTISED_10000baseT_Full;
+		if (i40e_is_40G_device(hw->device_id)) {
+			ecmd->supported = SUPPORTED_40000baseSR4_Full;
+			ecmd->advertising = ADVERTISED_40000baseSR4_Full;
+		} else {
+			ecmd->supported = SUPPORTED_10000baseT_Full;
+			ecmd->advertising = ADVERTISED_10000baseT_Full;
+		}
 		break;
 	}
 
-	/* for now just say autoneg all the time */
 	ecmd->supported |= SUPPORTED_Autoneg;
+	ecmd->advertising |= ADVERTISED_Autoneg;
+	ecmd->autoneg = ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ?
+			  AUTONEG_ENABLE : AUTONEG_DISABLE);
 
-	if (hw->phy.media_type == I40E_MEDIA_TYPE_BACKPLANE) {
+	switch (hw->phy.media_type) {
+	case I40E_MEDIA_TYPE_BACKPLANE:
 		ecmd->supported |= SUPPORTED_Backplane;
 		ecmd->advertising |= ADVERTISED_Backplane;
 		ecmd->port = PORT_NONE;
-	} else if (hw->phy.media_type == I40E_MEDIA_TYPE_BASET) {
+		break;
+	case I40E_MEDIA_TYPE_BASET:
 		ecmd->supported |= SUPPORTED_TP;
 		ecmd->advertising |= ADVERTISED_TP;
 		ecmd->port = PORT_TP;
-	} else {
+		break;
+	case I40E_MEDIA_TYPE_DA:
+	case I40E_MEDIA_TYPE_CX4:
+		ecmd->supported |= SUPPORTED_FIBRE;
+		ecmd->advertising |= ADVERTISED_FIBRE;
+		ecmd->port = PORT_DA;
+		break;
+	case I40E_MEDIA_TYPE_FIBER:
 		ecmd->supported |= SUPPORTED_FIBRE;
 		ecmd->advertising |= ADVERTISED_FIBRE;
 		ecmd->port = PORT_FIBRE;
+		break;
+	case I40E_MEDIA_TYPE_UNKNOWN:
+	default:
+		ecmd->port = PORT_OTHER;
+		break;
 	}
 
 	ecmd->transceiver = XCVR_EXTERNAL;
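
The expanded media-type switch above is what an ordinary ethtool link query ends up reporting: supported and advertised modes, autoneg state, and a port type of TP, DA, FIBRE, NONE or OTHER. For reference, the legacy ETHTOOL_GSET ioctl that reaches this handler from userspace looks roughly like the following; the interface name is a placeholder:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
    struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
    struct ifreq ifr;
    int fd = socket(AF_INET, SOCK_DGRAM, 0);

    if (fd < 0) {
        perror("socket");
        return 1;
    }
    memset(&ifr, 0, sizeof(ifr));
    strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);    /* placeholder ifname */
    ifr.ifr_data = (void *)&ecmd;

    if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
        perror("SIOCETHTOOL");
        close(fd);
        return 1;
    }
    printf("port=%u autoneg=%u speed=%u Mb/s supported=0x%x\n",
           ecmd.port, ecmd.autoneg, ethtool_cmd_speed(&ecmd), ecmd.supported);
    close(fd);
    return 0;
}

This is the query behind plain "ethtool <dev>" output on kernels of this vintage.
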
@@ -256,12 +277,14 @@ static void i40e_get_pauseparam(struct net_device *netdev,
 		((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ?
 		  AUTONEG_ENABLE : AUTONEG_DISABLE);
 
-	pause->rx_pause = 0;
-	pause->tx_pause = 0;
-	if (hw_link_info->an_info & I40E_AQ_LINK_PAUSE_RX)
+	if (hw->fc.current_mode == I40E_FC_RX_PAUSE) {
+		pause->rx_pause = 1;
+	} else if (hw->fc.current_mode == I40E_FC_TX_PAUSE) {
+		pause->tx_pause = 1;
+	} else if (hw->fc.current_mode == I40E_FC_FULL) {
 		pause->rx_pause = 1;
-	if (hw_link_info->an_info & I40E_AQ_LINK_PAUSE_TX)
 		pause->tx_pause = 1;
+	}
 }
 
 static u32 i40e_get_msglevel(struct net_device *netdev)
@@ -329,38 +352,56 @@ static int i40e_get_eeprom(struct net_device *netdev,
 {
 	struct i40e_netdev_priv *np = netdev_priv(netdev);
 	struct i40e_hw *hw = &np->vsi->back->hw;
-	int first_word, last_word;
-	u16 i, eeprom_len;
-	u16 *eeprom_buff;
-	int ret_val = 0;
-
+	struct i40e_pf *pf = np->vsi->back;
+	int ret_val = 0, len;
+	u8 *eeprom_buff;
+	u16 i, sectors;
+	bool last;
+#define I40E_NVM_SECTOR_SIZE  4096
 	if (eeprom->len == 0)
 		return -EINVAL;
 
 	eeprom->magic = hw->vendor_id | (hw->device_id << 16);
 
-	first_word = eeprom->offset >> 1;
-	last_word = (eeprom->offset + eeprom->len - 1) >> 1;
-	eeprom_len = last_word - first_word + 1;
-
-	eeprom_buff = kmalloc(sizeof(u16) * eeprom_len, GFP_KERNEL);
+	eeprom_buff = kzalloc(eeprom->len, GFP_KERNEL);
 	if (!eeprom_buff)
 		return -ENOMEM;
 
-	ret_val = i40e_read_nvm_buffer(hw, first_word, &eeprom_len,
-					   eeprom_buff);
-	if (eeprom_len == 0) {
-		kfree(eeprom_buff);
-		return -EACCES;
+	ret_val = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+	if (ret_val) {
+		dev_info(&pf->pdev->dev,
+			 "Failed Acquiring NVM resource for read err=%d status=0x%x\n",
+			 ret_val, hw->aq.asq_last_status);
+		goto free_buff;
 	}
 
-	/* Device's eeprom is always little-endian, word addressable */
-	for (i = 0; i < eeprom_len; i++)
-		le16_to_cpus(&eeprom_buff[i]);
+	sectors = eeprom->len / I40E_NVM_SECTOR_SIZE;
+	sectors += (eeprom->len % I40E_NVM_SECTOR_SIZE) ? 1 : 0;
+	len = I40E_NVM_SECTOR_SIZE;
+	last = false;
+	for (i = 0; i < sectors; i++) {
+		if (i == (sectors - 1)) {
+			len = eeprom->len - (I40E_NVM_SECTOR_SIZE * i);
+			last = true;
+		}
+		ret_val = i40e_aq_read_nvm(hw, 0x0,
+				eeprom->offset + (I40E_NVM_SECTOR_SIZE * i),
+				len,
+				(u8 *)eeprom_buff + (I40E_NVM_SECTOR_SIZE * i),
+				last, NULL);
+		if (ret_val) {
+			dev_info(&pf->pdev->dev,
+				 "read NVM failed err=%d status=0x%x\n",
+				 ret_val, hw->aq.asq_last_status);
+			goto release_nvm;
+		}
+	}
 
-	memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len);
+release_nvm:
+	i40e_release_nvm(hw);
+	memcpy(bytes, (u8 *)eeprom_buff, eeprom->len);
+free_buff:
 	kfree(eeprom_buff);
-
 	return ret_val;
 }
 
@@ -368,8 +409,14 @@ static int i40e_get_eeprom_len(struct net_device *netdev)
 {
 	struct i40e_netdev_priv *np = netdev_priv(netdev);
 	struct i40e_hw *hw = &np->vsi->back->hw;
-
-	return hw->nvm.sr_size * 2;
+	u32 val;
+
+	val = (rd32(hw, I40E_GLPCI_LBARCTRL)
+		& I40E_GLPCI_LBARCTRL_FL_SIZE_MASK)
+		>> I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT;
+	/* register returns value in power of 2, 64Kbyte chunks. */
+	val = (64 * 1024) * (1 << val);
+	return val;
 }
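
The rewritten i40e_get_eeprom() above pulls the NVM image through the admin queue in 4 KB sectors, shortening only the final chunk, and i40e_get_eeprom_len() now derives the reported size from the GLPCI_LBARCTRL FL_SIZE field, which encodes the flash size as a power-of-two count of 64 KB chunks. Both calculations, with invented inputs:

#include <stdio.h>
#include <stdint.h>

#define NVM_SECTOR_SIZE 4096

int main(void)
{
    /* Flash size: FL_SIZE is a power-of-two number of 64 KB chunks.
     * The field value used here (3) is invented for illustration. */
    uint32_t fl_size_field = 3;
    uint32_t flash_len = (64 * 1024) * (1u << fl_size_field);

    /* EEPROM read chunking: whole 4 KB sectors plus a short final chunk. */
    uint32_t req_len = 10000;        /* invented request length */
    uint32_t sectors = req_len / NVM_SECTOR_SIZE +
                       (req_len % NVM_SECTOR_SIZE ? 1 : 0);
    uint32_t last_len = req_len - NVM_SECTOR_SIZE * (sectors - 1);

    printf("flash size = %u bytes\n", flash_len);
    printf("read of %u bytes = %u sectors, last chunk %u bytes\n",
           req_len, sectors, last_len);
    return 0;
}
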
 
 static void i40e_get_drvinfo(struct net_device *netdev,
@@ -418,15 +465,19 @@ static int i40e_set_ringparam(struct net_device *netdev,
 	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
 		return -EINVAL;
 
-	new_tx_count = clamp_t(u32, ring->tx_pending,
-			       I40E_MIN_NUM_DESCRIPTORS,
-			       I40E_MAX_NUM_DESCRIPTORS);
-	new_tx_count = ALIGN(new_tx_count, I40E_REQ_DESCRIPTOR_MULTIPLE);
+	if (ring->tx_pending > I40E_MAX_NUM_DESCRIPTORS ||
+	    ring->tx_pending < I40E_MIN_NUM_DESCRIPTORS ||
+	    ring->rx_pending > I40E_MAX_NUM_DESCRIPTORS ||
+	    ring->rx_pending < I40E_MIN_NUM_DESCRIPTORS) {
+		netdev_info(netdev,
+			    "Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d]\n",
+			    ring->tx_pending, ring->rx_pending,
+			    I40E_MIN_NUM_DESCRIPTORS, I40E_MAX_NUM_DESCRIPTORS);
+		return -EINVAL;
+	}
 
-	new_rx_count = clamp_t(u32, ring->rx_pending,
-			       I40E_MIN_NUM_DESCRIPTORS,
-			       I40E_MAX_NUM_DESCRIPTORS);
-	new_rx_count = ALIGN(new_rx_count, I40E_REQ_DESCRIPTOR_MULTIPLE);
+	new_tx_count = ALIGN(ring->tx_pending, I40E_REQ_DESCRIPTOR_MULTIPLE);
+	new_rx_count = ALIGN(ring->rx_pending, I40E_REQ_DESCRIPTOR_MULTIPLE);
 
 	/* if nothing to do return success */
 	if ((new_tx_count == vsi->tx_rings[0]->count) &&
@@ -699,11 +750,44 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset,
 static int i40e_get_ts_info(struct net_device *dev,
 			    struct ethtool_ts_info *info)
 {
-	return ethtool_op_get_ts_info(dev, info);
+	struct i40e_pf *pf = i40e_netdev_to_pf(dev);
+
+	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+				SOF_TIMESTAMPING_RX_SOFTWARE |
+				SOF_TIMESTAMPING_SOFTWARE |
+				SOF_TIMESTAMPING_TX_HARDWARE |
+				SOF_TIMESTAMPING_RX_HARDWARE |
+				SOF_TIMESTAMPING_RAW_HARDWARE;
+
+	if (pf->ptp_clock)
+		info->phc_index = ptp_clock_index(pf->ptp_clock);
+	else
+		info->phc_index = -1;
+
+	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
+
+	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
+			   (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
+			   (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
+			   (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
+			   (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+			   (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
+			   (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
+			   (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
+			   (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
+			   (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
+			   (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
+			   (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ);
+
+	return 0;
 }
 
-static int i40e_link_test(struct i40e_pf *pf, u64 *data)
+static int i40e_link_test(struct net_device *netdev, u64 *data)
 {
+	struct i40e_netdev_priv *np = netdev_priv(netdev);
+	struct i40e_pf *pf = np->vsi->back;
+
+	netif_info(pf, hw, netdev, "link test\n");
 	if (i40e_get_link_status(&pf->hw))
 		*data = 0;
 	else
@@ -712,36 +796,51 @@ static int i40e_link_test(struct i40e_pf *pf, u64 *data)
 	return *data;
 }
 
-static int i40e_reg_test(struct i40e_pf *pf, u64 *data)
+static int i40e_reg_test(struct net_device *netdev, u64 *data)
 {
-	i40e_status ret;
+	struct i40e_netdev_priv *np = netdev_priv(netdev);
+	struct i40e_pf *pf = np->vsi->back;
 
-	ret = i40e_diag_reg_test(&pf->hw);
-	*data = ret;
+	netif_info(pf, hw, netdev, "register test\n");
+	*data = i40e_diag_reg_test(&pf->hw);
 
-	return ret;
+	return *data;
 }
 
-static int i40e_eeprom_test(struct i40e_pf *pf, u64 *data)
+static int i40e_eeprom_test(struct net_device *netdev, u64 *data)
 {
-	i40e_status ret;
+	struct i40e_netdev_priv *np = netdev_priv(netdev);
+	struct i40e_pf *pf = np->vsi->back;
 
-	ret = i40e_diag_eeprom_test(&pf->hw);
-	*data = ret;
+	netif_info(pf, hw, netdev, "eeprom test\n");
+	*data = i40e_diag_eeprom_test(&pf->hw);
 
-	return ret;
+	return *data;
 }
 
-static int i40e_intr_test(struct i40e_pf *pf, u64 *data)
+static int i40e_intr_test(struct net_device *netdev, u64 *data)
 {
-	*data = -ENOSYS;
+	struct i40e_netdev_priv *np = netdev_priv(netdev);
+	struct i40e_pf *pf = np->vsi->back;
+	u16 swc_old = pf->sw_int_count;
+
+	netif_info(pf, hw, netdev, "interrupt test\n");
+	wr32(&pf->hw, I40E_PFINT_DYN_CTL0,
+	     (I40E_PFINT_DYN_CTL0_INTENA_MASK |
+	      I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK));
+	usleep_range(1000, 2000);
+	*data = (swc_old == pf->sw_int_count);
 
 	return *data;
 }
 
-static int i40e_loopback_test(struct i40e_pf *pf, u64 *data)
+static int i40e_loopback_test(struct net_device *netdev, u64 *data)
 {
-	*data = -ENOSYS;
+	struct i40e_netdev_priv *np = netdev_priv(netdev);
+	struct i40e_pf *pf = np->vsi->back;
+
+	netif_info(pf, hw, netdev, "loopback test not implemented\n");
+	*data = 0;
 
 	return *data;
 }
@@ -752,42 +851,38 @@ static void i40e_diag_test(struct net_device *netdev,
 	struct i40e_netdev_priv *np = netdev_priv(netdev);
 	struct i40e_pf *pf = np->vsi->back;
 
-	set_bit(__I40E_TESTING, &pf->state);
 	if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
 		/* Offline tests */
+		netif_info(pf, drv, netdev, "offline testing starting\n");
 
-		netdev_info(netdev, "offline testing starting\n");
+		set_bit(__I40E_TESTING, &pf->state);
 
 		/* Link test performed before hardware reset
 		 * so autoneg doesn't interfere with test result
 		 */
-		netdev_info(netdev, "link test starting\n");
-		if (i40e_link_test(pf, &data[I40E_ETH_TEST_LINK]))
+		if (i40e_link_test(netdev, &data[I40E_ETH_TEST_LINK]))
 			eth_test->flags |= ETH_TEST_FL_FAILED;
 
-		netdev_info(netdev, "register test starting\n");
-		if (i40e_reg_test(pf, &data[I40E_ETH_TEST_REG]))
+		if (i40e_eeprom_test(netdev, &data[I40E_ETH_TEST_EEPROM]))
 			eth_test->flags |= ETH_TEST_FL_FAILED;
 
-		i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
-		netdev_info(netdev, "eeprom test starting\n");
-		if (i40e_eeprom_test(pf, &data[I40E_ETH_TEST_EEPROM]))
+		if (i40e_intr_test(netdev, &data[I40E_ETH_TEST_INTR]))
 			eth_test->flags |= ETH_TEST_FL_FAILED;
 
-		i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
-		netdev_info(netdev, "interrupt test starting\n");
-		if (i40e_intr_test(pf, &data[I40E_ETH_TEST_INTR]))
+		if (i40e_loopback_test(netdev, &data[I40E_ETH_TEST_LOOPBACK]))
 			eth_test->flags |= ETH_TEST_FL_FAILED;
 
-		i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
-		netdev_info(netdev, "loopback test starting\n");
-		if (i40e_loopback_test(pf, &data[I40E_ETH_TEST_LOOPBACK]))
+		/* run reg test last, a reset is required after it */
+		if (i40e_reg_test(netdev, &data[I40E_ETH_TEST_REG]))
 			eth_test->flags |= ETH_TEST_FL_FAILED;
 
+		clear_bit(__I40E_TESTING, &pf->state);
+		i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
 	} else {
-		netdev_info(netdev, "online test starting\n");
 		/* Online tests */
-		if (i40e_link_test(pf, &data[I40E_ETH_TEST_LINK]))
+		netif_info(pf, drv, netdev, "online testing starting\n");
+
+		if (i40e_link_test(netdev, &data[I40E_ETH_TEST_LINK]))
 			eth_test->flags |= ETH_TEST_FL_FAILED;
 
 		/* Offline only tests, not run in online; pass by default */
@@ -795,16 +890,53 @@ static void i40e_diag_test(struct net_device *netdev,
 		data[I40E_ETH_TEST_EEPROM] = 0;
 		data[I40E_ETH_TEST_INTR] = 0;
 		data[I40E_ETH_TEST_LOOPBACK] = 0;
-
-		clear_bit(__I40E_TESTING, &pf->state);
 	}
+
+	netif_info(pf, drv, netdev, "testing finished\n");
 }
 
 static void i40e_get_wol(struct net_device *netdev,
 			 struct ethtool_wolinfo *wol)
 {
-	wol->supported = 0;
-	wol->wolopts = 0;
+	struct i40e_netdev_priv *np = netdev_priv(netdev);
+	struct i40e_pf *pf = np->vsi->back;
+	struct i40e_hw *hw = &pf->hw;
+	u16 wol_nvm_bits;
+
+	/* NVM bit on means WoL disabled for the port */
+	i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
+	if ((1 << hw->port) & wol_nvm_bits) {
+		wol->supported = 0;
+		wol->wolopts = 0;
+	} else {
+		wol->supported = WAKE_MAGIC;
+		wol->wolopts = (pf->wol_en ? WAKE_MAGIC : 0);
+	}
+}
+
+static int i40e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+{
+	struct i40e_netdev_priv *np = netdev_priv(netdev);
+	struct i40e_pf *pf = np->vsi->back;
+	struct i40e_hw *hw = &pf->hw;
+	u16 wol_nvm_bits;
+
+	/* NVM bit on means WoL disabled for the port */
+	i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
+	if (((1 << hw->port) & wol_nvm_bits))
+		return -EOPNOTSUPP;
+
+	/* only magic packet is supported */
+	if (wol->wolopts && (wol->wolopts != WAKE_MAGIC))
+		return -EOPNOTSUPP;
+
+	/* is this a new value? */
+	if (pf->wol_en != !!wol->wolopts) {
+		pf->wol_en = !!wol->wolopts;
+		device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en);
+	}
+
+	return 0;
 }
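
Wake-on-LAN support above hinges on a single NVM word: if the bit for this port is set, WoL is disabled and both the get and set paths report nothing supported; otherwise only WAKE_MAGIC is offered. The two checks reduce to the following (port number and NVM word invented):

#include <stdio.h>
#include <stdint.h>

#define WAKE_MAGIC (1 << 5)    /* same bit value as the ethtool flag */

int main(void)
{
    uint16_t wol_nvm_bits = 0x0005;  /* invented: WoL disabled on ports 0 and 2 */
    unsigned int port = 1;           /* invented port number */
    unsigned int wolopts = WAKE_MAGIC;

    if ((1u << port) & wol_nvm_bits)
        printf("port %u: WoL disabled by NVM\n", port);
    else if (wolopts && wolopts != WAKE_MAGIC)
        printf("port %u: only magic-packet WoL is supported\n", port);
    else
        printf("port %u: WoL %s\n", port, wolopts ? "enabled" : "disabled");
    return 0;
}
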
 
 static int i40e_nway_reset(struct net_device *netdev)
@@ -838,13 +970,13 @@ static int i40e_set_phys_id(struct net_device *netdev,
 		pf->led_status = i40e_led_get(hw);
 		return blink_freq;
 	case ETHTOOL_ID_ON:
-		i40e_led_set(hw, 0xF);
+		i40e_led_set(hw, 0xF, false);
 		break;
 	case ETHTOOL_ID_OFF:
-		i40e_led_set(hw, 0x0);
+		i40e_led_set(hw, 0x0, false);
 		break;
 	case ETHTOOL_ID_INACTIVE:
-		i40e_led_set(hw, pf->led_status);
+		i40e_led_set(hw, pf->led_status, false);
 		break;
 	}
 
@@ -1003,6 +1135,7 @@ static int i40e_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
 		ret = i40e_get_rss_hash_opts(pf, cmd);
 		break;
 	case ETHTOOL_GRXCLSRLCNT:
+		cmd->rule_cnt = 10;
 		ret = 0;
 		break;
 	case ETHTOOL_GRXCLSRULE:
@@ -1142,6 +1275,7 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
 }
 
 #define IP_HEADER_OFFSET 14
+#define I40E_UDPIP_DUMMY_PACKET_LEN 42
 /**
  * i40e_add_del_fdir_udpv4 - Add/Remove UDPv4 Flow Director filters for
  * a specific flow spec
@@ -1162,6 +1296,12 @@ static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
 	bool err = false;
 	int ret;
 	int i;
+	char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
+			 0x45, 0, 0, 0x1c, 0, 0, 0x40, 0, 0x40, 0x11,
+			 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+			 0, 0, 0, 0, 0, 0, 0, 0};
+
+	memcpy(fd_data->raw_packet, packet, I40E_UDPIP_DUMMY_PACKET_LEN);
 
 	ip = (struct iphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET);
 	udp = (struct udphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET
@@ -1192,6 +1332,7 @@ static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
 	return err ? -EOPNOTSUPP : 0;
 }
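
The 42-byte template added to i40e_add_del_fdir_udpv4() above is a skeleton Ethernet + IPv4 + UDP frame with the addresses and ports left zero for the ethtool flow spec to fill in: 0x08 0x00 is the IPv4 ethertype, 0x45 is version 4 with a five-word header, 0x1c is a total length of 28 (20-byte IP header plus 8-byte UDP header), and the trailing 0x40 0x11 pair is a TTL of 64 and protocol 17, UDP. The 54-byte TCP template in the next hunk is the same idea with a 20-byte TCP header. A quick sanity check of those sizes using the standard socket headers (nothing driver-specific):

#include <stdio.h>
#include <net/ethernet.h>
#include <netinet/ip.h>
#include <netinet/udp.h>

int main(void)
{
    size_t eth = sizeof(struct ether_header);    /* 14 bytes */
    size_t ip  = sizeof(struct iphdr);           /* 20 bytes */
    size_t udp = sizeof(struct udphdr);          /*  8 bytes */

    printf("ethertype IPv4          = 0x%04x\n", ETHERTYPE_IP);
    printf("IPv4 protocol for UDP   = %d\n", IPPROTO_UDP);
    printf("IP total length field   = %zu\n", ip + udp);
    printf("UDP dummy packet length = %zu\n", eth + ip + udp);
    return 0;
}
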
 
+#define I40E_TCPIP_DUMMY_PACKET_LEN 54
 /**
  * i40e_add_del_fdir_tcpv4 - Add/Remove TCPv4 Flow Director filters for
  * a specific flow spec
@@ -1211,6 +1352,14 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
 	struct iphdr *ip;
 	bool err = false;
 	int ret;
+	/* Dummy packet */
+	char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
+			 0x45, 0, 0, 0x28, 0, 0, 0x40, 0, 0x40, 0x6,
+			 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+			 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+			 0x80, 0x11, 0x0, 0x72, 0, 0, 0, 0};
+
+	memcpy(fd_data->raw_packet, packet, I40E_TCPIP_DUMMY_PACKET_LEN);
 
 	ip = (struct iphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET);
 	tcp = (struct tcphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET
@@ -1218,6 +1367,15 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
 
 	ip->daddr = fsp->h_u.tcp_ip4_spec.ip4dst;
 	tcp->dest = fsp->h_u.tcp_ip4_spec.pdst;
+	ip->saddr = fsp->h_u.tcp_ip4_spec.ip4src;
+	tcp->source = fsp->h_u.tcp_ip4_spec.psrc;
+
+	if (add) {
+		if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) {
+			dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
+			pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
+		}
+	}
 
 	fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN;
 	ret = i40e_program_fdir_filter(fd_data, pf, add);
@@ -1232,9 +1390,6 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
 			 fd_data->pctype, ret);
 	}
 
-	ip->saddr = fsp->h_u.tcp_ip4_spec.ip4src;
-	tcp->source = fsp->h_u.tcp_ip4_spec.psrc;
-
 	fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
 
 	ret = i40e_program_fdir_filter(fd_data, pf, add);
@@ -1268,6 +1423,7 @@ static int i40e_add_del_fdir_sctpv4(struct i40e_vsi *vsi,
 	return -EOPNOTSUPP;
 }
 
+#define I40E_IP_DUMMY_PACKET_LEN 34
 /**
  * i40e_add_del_fdir_ipv4 - Add/Remove IPv4 Flow Director filters for
  * a specific flow spec
@@ -1287,7 +1443,11 @@ static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
 	bool err = false;
 	int ret;
 	int i;
+	char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
+			 0x45, 0, 0, 0x14, 0, 0, 0x40, 0, 0x40, 0x10,
+			 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
 
+	memcpy(fd_data->raw_packet, packet, I40E_IP_DUMMY_PACKET_LEN);
 	ip = (struct iphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET);
 
 	ip->saddr = fsp->h_u.usr_ip4_spec.ip4src;
@@ -1356,8 +1516,8 @@ static int i40e_add_del_fdir_ethtool(struct i40e_vsi *vsi,
 	fd_data.flex_off = 0;
 	fd_data.pctype = 0;
 	fd_data.dest_vsi = vsi->id;
-	fd_data.dest_ctl = 0;
-	fd_data.fd_status = 0;
+	fd_data.dest_ctl = I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX;
+	fd_data.fd_status = I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID;
 	fd_data.cnt_index = 0;
 	fd_data.fd_id = 0;
 
@@ -1400,6 +1560,7 @@ static int i40e_add_del_fdir_ethtool(struct i40e_vsi *vsi,
 
 	return ret;
 }
+
 /**
  * i40e_set_rxnfc - command to set RX flow classification rules
  * @netdev: network interface device structure
@@ -1431,6 +1592,94 @@ static int i40e_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
 	return ret;
 }
 
+/**
+ * i40e_max_channels - get Max number of combined channels supported
+ * @vsi: vsi pointer
+ **/
+static unsigned int i40e_max_channels(struct i40e_vsi *vsi)
+{
+	/* TODO: This code assumes DCB and FD is disabled for now. */
+	return vsi->alloc_queue_pairs;
+}
+
+/**
+ * i40e_get_channels - Get the current channels enabled and max supported etc.
+ * @netdev: network interface device structure
+ * @ch: ethtool channels structure
+ *
+ * We don't support separate tx and rx queues as channels. The other count
+ * represents how many queues are being used for control. max_combined counts
+ * how many queue pairs we can support. They may not be mapped 1 to 1 with
+ * q_vectors since we support a lot more queue pairs than q_vectors.
+ **/
+static void i40e_get_channels(struct net_device *dev,
+			       struct ethtool_channels *ch)
+{
+	struct i40e_netdev_priv *np = netdev_priv(dev);
+	struct i40e_vsi *vsi = np->vsi;
+	struct i40e_pf *pf = vsi->back;
+
+	/* report maximum channels */
+	ch->max_combined = i40e_max_channels(vsi);
+
+	/* report info for other vector */
+	ch->other_count = (pf->flags & I40E_FLAG_FD_SB_ENABLED) ? 1 : 0;
+	ch->max_other = ch->other_count;
+
+	/* Note: This code assumes DCB is disabled for now. */
+	ch->combined_count = vsi->num_queue_pairs;
+}
+
+/**
+ * i40e_set_channels - Set the new channels count.
+ * @netdev: network interface device structure
+ * @ch: ethtool channels structure
+ *
+ * The new channels count may not be the same as requested by the user
+ * since it gets rounded down to a power of 2 value.
+ **/
+static int i40e_set_channels(struct net_device *dev,
+			      struct ethtool_channels *ch)
+{
+	struct i40e_netdev_priv *np = netdev_priv(dev);
+	unsigned int count = ch->combined_count;
+	struct i40e_vsi *vsi = np->vsi;
+	struct i40e_pf *pf = vsi->back;
+	int new_count;
+
+	/* We do not support setting channels for any other VSI at present */
+	if (vsi->type != I40E_VSI_MAIN)
+		return -EINVAL;
+
+	/* verify they are not requesting separate vectors */
+	if (!count || ch->rx_count || ch->tx_count)
+		return -EINVAL;
+
+	/* verify other_count has not changed */
+	if (ch->other_count != ((pf->flags & I40E_FLAG_FD_SB_ENABLED) ? 1 : 0))
+		return -EINVAL;
+
+	/* verify the number of channels does not exceed hardware limits */
+	if (count > i40e_max_channels(vsi))
+		return -EINVAL;
+
+	/* update feature limits from largest to smallest supported values */
+	/* TODO: Flow director limit, DCB etc */
+
+	/* cap RSS limit */
+	if (count > pf->rss_size_max)
+		count = pf->rss_size_max;
+
+	/* use rss_reconfig to rebuild with new queue count and update traffic
+	 * class queue mapping
+	 */
+	new_count = i40e_reconfig_rss_queues(pf, count);
+	if (new_count > 0)
+		return 0;
+	else
+		return -EINVAL;
+}
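
The new channel hooks above expose queue-pair counts through the standard ethtool channels interface: max_combined reports the VSI's allocatable queue pairs, other_count is 1 only while sideband Flow Director owns a vector, and a set request is validated, capped at the RSS maximum and then applied through i40e_reconfig_rss_queues(). Reading the current values from userspace goes through the ETHTOOL_GCHANNELS ioctl, which is what "ethtool -l" drives; the interface name below is a placeholder:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
    struct ethtool_channels ch = { .cmd = ETHTOOL_GCHANNELS };
    struct ifreq ifr;
    int fd = socket(AF_INET, SOCK_DGRAM, 0);

    if (fd < 0) {
        perror("socket");
        return 1;
    }
    memset(&ifr, 0, sizeof(ifr));
    strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);    /* placeholder ifname */
    ifr.ifr_data = (void *)&ch;

    if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
        perror("SIOCETHTOOL");
        close(fd);
        return 1;
    }
    printf("combined: %u in use, %u max; other: %u in use, %u max\n",
           ch.combined_count, ch.max_combined, ch.other_count, ch.max_other);
    close(fd);
    return 0;
}
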
+
 static const struct ethtool_ops i40e_ethtool_ops = {
 	.get_settings		= i40e_get_settings,
 	.get_drvinfo		= i40e_get_drvinfo,
@@ -1439,6 +1688,7 @@ static const struct ethtool_ops i40e_ethtool_ops = {
 	.nway_reset		= i40e_nway_reset,
 	.get_link		= ethtool_op_get_link,
 	.get_wol		= i40e_get_wol,
+	.set_wol		= i40e_set_wol,
 	.get_eeprom_len		= i40e_get_eeprom_len,
 	.get_eeprom		= i40e_get_eeprom,
 	.get_ringparam		= i40e_get_ringparam,
@@ -1455,6 +1705,8 @@ static const struct ethtool_ops i40e_ethtool_ops = {
 	.get_ethtool_stats	= i40e_get_ethtool_stats,
 	.get_coalesce		= i40e_get_coalesce,
 	.set_coalesce		= i40e_set_coalesce,
+	.get_channels		= i40e_get_channels,
+	.set_channels		= i40e_set_channels,
 	.get_ts_info		= i40e_get_ts_info,
 };
 
diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_hmc.c
index 901804af8b0e..bf2d4cc5b569 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_hmc.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
  * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
  *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
@@ -47,10 +46,10 @@ i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw,
 					      u64 direct_mode_sz)
 {
 	enum i40e_memory_type mem_type __attribute__((unused));
-	i40e_status ret_code = 0;
 	struct i40e_hmc_sd_entry *sd_entry;
 	bool dma_mem_alloc_done = false;
 	struct i40e_dma_mem mem;
+	i40e_status ret_code;
 	u64 alloc_len;
 
 	if (NULL == hmc_info->sd_table.sd_entry) {
@@ -90,11 +89,9 @@ i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw,
 			sd_entry->u.pd_table.pd_entry =
 				(struct i40e_hmc_pd_entry *)
 				sd_entry->u.pd_table.pd_entry_virt_mem.va;
-			memcpy(&sd_entry->u.pd_table.pd_page_addr, &mem,
-			       sizeof(struct i40e_dma_mem));
+			sd_entry->u.pd_table.pd_page_addr = mem;
 		} else {
-			memcpy(&sd_entry->u.bp.addr, &mem,
-			       sizeof(struct i40e_dma_mem));
+			sd_entry->u.bp.addr = mem;
 			sd_entry->u.bp.sd_pd_index = sd_index;
 		}
 		/* initialize the sd entry */
@@ -165,7 +162,7 @@ i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw,
 		if (ret_code)
 			goto exit;
 
-		memcpy(&pd_entry->bp.addr, &mem, sizeof(struct i40e_dma_mem));
+		pd_entry->bp.addr = mem;
 		pd_entry->bp.sd_pd_index = pd_index;
 		pd_entry->bp.entry_type = I40E_SD_TYPE_PAGED;
 		/* Set page address and valid bit */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.h b/drivers/net/ethernet/intel/i40e/i40e_hmc.h
index aacd42a261e9..0cd4701234f8 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_hmc.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
  * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
  *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
@@ -117,7 +116,6 @@ struct i40e_hmc_info {
  * @hw: pointer to our hw struct
  * @pa: pointer to physical address
  * @sd_index: segment descriptor index
- * @hmc_fn_id: hmc function id
  * @type: if sd entry is direct or paged
  **/
 #define I40E_SET_PF_SD_ENTRY(hw, pa, sd_index, type)			\
@@ -139,7 +137,6 @@ struct i40e_hmc_info {
  * I40E_CLEAR_PF_SD_ENTRY - marks the sd entry as invalid in the hardware
  * @hw: pointer to our hw struct
  * @sd_index: segment descriptor index
- * @hmc_fn_id: hmc function id
  * @type: if sd entry is direct or paged
  **/
 #define I40E_CLEAR_PF_SD_ENTRY(hw, sd_index, type)			\
@@ -160,7 +157,6 @@ struct i40e_hmc_info {
  * @hw: pointer to our hw struct
  * @sd_idx: segment descriptor index
  * @pd_idx: page descriptor index
- * @hmc_fn_id: hmc function id
  **/
 #define I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, pd_idx)			\
 	wr32((hw), I40E_PFHMC_PDINV,					\
diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
index a695b91c9c79..d5d98fe2691d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
  * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
  *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
@@ -486,8 +485,7 @@ i40e_status i40e_configure_lan_hmc(struct i40e_hw *hw,
 		/* Make one big object, a single SD */
 		info.count = 1;
 		ret_code = i40e_create_lan_hmc_object(hw, &info);
-		if ((ret_code) &&
-		    (model == I40E_HMC_MODEL_DIRECT_PREFERRED))
+		if (ret_code && (model == I40E_HMC_MODEL_DIRECT_PREFERRED))
 			goto try_type_paged;
 		else if (ret_code)
 			goto configure_lan_hmc_out;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h
index 00ff35006077..341de925a298 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
  * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
  *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
@@ -113,8 +112,8 @@ enum i40e_hmc_lan_object_size {
 #define I40E_HMC_L2OBJ_BASE_ALIGNMENT 512
 #define I40E_HMC_OBJ_SIZE_TXQ         128
 #define I40E_HMC_OBJ_SIZE_RXQ         32
-#define I40E_HMC_OBJ_SIZE_FCOE_CNTX   128
-#define I40E_HMC_OBJ_SIZE_FCOE_FILT   32
+#define I40E_HMC_OBJ_SIZE_FCOE_CNTX   64
+#define I40E_HMC_OBJ_SIZE_FCOE_FILT   64
 
 enum i40e_hmc_lan_rsrc_type {
 	I40E_HMC_LAN_FULL  = 0,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 12b0932204ba..a4b940862b83 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
  * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
  *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
@@ -27,6 +26,9 @@
 
 /* Local includes */
 #include "i40e.h"
+#ifdef CONFIG_I40E_VXLAN
+#include <net/vxlan.h>
+#endif
 
 const char i40e_driver_name[] = "i40e";
 static const char i40e_driver_string[] =
@@ -36,22 +38,24 @@ static const char i40e_driver_string[] =
 
 #define DRV_VERSION_MAJOR 0
 #define DRV_VERSION_MINOR 3
-#define DRV_VERSION_BUILD 11
+#define DRV_VERSION_BUILD 30
 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
 	     __stringify(DRV_VERSION_MINOR) "." \
 	     __stringify(DRV_VERSION_BUILD)    DRV_KERN
 const char i40e_driver_version_str[] = DRV_VERSION;
-static const char i40e_copyright[] = "Copyright (c) 2013 Intel Corporation.";
+static const char i40e_copyright[] = "Copyright (c) 2013 - 2014 Intel Corporation.";
 
 /* a bit of forward declarations */
 static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
 static void i40e_handle_reset_warning(struct i40e_pf *pf);
 static int i40e_add_vsi(struct i40e_vsi *vsi);
 static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
-static int i40e_setup_pf_switch(struct i40e_pf *pf);
+static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit);
 static int i40e_setup_misc_vector(struct i40e_pf *pf);
 static void i40e_determine_queue_usage(struct i40e_pf *pf);
 static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
+static void i40e_fdir_sb_setup(struct i40e_pf *pf);
+static int i40e_veb_get_bw_info(struct i40e_veb *veb);
 
 /* i40e_pci_tbl - PCI Device ID Table
  *
@@ -61,16 +65,16 @@ static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
  *   Class, Class Mask, private data (not used) }
  */
 static DEFINE_PCI_DEVICE_TABLE(i40e_pci_tbl) = {
-	{PCI_VDEVICE(INTEL, I40E_SFP_XL710_DEVICE_ID), 0},
-	{PCI_VDEVICE(INTEL, I40E_SFP_X710_DEVICE_ID), 0},
-	{PCI_VDEVICE(INTEL, I40E_QEMU_DEVICE_ID), 0},
-	{PCI_VDEVICE(INTEL, I40E_KX_A_DEVICE_ID), 0},
-	{PCI_VDEVICE(INTEL, I40E_KX_B_DEVICE_ID), 0},
-	{PCI_VDEVICE(INTEL, I40E_KX_C_DEVICE_ID), 0},
-	{PCI_VDEVICE(INTEL, I40E_KX_D_DEVICE_ID), 0},
-	{PCI_VDEVICE(INTEL, I40E_QSFP_A_DEVICE_ID), 0},
-	{PCI_VDEVICE(INTEL, I40E_QSFP_B_DEVICE_ID), 0},
-	{PCI_VDEVICE(INTEL, I40E_QSFP_C_DEVICE_ID), 0},
+	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0},
+	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X710), 0},
+	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0},
+	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_A), 0},
+	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_B), 0},
+	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_C), 0},
+	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_D), 0},
+	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
+	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
+	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
 	/* required last entry */
 	{0, }
 };
@@ -354,6 +358,9 @@ static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
 	struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
 	int i;
 
+	if (test_bit(__I40E_DOWN, &vsi->state))
+		return stats;
+
 	if (!vsi->tx_rings)
 		return stats;
 
@@ -416,7 +423,7 @@ void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
 	memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets));
 	memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats));
 	memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
-	if (vsi->rx_rings)
+	if (vsi->rx_rings && vsi->rx_rings[0]) {
 		for (i = 0; i < vsi->num_queue_pairs; i++) {
 			memset(&vsi->rx_rings[i]->stats, 0 ,
 			       sizeof(vsi->rx_rings[i]->stats));
@@ -427,6 +434,7 @@ void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
 			memset(&vsi->tx_rings[i]->tx_stats, 0,
 			       sizeof(vsi->tx_rings[i]->tx_stats));
 		}
+	}
 	vsi->stat_offsets_loaded = false;
 }
 
@@ -461,7 +469,7 @@ static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
 {
 	u64 new_data;
 
-	if (hw->device_id == I40E_QEMU_DEVICE_ID) {
+	if (hw->device_id == I40E_DEV_ID_QEMU) {
 		new_data = rd32(hw, loreg);
 		new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
 	} else {
@@ -577,10 +585,11 @@ static void i40e_update_veb_stats(struct i40e_veb *veb)
 	i40e_stat_update32(hw, I40E_GLSW_TDPC(idx),
 			   veb->stat_offsets_loaded,
 			   &oes->tx_discards, &es->tx_discards);
-	i40e_stat_update32(hw, I40E_GLSW_RUPP(idx),
-			   veb->stat_offsets_loaded,
-			   &oes->rx_unknown_protocol, &es->rx_unknown_protocol);
-
+	if (hw->revision_id > 0)
+		i40e_stat_update32(hw, I40E_GLSW_RUPP(idx),
+				   veb->stat_offsets_loaded,
+				   &oes->rx_unknown_protocol,
+				   &es->rx_unknown_protocol);
 	i40e_stat_update48(hw, I40E_GLSW_GORCH(idx), I40E_GLSW_GORCL(idx),
 			   veb->stat_offsets_loaded,
 			   &oes->rx_bytes, &es->rx_bytes);
@@ -778,8 +787,8 @@ void i40e_update_stats(struct i40e_vsi *vsi)
 		} while (u64_stats_fetch_retry_bh(&p->syncp, start));
 		rx_b += bytes;
 		rx_p += packets;
-		rx_buf += p->rx_stats.alloc_rx_buff_failed;
-		rx_page += p->rx_stats.alloc_rx_page_failed;
+		rx_buf += p->rx_stats.alloc_buff_failed;
+		rx_page += p->rx_stats.alloc_page_failed;
 	}
 	rcu_read_unlock();
 	vsi->tx_restart = tx_restart;
@@ -1065,7 +1074,7 @@ struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr,
 		if (!i40e_find_filter(vsi, macaddr, f->vlan,
 				      is_vf, is_netdev)) {
 			if (!i40e_add_filter(vsi, macaddr, f->vlan,
-						is_vf, is_netdev))
+					     is_vf, is_netdev))
 				return NULL;
 		}
 	}
@@ -1207,6 +1216,10 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
 	if (ether_addr_equal(netdev->dev_addr, addr->sa_data))
 		return 0;
 
+	if (test_bit(__I40E_DOWN, &vsi->back->state) ||
+	    test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
+		return -EADDRNOTAVAIL;
+
 	if (vsi->type == I40E_VSI_MAIN) {
 		i40e_status ret;
 		ret = i40e_aq_mac_address_write(&vsi->back->hw,
@@ -1260,6 +1273,7 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
 	u8 offset;
 	u16 qmap;
 	int i;
+	u16 num_tc_qps = 0;
 
 	sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
 	offset = 0;
@@ -1281,6 +1295,9 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
 
 	vsi->tc_config.numtc = numtc;
 	vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
+	/* Number of queues per enabled TC */
+	num_tc_qps = rounddown_pow_of_two(vsi->alloc_queue_pairs/numtc);
+	num_tc_qps = min_t(int, num_tc_qps, I40E_MAX_QUEUES_PER_TC);
 
 	/* Setup queue offset/count for all TCs for given VSI */
 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
@@ -1288,30 +1305,25 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
 		if (vsi->tc_config.enabled_tc & (1 << i)) { /* TC is enabled */
 			int pow, num_qps;
 
-			vsi->tc_config.tc_info[i].qoffset = offset;
 			switch (vsi->type) {
 			case I40E_VSI_MAIN:
-				if (i == 0)
-					qcount = pf->rss_size;
-				else
-					qcount = pf->num_tc_qps;
-				vsi->tc_config.tc_info[i].qcount = qcount;
+				qcount = min_t(int, pf->rss_size, num_tc_qps);
 				break;
 			case I40E_VSI_FDIR:
 			case I40E_VSI_SRIOV:
 			case I40E_VSI_VMDQ2:
 			default:
-				qcount = vsi->alloc_queue_pairs;
-				vsi->tc_config.tc_info[i].qcount = qcount;
+				qcount = num_tc_qps;
 				WARN_ON(i != 0);
 				break;
 			}
+			vsi->tc_config.tc_info[i].qoffset = offset;
+			vsi->tc_config.tc_info[i].qcount = qcount;
 
 			/* find the power-of-2 of the number of queue pairs */
-			num_qps = vsi->tc_config.tc_info[i].qcount;
+			num_qps = qcount;
 			pow = 0;
-			while (num_qps &&
-			      ((1 << pow) < vsi->tc_config.tc_info[i].qcount)) {
+			while (num_qps && ((1 << pow) < qcount)) {
 				pow++;
 				num_qps >>= 1;
 			}
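
The queue-map arithmetic above first sizes each enabled TC (rounddown_pow_of_two of the per-TC share, capped by rss_size for the main VSI) and then derives the log2 exponent that goes into the qmap field. A rough standalone sketch of just that arithmetic, with the kernel helper open-coded and example numbers; the I40E_MAX_QUEUES_PER_TC cap is omitted:

#include <stdio.h>

/* open-coded stand-in for the kernel's rounddown_pow_of_two() */
static unsigned int rounddown_pow_of_two(unsigned int n)
{
	unsigned int p = 1;

	while ((p << 1) && (p << 1) <= n)
		p <<= 1;
	return p;
}

int main(void)
{
	unsigned int alloc_queue_pairs = 24, numtc = 3, rss_size = 8;
	unsigned int num_tc_qps, qcount, num_qps, pow = 0;

	/* queues per enabled TC, rounded down to a power of two */
	num_tc_qps = rounddown_pow_of_two(alloc_queue_pairs / numtc);

	/* main VSI: further capped by the RSS queue count */
	qcount = rss_size < num_tc_qps ? rss_size : num_tc_qps;

	/* log2 exponent that ends up in the TC queue-number field */
	num_qps = qcount;
	while (num_qps && ((1u << pow) < qcount)) {
		pow++;
		num_qps >>= 1;
	}

	printf("num_tc_qps=%u qcount=%u pow=%u\n", num_tc_qps, qcount, pow);
	return 0;
}
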
@@ -1321,7 +1333,7 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
 			    (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
 			    (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
 
-			offset += vsi->tc_config.tc_info[i].qcount;
+			offset += qcount;
 		} else {
 			/* TC is not enabled so set the offset to
 			 * default queue and allocate one queue
@@ -1497,11 +1509,6 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
 				cpu_to_le16((u16)(f->vlan ==
 					    I40E_VLAN_ANY ? 0 : f->vlan));
 
-			/* vlan0 as wild card to allow packets from all vlans */
-			if (f->vlan == I40E_VLAN_ANY ||
-			    (vsi->netdev && !(vsi->netdev->features &
-					      NETIF_F_HW_VLAN_CTAG_FILTER)))
-				cmd_flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
 			cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
 			del_list[num_del].flags = cmd_flags;
 			num_del++;
@@ -1567,12 +1574,6 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
 			add_list[num_add].queue_number = 0;
 
 			cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
-
-			/* vlan0 as wild card to allow packets from all vlans */
-			if (f->vlan == I40E_VLAN_ANY || (vsi->netdev &&
-			    !(vsi->netdev->features &
-						 NETIF_F_HW_VLAN_CTAG_FILTER)))
-				cmd_flags |= I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
 			add_list[num_add].flags = cpu_to_le16(cmd_flags);
 			num_add++;
 
@@ -1638,6 +1639,13 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
 			dev_info(&pf->pdev->dev,
 				 "set uni promisc failed, err %d, aq_err %d\n",
 				 aq_ret, pf->hw.aq.asq_last_status);
+		aq_ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw,
+						   vsi->seid,
+						   cur_promisc, NULL);
+		if (aq_ret)
+			dev_info(&pf->pdev->dev,
+				 "set brdcast promisc failed, err %d, aq_err %d\n",
+				 aq_ret, pf->hw.aq.asq_last_status);
 	}
 
 	clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
@@ -1690,6 +1698,27 @@ static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
 }
 
 /**
+ * i40e_ioctl - Access the hwtstamp interface
+ * @netdev: network interface device structure
+ * @ifr: interface request data
+ * @cmd: ioctl command
+ **/
+int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+	struct i40e_netdev_priv *np = netdev_priv(netdev);
+	struct i40e_pf *pf = np->vsi->back;
+
+	switch (cmd) {
+	case SIOCGHWTSTAMP:
+		return i40e_ptp_get_ts_config(pf, ifr);
+	case SIOCSHWTSTAMP:
+		return i40e_ptp_set_ts_config(pf, ifr);
+	default:
+		return -EOPNOTSUPP;
+	}
+}
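
The new i40e_ioctl() forwards the standard SIOCGHWTSTAMP/SIOCSHWTSTAMP requests to the PTP code. For context, a minimal userspace sketch of how such a request is normally issued through the generic hwtstamp_config interface; the interface name "eth0" is only an example, and the call needs a driver with timestamping support (and usually root):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/sockios.h>
#include <linux/net_tstamp.h>

int main(void)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;
	int fd;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&cfg, 0, sizeof(cfg));
	cfg.tx_type = HWTSTAMP_TX_ON;
	cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* example name */
	ifr.ifr_data = (char *)&cfg;

	/* lands in the driver's .ndo_do_ioctl, i.e. i40e_ioctl() above */
	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
		perror("SIOCSHWTSTAMP");

	close(fd);
	return 0;
}
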
+
+/**
  * i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI
  * @vsi: the vsi being adjusted
  **/
@@ -1771,7 +1800,6 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
 {
 	struct i40e_mac_filter *f, *add_f;
 	bool is_netdev, is_vf;
-	int ret;
 
 	is_vf = (vsi->type == I40E_VSI_SRIOV);
 	is_netdev = !!(vsi->netdev);
@@ -1797,13 +1825,6 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
 		}
 	}
 
-	ret = i40e_sync_vsi_filters(vsi);
-	if (ret) {
-		dev_info(&vsi->back->pdev->dev,
-			 "Could not sync filters for vid %d\n", vid);
-		return ret;
-	}
-
 	/* Now if we add a vlan tag, make sure to check if it is the first
 	 * tag (i.e. a "tag" -1 does exist) and if so replace the -1 "tag"
 	 * with 0, so we now accept untagged and specified tagged traffic
@@ -1824,7 +1845,10 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
 				return -ENOMEM;
 			}
 		}
+	}
 
+	/* Do not assume that I40E_VLAN_ANY should be reset to VLAN 0 */
+	if (vid > 0 && !vsi->info.pvid) {
 		list_for_each_entry(f, &vsi->mac_filter_list, list) {
 			if (i40e_find_filter(vsi, f->macaddr, I40E_VLAN_ANY,
 					     is_vf, is_netdev)) {
@@ -1840,10 +1864,13 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
 				}
 			}
 		}
-		ret = i40e_sync_vsi_filters(vsi);
 	}
 
-	return ret;
+	if (test_bit(__I40E_DOWN, &vsi->back->state) ||
+	    test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
+		return 0;
+
+	return i40e_sync_vsi_filters(vsi);
 }
 
 /**
@@ -1859,7 +1886,6 @@ int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
 	struct i40e_mac_filter *f, *add_f;
 	bool is_vf, is_netdev;
 	int filter_count = 0;
-	int ret;
 
 	is_vf = (vsi->type == I40E_VSI_SRIOV);
 	is_netdev = !!(netdev);
@@ -1870,12 +1896,6 @@ int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
 	list_for_each_entry(f, &vsi->mac_filter_list, list)
 		i40e_del_filter(vsi, f->macaddr, vid, is_vf, is_netdev);
 
-	ret = i40e_sync_vsi_filters(vsi);
-	if (ret) {
-		dev_info(&vsi->back->pdev->dev, "Could not sync filters\n");
-		return ret;
-	}
-
 	/* go through all the filters for this VSI and if there is only
 	 * vid == 0 it means there are no other filters, so vid 0 must
 	 * be replaced with -1. This signifies that we should from now
@@ -1918,6 +1938,10 @@ int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
 		}
 	}
 
+	if (test_bit(__I40E_DOWN, &vsi->back->state) ||
+	    test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
+		return 0;
+
 	return i40e_sync_vsi_filters(vsi);
 }
 
@@ -2008,8 +2032,9 @@ int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
 
 	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
 	vsi->info.pvid = cpu_to_le16(vid);
-	vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_INSERT_PVID;
-	vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
+	vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_TAGGED |
+				    I40E_AQ_VSI_PVLAN_INSERT_PVID |
+				    I40E_AQ_VSI_PVLAN_EMOD_STR;
 
 	ctxt.seid = vsi->seid;
 	memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
@@ -2032,8 +2057,9 @@ int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
  **/
 void i40e_vsi_remove_pvid(struct i40e_vsi *vsi)
 {
+	i40e_vlan_stripping_disable(vsi);
+
 	vsi->info.pvid = 0;
-	i40e_vlan_rx_register(vsi->netdev, vsi->netdev->features);
 }
 
 /**
@@ -2066,8 +2092,11 @@ static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi)
 {
 	int i;
 
+	if (!vsi->tx_rings)
+		return;
+
 	for (i = 0; i < vsi->num_queue_pairs; i++)
-		if (vsi->tx_rings[i]->desc)
+		if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
 			i40e_free_tx_resources(vsi->tx_rings[i]);
 }
 
@@ -2100,8 +2129,11 @@ static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
 {
 	int i;
 
+	if (!vsi->rx_rings)
+		return;
+
 	for (i = 0; i < vsi->num_queue_pairs; i++)
-		if (vsi->rx_rings[i]->desc)
+		if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
 			i40e_free_rx_resources(vsi->rx_rings[i]);
 }
 
@@ -2121,7 +2153,7 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring)
 	u32 qtx_ctl = 0;
 
 	/* some ATR related tx ring init */
-	if (vsi->back->flags & I40E_FLAG_FDIR_ATR_ENABLED) {
+	if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) {
 		ring->atr_sample_rate = vsi->back->atr_sample_rate;
 		ring->atr_count = 0;
 	} else {
@@ -2130,6 +2162,7 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring)
 
 	/* initialize XPS */
 	if (ring->q_vector && ring->netdev &&
+	    vsi->tc_config.numtc <= 1 &&
 	    !test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state))
 		netif_set_xps_queue(ring->netdev,
 				    &ring->q_vector->affinity_mask,
@@ -2141,8 +2174,9 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring)
 	tx_ctx.new_context = 1;
 	tx_ctx.base = (ring->dma / 128);
 	tx_ctx.qlen = ring->count;
-	tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FDIR_ENABLED |
-			I40E_FLAG_FDIR_ATR_ENABLED));
+	tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED |
+					       I40E_FLAG_FD_ATR_ENABLED));
+	tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP);
 
 	/* As part of VSI creation/update, FW allocates certain
 	 * Tx arbitration queue sets for each TC enabled for
@@ -2176,7 +2210,10 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring)
 	}
 
 	/* Now associate this queue with this PCI function */
-	qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
+	if (vsi->type == I40E_VSI_VMDQ2)
+		qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
+	else
+		qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
 	qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
 		    I40E_QTX_CTL_PF_INDX_MASK);
 	wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
@@ -2243,7 +2280,10 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
 	rx_ctx.tphwdesc_ena = 1;
 	rx_ctx.tphdata_ena = 1;
 	rx_ctx.tphhead_ena = 1;
-	rx_ctx.lrxqthresh = 2;
+	if (hw->revision_id == 0)
+		rx_ctx.lrxqthresh = 0;
+	else
+		rx_ctx.lrxqthresh = 2;
 	rx_ctx.crcstrip = 1;
 	rx_ctx.l2tsel = 1;
 	rx_ctx.showiv = 1;
@@ -2477,6 +2517,7 @@ static void i40e_enable_misc_int_causes(struct i40e_hw *hw)
 	      I40E_PFINT_ICR0_ENA_GRST_MASK          |
 	      I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
 	      I40E_PFINT_ICR0_ENA_GPIO_MASK          |
+	      I40E_PFINT_ICR0_ENA_TIMESYNC_MASK      |
 	      I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK  |
 	      I40E_PFINT_ICR0_ENA_HMC_ERR_MASK       |
 	      I40E_PFINT_ICR0_ENA_VFLR_MASK          |
@@ -2485,8 +2526,8 @@ static void i40e_enable_misc_int_causes(struct i40e_hw *hw)
 	wr32(hw, I40E_PFINT_ICR0_ENA, val);
 
 	/* SW_ITR_IDX = 0, but don't change INTENA */
-	wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK |
-					I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK);
+	wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
+					I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
 
 	/* OTHER_ITR_IDX = 0 */
 	wr32(hw, I40E_PFINT_STAT_CTL0, 0);
@@ -2532,6 +2573,19 @@ static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
 }
 
 /**
+ * i40e_irq_dynamic_disable_icr0 - Disable default interrupt generation for icr0
+ * @pf: board private structure
+ **/
+void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf)
+{
+	struct i40e_hw *hw = &pf->hw;
+
+	wr32(hw, I40E_PFINT_DYN_CTL0,
+	     I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
+	i40e_flush(hw);
+}
+
+/**
  * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0
  * @pf: board private structure
  **/
@@ -2584,23 +2638,6 @@ static irqreturn_t i40e_msix_clean_rings(int irq, void *data)
 }
 
 /**
- * i40e_fdir_clean_rings - Interrupt Handler for FDIR rings
- * @irq: interrupt number
- * @data: pointer to a q_vector
- **/
-static irqreturn_t i40e_fdir_clean_rings(int irq, void *data)
-{
-	struct i40e_q_vector *q_vector = data;
-
-	if (!q_vector->tx.ring && !q_vector->rx.ring)
-		return IRQ_HANDLED;
-
-	pr_info("fdir ring cleaning needed\n");
-
-	return IRQ_HANDLED;
-}
-
-/**
  * i40e_vsi_request_irq_msix - Initialize MSI-X interrupts
  * @vsi: the VSI being configured
  * @basename: name for the vector
@@ -2740,20 +2777,21 @@ static irqreturn_t i40e_intr(int irq, void *data)
 {
 	struct i40e_pf *pf = (struct i40e_pf *)data;
 	struct i40e_hw *hw = &pf->hw;
+	irqreturn_t ret = IRQ_NONE;
 	u32 icr0, icr0_remaining;
 	u32 val, ena_mask;
 
 	icr0 = rd32(hw, I40E_PFINT_ICR0);
-
-	val = rd32(hw, I40E_PFINT_DYN_CTL0);
-	val = val | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
-	wr32(hw, I40E_PFINT_DYN_CTL0, val);
+	ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA);
 
 	/* if sharing a legacy IRQ, we might get called w/o an intr pending */
 	if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
-		return IRQ_NONE;
+		goto enable_intr;
 
-	ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA);
+	/* if interrupt but no bits showing, must be SWINT */
+	if (((icr0 & ~I40E_PFINT_ICR0_INTEVENT_MASK) == 0) ||
+	    (icr0 & I40E_PFINT_ICR0_SWINT_MASK))
+		pf->sw_int_count++;
 
 	/* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
 	if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {
@@ -2793,14 +2831,31 @@ static irqreturn_t i40e_intr(int irq, void *data)
 		val = rd32(hw, I40E_GLGEN_RSTAT);
 		val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
 		       >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
-		if (val & I40E_RESET_CORER)
+		if (val == I40E_RESET_CORER)
 			pf->corer_count++;
-		else if (val & I40E_RESET_GLOBR)
+		else if (val == I40E_RESET_GLOBR)
 			pf->globr_count++;
-		else if (val & I40E_RESET_EMPR)
+		else if (val == I40E_RESET_EMPR)
 			pf->empr_count++;
 	}
 
+	if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) {
+		icr0 &= ~I40E_PFINT_ICR0_HMC_ERR_MASK;
+		dev_info(&pf->pdev->dev, "HMC error interrupt\n");
+	}
+
+	if (icr0 & I40E_PFINT_ICR0_TIMESYNC_MASK) {
+		u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0);
+
+		if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK) {
+			ena_mask &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
+			i40e_ptp_tx_hwtstamp(pf);
+			prttsyn_stat &= ~I40E_PRTTSYN_STAT_0_TXTIME_MASK;
+		}
+
+		wr32(hw, I40E_PRTTSYN_STAT_0, prttsyn_stat);
+	}
+
 	/* If a critical error is pending we have no choice but to reset the
 	 * device.
 	 * Report and mask out any remaining unexpected interrupts.
@@ -2809,22 +2864,19 @@ static irqreturn_t i40e_intr(int irq, void *data)
 	if (icr0_remaining) {
 		dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n",
 			 icr0_remaining);
-		if ((icr0_remaining & I40E_PFINT_ICR0_HMC_ERR_MASK) ||
-		    (icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) ||
+		if ((icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) ||
 		    (icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) ||
 		    (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK) ||
 		    (icr0_remaining & I40E_PFINT_ICR0_MAL_DETECT_MASK)) {
-			if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) {
-				dev_info(&pf->pdev->dev, "HMC error interrupt\n");
-			} else {
-				dev_info(&pf->pdev->dev, "device will be reset\n");
-				set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
-				i40e_service_event_schedule(pf);
-			}
+			dev_info(&pf->pdev->dev, "device will be reset\n");
+			set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
+			i40e_service_event_schedule(pf);
 		}
 		ena_mask &= ~icr0_remaining;
 	}
+	ret = IRQ_HANDLED;
 
+enable_intr:
 	/* re-enable interrupt causes */
 	wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
 	if (!test_bit(__I40E_DOWN, &pf->state)) {
@@ -2832,6 +2884,94 @@ static irqreturn_t i40e_intr(int irq, void *data)
 		i40e_irq_dynamic_enable_icr0(pf);
 	}
 
+	return ret;
+}
+
+/**
+ * i40e_clean_fdir_tx_irq - Reclaim resources after transmit completes
+ * @tx_ring:  tx ring to clean
+ * @budget:   how many cleans we're allowed
+ *
+ * Returns true if there's any budget left (i.e. the clean is finished)
+ **/
+static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget)
+{
+	struct i40e_vsi *vsi = tx_ring->vsi;
+	u16 i = tx_ring->next_to_clean;
+	struct i40e_tx_buffer *tx_buf;
+	struct i40e_tx_desc *tx_desc;
+
+	tx_buf = &tx_ring->tx_bi[i];
+	tx_desc = I40E_TX_DESC(tx_ring, i);
+	i -= tx_ring->count;
+
+	do {
+		struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
+
+		/* if next_to_watch is not set then there is no work pending */
+		if (!eop_desc)
+			break;
+
+		/* prevent any other reads prior to eop_desc */
+		read_barrier_depends();
+
+		/* if the descriptor isn't done, no work yet to do */
+		if (!(eop_desc->cmd_type_offset_bsz &
+		      cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
+			break;
+
+		/* clear next_to_watch to prevent false hangs */
+		tx_buf->next_to_watch = NULL;
+
+		/* unmap skb header data */
+		dma_unmap_single(tx_ring->dev,
+				 dma_unmap_addr(tx_buf, dma),
+				 dma_unmap_len(tx_buf, len),
+				 DMA_TO_DEVICE);
+
+		dma_unmap_len_set(tx_buf, len, 0);
+
+
+		/* move to the next desc and buffer to clean */
+		tx_buf++;
+		tx_desc++;
+		i++;
+		if (unlikely(!i)) {
+			i -= tx_ring->count;
+			tx_buf = tx_ring->tx_bi;
+			tx_desc = I40E_TX_DESC(tx_ring, 0);
+		}
+
+		/* update budget accounting */
+		budget--;
+	} while (likely(budget));
+
+	i += tx_ring->count;
+	tx_ring->next_to_clean = i;
+
+	if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
+		i40e_irq_dynamic_enable(vsi,
+				tx_ring->q_vector->v_idx + vsi->base_vector);
+	}
+	return budget > 0;
+}
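
i40e_clean_fdir_tx_irq() walks the ring with the driver's usual biased index: i is offset by -count so the wrap test is a simple check against zero rather than a compare with the ring size, and next_to_clean is recovered by adding count back. A standalone sketch of just that indexing pattern:

#include <stdio.h>

#define RING_SIZE 8

int main(void)
{
	int next_to_clean = 5;		/* arbitrary starting slot */
	int budget = 10;
	int i = next_to_clean;

	/* bias the index negative, as the cleanup loop above does */
	i -= RING_SIZE;

	while (budget--) {
		/* i + RING_SIZE is the real slot number */
		printf("cleaning slot %d\n", i + RING_SIZE);

		i++;
		if (!i)			/* wrapped past the last slot */
			i -= RING_SIZE;	/* back to slot 0 */
	}

	next_to_clean = i + RING_SIZE;
	printf("next_to_clean=%d\n", next_to_clean);
	return 0;
}
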
+
+/**
+ * i40e_fdir_clean_ring - Interrupt Handler for FDIR SB ring
+ * @irq: interrupt number
+ * @data: pointer to a q_vector
+ **/
+static irqreturn_t i40e_fdir_clean_ring(int irq, void *data)
+{
+	struct i40e_q_vector *q_vector = data;
+	struct i40e_vsi *vsi;
+
+	if (!q_vector->tx.ring)
+		return IRQ_HANDLED;
+
+	vsi = q_vector->tx.ring->vsi;
+	i40e_clean_fdir_tx_irq(q_vector->tx.ring, vsi->work_limit);
+
 	return IRQ_HANDLED;
 }
 
@@ -2974,28 +3114,20 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
 		} while (j-- && ((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT)
 			       ^ (tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT)) & 1);
 
-		if (enable) {
-			/* is STAT set ? */
-			if ((tx_reg & I40E_QTX_ENA_QENA_STAT_MASK)) {
-				dev_info(&pf->pdev->dev,
-					 "Tx %d already enabled\n", i);
-				continue;
-			}
-		} else {
-			/* is !STAT set ? */
-			if (!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK)) {
-				dev_info(&pf->pdev->dev,
-					 "Tx %d already disabled\n", i);
-				continue;
-			}
-		}
+		/* Skip if the queue is already in the requested state */
+		if (enable && (tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
+			continue;
+		if (!enable && !(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
+			continue;
 
 		/* turn on/off the queue */
-		if (enable)
+		if (enable) {
+			wr32(hw, I40E_QTX_HEAD(pf_q), 0);
 			tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK |
 				  I40E_QTX_ENA_QENA_STAT_MASK;
-		else
+		} else {
 			tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
+		}
 
 		wr32(hw, I40E_QTX_ENA(pf_q), tx_reg);
 
@@ -3019,6 +3151,9 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
 		}
 	}
 
+	if (hw->revision_id == 0)
+		mdelay(50);
+
 	return 0;
 }
 
@@ -3091,9 +3226,9 @@ static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
  * @vsi: the VSI being configured
  * @enable: start or stop the rings
  **/
-static int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool request)
+int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool request)
 {
-	int ret;
+	int ret = 0;
 
 	/* do rx first for enable and last for disable */
 	if (request) {
@@ -3102,10 +3237,9 @@ static int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool request)
 			return ret;
 		ret = i40e_vsi_control_tx(vsi, request);
 	} else {
-		ret = i40e_vsi_control_tx(vsi, request);
-		if (ret)
-			return ret;
-		ret = i40e_vsi_control_rx(vsi, request);
+		/* Ignore return value, we need to shutdown whatever we can */
+		i40e_vsi_control_tx(vsi, request);
+		i40e_vsi_control_rx(vsi, request);
 	}
 
 	return ret;
@@ -3131,7 +3265,8 @@ static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
 			u16 vector = i + base;
 
 			/* free only the irqs that were actually requested */
-			if (vsi->q_vectors[i]->num_ringpairs == 0)
+			if (!vsi->q_vectors[i] ||
+			    !vsi->q_vectors[i]->num_ringpairs)
 				continue;
 
 			/* clear the affinity_mask in the IRQ descriptor */
@@ -3543,7 +3678,7 @@ static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
 
 	/* Get the VSI level BW configuration per TC */
 	aq_ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
-					          NULL);
+						  NULL);
 	if (aq_ret) {
 		dev_info(&pf->pdev->dev,
 			 "couldn't get pf vsi ets bw config, err %d, aq_err %d\n",
@@ -3754,6 +3889,149 @@ out:
 }
 
 /**
+ * i40e_veb_config_tc - Configure TCs for given VEB
+ * @veb: given VEB
+ * @enabled_tc: TC bitmap
+ *
+ * Configures given TC bitmap for VEB (switching) element
+ **/
+int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
+{
+	struct i40e_aqc_configure_switching_comp_bw_config_data bw_data = {0};
+	struct i40e_pf *pf = veb->pf;
+	int ret = 0;
+	int i;
+
+	/* No TCs or already enabled TCs just return */
+	if (!enabled_tc || veb->enabled_tc == enabled_tc)
+		return ret;
+
+	bw_data.tc_valid_bits = enabled_tc;
+	/* bw_data.absolute_credits is not set (relative) */
+
+	/* Enable ETS TCs with equal BW Share for now */
+	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+		if (enabled_tc & (1 << i))
+			bw_data.tc_bw_share_credits[i] = 1;
+	}
+
+	ret = i40e_aq_config_switch_comp_bw_config(&pf->hw, veb->seid,
+						   &bw_data, NULL);
+	if (ret) {
+		dev_info(&pf->pdev->dev,
+			 "veb bw config failed, aq_err=%d\n",
+			 pf->hw.aq.asq_last_status);
+		goto out;
+	}
+
+	/* Update the BW information */
+	ret = i40e_veb_get_bw_info(veb);
+	if (ret) {
+		dev_info(&pf->pdev->dev,
+			 "Failed getting veb bw config, aq_err=%d\n",
+			 pf->hw.aq.asq_last_status);
+	}
+
+out:
+	return ret;
+}
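
i40e_veb_config_tc() builds the firmware's per-TC credit table by giving every TC set in the bitmap the same relative share. A trivial standalone sketch of that table construction (8 classes, mirroring I40E_MAX_TRAFFIC_CLASS; the example bitmap is arbitrary):

#include <stdio.h>

#define MAX_TRAFFIC_CLASS 8	/* mirrors I40E_MAX_TRAFFIC_CLASS */

int main(void)
{
	unsigned char enabled_tc = 0x0b;	/* example: TCs 0, 1 and 3 */
	unsigned char tc_bw_share_credits[MAX_TRAFFIC_CLASS] = {0};
	int i;

	/* equal relative share (credit 1) for every enabled TC */
	for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
		if (enabled_tc & (1 << i))
			tc_bw_share_credits[i] = 1;

	for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
		printf("TC%d: %u\n", i, (unsigned)tc_bw_share_credits[i]);
	return 0;
}
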
+
+#ifdef CONFIG_I40E_DCB
+/**
+ * i40e_dcb_reconfigure - Reconfigure all VEBs and VSIs
+ * @pf: PF struct
+ *
+ * Reconfigure VEB/VSIs on a given PF; it is assumed that
+ * the caller has already quiesced all the VSIs before calling
+ * this function.
+ **/
+static void i40e_dcb_reconfigure(struct i40e_pf *pf)
+{
+	u8 tc_map = 0;
+	int ret;
+	u8 v;
+
+	/* Enable the TCs available on PF to all VEBs */
+	tc_map = i40e_pf_get_tc_map(pf);
+	for (v = 0; v < I40E_MAX_VEB; v++) {
+		if (!pf->veb[v])
+			continue;
+		ret = i40e_veb_config_tc(pf->veb[v], tc_map);
+		if (ret) {
+			dev_info(&pf->pdev->dev,
+				 "Failed configuring TC for VEB seid=%d\n",
+				 pf->veb[v]->seid);
+			/* Will try to configure as many components as possible */
+		}
+	}
+
+	/* Update each VSI */
+	for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+		if (!pf->vsi[v])
+			continue;
+
+		/* - Enable all TCs for the LAN VSI
+		 * - For all others keep them at TC0 for now
+		 */
+		if (v == pf->lan_vsi)
+			tc_map = i40e_pf_get_tc_map(pf);
+		else
+			tc_map = i40e_pf_get_default_tc(pf);
+
+		ret = i40e_vsi_config_tc(pf->vsi[v], tc_map);
+		if (ret) {
+			dev_info(&pf->pdev->dev,
+				 "Failed configuring TC for VSI seid=%d\n",
+				 pf->vsi[v]->seid);
+			/* Will try to configure as many components as possible */
+		} else {
+			if (pf->vsi[v]->netdev)
+				i40e_dcbnl_set_all(pf->vsi[v]);
+		}
+	}
+}
+
+/**
+ * i40e_init_pf_dcb - Initialize DCB configuration
+ * @pf: PF being configured
+ *
+ * Query the current DCB configuration and cache it
+ * in the hardware structure
+ **/
+static int i40e_init_pf_dcb(struct i40e_pf *pf)
+{
+	struct i40e_hw *hw = &pf->hw;
+	int err = 0;
+
+	if (pf->hw.func_caps.npar_enable)
+		goto out;
+
+	/* Get the initial DCB configuration */
+	err = i40e_init_dcb(hw);
+	if (!err) {
+		/* Device/Function is not DCBX capable */
+		if ((!hw->func_caps.dcb) ||
+		    (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED)) {
+			dev_info(&pf->pdev->dev,
+				 "DCBX offload is not supported or is disabled for this PF.\n");
+
+			if (pf->flags & I40E_FLAG_MFP_ENABLED)
+				goto out;
+
+		} else {
+			/* When status is not DISABLED then DCBX in FW */
+			pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
+				       DCB_CAP_DCBX_VER_IEEE;
+			pf->flags |= I40E_FLAG_DCB_ENABLED;
+		}
+	}
+
+out:
+	return err;
+}
+#endif /* CONFIG_I40E_DCB */
+
+/**
  * i40e_up_complete - Finish the last steps of bringing up a connection
  * @vsi: the VSI being configured
  **/
@@ -3957,22 +4235,28 @@ static int i40e_open(struct net_device *netdev)
 	if (err)
 		goto err_setup_rx;
 
+	/* Notify the stack of the actual queue counts. */
+	err = netif_set_real_num_tx_queues(netdev, vsi->num_queue_pairs);
+	if (err)
+		goto err_set_queues;
+
+	err = netif_set_real_num_rx_queues(netdev, vsi->num_queue_pairs);
+	if (err)
+		goto err_set_queues;
+
 	err = i40e_up_complete(vsi);
 	if (err)
 		goto err_up_complete;
 
-	if ((vsi->type == I40E_VSI_MAIN) || (vsi->type == I40E_VSI_VMDQ2)) {
-		err = i40e_aq_set_vsi_broadcast(&pf->hw, vsi->seid, true, NULL);
-		if (err)
-			netdev_info(netdev,
-				    "couldn't set broadcast err %d aq_err %d\n",
-				    err, pf->hw.aq.asq_last_status);
-	}
+#ifdef CONFIG_I40E_VXLAN
+	vxlan_get_rx_port(netdev);
+#endif
 
 	return 0;
 
 err_up_complete:
 	i40e_down(vsi);
+err_set_queues:
 	i40e_vsi_free_irq(vsi);
 err_setup_rx:
 	i40e_vsi_free_rx_resources(vsi);
@@ -4054,6 +4338,24 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
 		wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
 		i40e_flush(&pf->hw);
 
+	} else if (reset_flags & (1 << __I40E_EMP_RESET_REQUESTED)) {
+
+		/* Request a Firmware Reset
+		 *
+		 * Same as Global reset, plus restarting the
+		 * embedded firmware engine.
+		 */
+		/* enable EMP Reset */
+		val = rd32(&pf->hw, I40E_GLGEN_RSTENA_EMP);
+		val |= I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_MASK;
+		wr32(&pf->hw, I40E_GLGEN_RSTENA_EMP, val);
+
+		/* force the reset */
+		val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
+		val |= I40E_GLGEN_RTRIG_EMPFWR_MASK;
+		wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
+		i40e_flush(&pf->hw);
+
 	} else if (reset_flags & (1 << __I40E_PF_RESET_REQUESTED)) {
 
 		/* Request a PF Reset
@@ -4091,6 +4393,143 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
 	}
 }
 
+#ifdef CONFIG_I40E_DCB
+/**
+ * i40e_dcb_need_reconfig - Check if DCB needs reconfig
+ * @pf: board private structure
+ * @old_cfg: current DCB config
+ * @new_cfg: new DCB config
+ **/
+bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
+			    struct i40e_dcbx_config *old_cfg,
+			    struct i40e_dcbx_config *new_cfg)
+{
+	bool need_reconfig = false;
+
+	/* Check if ETS configuration has changed */
+	if (memcmp(&new_cfg->etscfg,
+		   &old_cfg->etscfg,
+		   sizeof(new_cfg->etscfg))) {
+		/* If Priority Table has changed reconfig is needed */
+		if (memcmp(&new_cfg->etscfg.prioritytable,
+			   &old_cfg->etscfg.prioritytable,
+			   sizeof(new_cfg->etscfg.prioritytable))) {
+			need_reconfig = true;
+			dev_info(&pf->pdev->dev, "ETS UP2TC changed.\n");
+		}
+
+		if (memcmp(&new_cfg->etscfg.tcbwtable,
+			   &old_cfg->etscfg.tcbwtable,
+			   sizeof(new_cfg->etscfg.tcbwtable)))
+			dev_info(&pf->pdev->dev, "ETS TC BW Table changed.\n");
+
+		if (memcmp(&new_cfg->etscfg.tsatable,
+			   &old_cfg->etscfg.tsatable,
+			   sizeof(new_cfg->etscfg.tsatable)))
+			dev_info(&pf->pdev->dev, "ETS TSA Table changed.\n");
+	}
+
+	/* Check if PFC configuration has changed */
+	if (memcmp(&new_cfg->pfc,
+		   &old_cfg->pfc,
+		   sizeof(new_cfg->pfc))) {
+		need_reconfig = true;
+		dev_info(&pf->pdev->dev, "PFC config change detected.\n");
+	}
+
+	/* Check if APP Table has changed */
+	if (memcmp(&new_cfg->app,
+		   &old_cfg->app,
+		   sizeof(new_cfg->app))) {
+		need_reconfig = true;
+		dev_info(&pf->pdev->dev, "APP Table change detected.\n");
+	}
+
+	return need_reconfig;
+}
+
+/**
+ * i40e_handle_lldp_event - Handle LLDP Change MIB event
+ * @pf: board private structure
+ * @e: event info posted on ARQ
+ **/
+static int i40e_handle_lldp_event(struct i40e_pf *pf,
+				  struct i40e_arq_event_info *e)
+{
+	struct i40e_aqc_lldp_get_mib *mib =
+		(struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw;
+	struct i40e_hw *hw = &pf->hw;
+	struct i40e_dcbx_config *dcbx_cfg = &hw->local_dcbx_config;
+	struct i40e_dcbx_config tmp_dcbx_cfg;
+	bool need_reconfig = false;
+	int ret = 0;
+	u8 type;
+
+	/* Ignore if event is not for Nearest Bridge */
+	type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT)
+		& I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
+	if (type != I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE)
+		return ret;
+
+	/* Check MIB Type and return if event for Remote MIB update */
+	type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK;
+	if (type == I40E_AQ_LLDP_MIB_REMOTE) {
+		/* Update the remote cached instance and return */
+		ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
+				I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
+				&hw->remote_dcbx_config);
+		goto exit;
+	}
+
+	/* Convert/store the DCBX data from LLDPDU temporarily */
+	memset(&tmp_dcbx_cfg, 0, sizeof(tmp_dcbx_cfg));
+	ret = i40e_lldp_to_dcb_config(e->msg_buf, &tmp_dcbx_cfg);
+	if (ret) {
+		/* Error in LLDPDU parsing return */
+		dev_info(&pf->pdev->dev, "Failed parsing LLDPDU from event buffer\n");
+		goto exit;
+	}
+
+	/* No change detected in DCBX configs */
+	if (!memcmp(&tmp_dcbx_cfg, dcbx_cfg, sizeof(tmp_dcbx_cfg))) {
+		dev_info(&pf->pdev->dev, "No change detected in DCBX configuration.\n");
+		goto exit;
+	}
+
+	need_reconfig = i40e_dcb_need_reconfig(pf, dcbx_cfg, &tmp_dcbx_cfg);
+
+	i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg);
+
+	/* Overwrite the new configuration */
+	*dcbx_cfg = tmp_dcbx_cfg;
+
+	if (!need_reconfig)
+		goto exit;
+
+	/* Reconfiguration needed, so quiesce all VSIs */
+	i40e_pf_quiesce_all_vsi(pf);
+
+	/* Changes in configuration; update VEB/VSI */
+	i40e_dcb_reconfigure(pf);
+
+	i40e_pf_unquiesce_all_vsi(pf);
+exit:
+	return ret;
+}
+#endif /* CONFIG_I40E_DCB */
+
+/**
+ * i40e_do_reset_safe - Protected reset path for userland calls.
+ * @pf: board private structure
+ * @reset_flags: which reset is requested
+ *
+ **/
+void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags)
+{
+	rtnl_lock();
+	i40e_do_reset(pf, reset_flags);
+	rtnl_unlock();
+}
+
 /**
  * i40e_handle_lan_overflow_event - Handler for LAN queue overflow event
  * @pf: board private structure
@@ -4245,6 +4684,9 @@ static void i40e_link_event(struct i40e_pf *pf)
 
 	if (pf->vf)
 		i40e_vc_notify_link_state(pf);
+
+	if (pf->flags & I40E_FLAG_PTP)
+		i40e_ptp_set_increment(pf);
 }
 
 /**
@@ -4326,6 +4768,8 @@ static void i40e_watchdog_subtask(struct i40e_pf *pf)
 	for (i = 0; i < I40E_MAX_VEB; i++)
 		if (pf->veb[i])
 			i40e_update_veb_stats(pf->veb[i]);
+
+	i40e_ptp_rx_hang(pf->vsi[pf->lan_vsi]);
 }
 
 /**
@@ -4336,6 +4780,7 @@ static void i40e_reset_subtask(struct i40e_pf *pf)
 {
 	u32 reset_flags = 0;
 
+	rtnl_lock();
 	if (test_bit(__I40E_REINIT_REQUESTED, &pf->state)) {
 		reset_flags |= (1 << __I40E_REINIT_REQUESTED);
 		clear_bit(__I40E_REINIT_REQUESTED, &pf->state);
@@ -4358,7 +4803,7 @@ static void i40e_reset_subtask(struct i40e_pf *pf)
 	 */
 	if (test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) {
 		i40e_handle_reset_warning(pf);
-		return;
+		goto unlock;
 	}
 
 	/* If we're already down or resetting, just bail */
@@ -4366,6 +4811,9 @@ static void i40e_reset_subtask(struct i40e_pf *pf)
 	    !test_bit(__I40E_DOWN, &pf->state) &&
 	    !test_bit(__I40E_CONFIG_BUSY, &pf->state))
 		i40e_do_reset(pf, reset_flags);
+
+unlock:
+	rtnl_unlock();
 }
 
 /**
@@ -4429,6 +4877,7 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
 		return;
 
 	do {
+		event.msg_size = I40E_MAX_AQ_BUF_SIZE; /* reinit each time */
 		ret = i40e_clean_arq_element(hw, &event, &pending);
 		if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK) {
 			dev_info(&pf->pdev->dev, "No ARQ event found\n");
@@ -4454,15 +4903,23 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
 			break;
 		case i40e_aqc_opc_lldp_update_mib:
 			dev_info(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n");
+#ifdef CONFIG_I40E_DCB
+			rtnl_lock();
+			ret = i40e_handle_lldp_event(pf, &event);
+			rtnl_unlock();
+#endif /* CONFIG_I40E_DCB */
 			break;
 		case i40e_aqc_opc_event_lan_overflow:
 			dev_info(&pf->pdev->dev, "ARQ LAN queue overflow event received\n");
 			i40e_handle_lan_overflow_event(pf, &event);
 			break;
+		case i40e_aqc_opc_send_msg_to_peer:
+			dev_info(&pf->pdev->dev, "ARQ: Msg from other pf\n");
+			break;
 		default:
 			dev_info(&pf->pdev->dev,
-				 "ARQ Error: Unknown event %d received\n",
-				 event.desc.opcode);
+				 "ARQ Error: Unknown event 0x%04x received\n",
+				 opcode);
 			break;
 		}
 	} while (pending && (i++ < pf->adminq_work_limit));
@@ -4592,6 +5049,9 @@ static int i40e_get_capabilities(struct i40e_pf *pf)
 		}
 	} while (err);
 
+	/* increment MSI-X count because current FW skips one */
+	pf->hw.func_caps.num_msix_vectors++;
+
 	if (pf->hw.debug_mask & I40E_DEBUG_USER)
 		dev_info(&pf->pdev->dev,
 			 "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n",
@@ -4603,57 +5063,89 @@ static int i40e_get_capabilities(struct i40e_pf *pf)
 			 pf->hw.func_caps.num_tx_qp,
 			 pf->hw.func_caps.num_vsis);
 
+#define DEF_NUM_VSI (1 + (pf->hw.func_caps.fcoe ? 1 : 0) \
+		       + pf->hw.func_caps.num_vfs)
+	if (pf->hw.revision_id == 0 && (DEF_NUM_VSI > pf->hw.func_caps.num_vsis)) {
+		dev_info(&pf->pdev->dev,
+			 "got num_vsis %d, setting num_vsis to %d\n",
+			 pf->hw.func_caps.num_vsis, DEF_NUM_VSI);
+		pf->hw.func_caps.num_vsis = DEF_NUM_VSI;
+	}
+
 	return 0;
 }
 
+static int i40e_vsi_clear(struct i40e_vsi *vsi);
+
 /**
- * i40e_fdir_setup - initialize the Flow Director resources
+ * i40e_fdir_sb_setup - initialize the Flow Director resources for Sideband
  * @pf: board private structure
  **/
-static void i40e_fdir_setup(struct i40e_pf *pf)
+static void i40e_fdir_sb_setup(struct i40e_pf *pf)
 {
 	struct i40e_vsi *vsi;
 	bool new_vsi = false;
 	int err, i;
 
-	if (!(pf->flags & (I40E_FLAG_FDIR_ENABLED |
-			   I40E_FLAG_FDIR_ATR_ENABLED)))
+	if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
 		return;
 
-	pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
-
-	/* find existing or make new FDIR VSI */
+	/* find existing VSI and see if it needs configuring */
 	vsi = NULL;
-	for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
-		if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR)
+	for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
+		if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
 			vsi = pf->vsi[i];
+			break;
+		}
+	}
+
+	/* create a new VSI if none exists */
 	if (!vsi) {
-		vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR, pf->mac_seid, 0);
+		vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR,
+				     pf->vsi[pf->lan_vsi]->seid, 0);
 		if (!vsi) {
 			dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n");
-			pf->flags &= ~I40E_FLAG_FDIR_ENABLED;
-			return;
+			goto err_vsi;
 		}
 		new_vsi = true;
 	}
-	WARN_ON(vsi->base_queue != I40E_FDIR_RING);
-	i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_rings);
+	i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring);
 
 	err = i40e_vsi_setup_tx_resources(vsi);
-	if (!err)
-		err = i40e_vsi_setup_rx_resources(vsi);
-	if (!err)
-		err = i40e_vsi_configure(vsi);
-	if (!err && new_vsi) {
+	if (err)
+		goto err_setup_tx;
+	err = i40e_vsi_setup_rx_resources(vsi);
+	if (err)
+		goto err_setup_rx;
+
+	if (new_vsi) {
 		char int_name[IFNAMSIZ + 9];
+		err = i40e_vsi_configure(vsi);
+		if (err)
+			goto err_setup_rx;
 		snprintf(int_name, sizeof(int_name) - 1, "%s-fdir",
 			 dev_driver_string(&pf->pdev->dev));
 		err = i40e_vsi_request_irq(vsi, int_name);
-	}
-	if (!err)
+		if (err)
+			goto err_setup_rx;
 		err = i40e_up_complete(vsi);
+		if (err)
+			goto err_up_complete;
+	}
 
 	clear_bit(__I40E_NEEDS_RESTART, &vsi->state);
+	return;
+
+err_up_complete:
+	i40e_down(vsi);
+	i40e_vsi_free_irq(vsi);
+err_setup_rx:
+	i40e_vsi_free_rx_resources(vsi);
+err_setup_tx:
+	i40e_vsi_free_tx_resources(vsi);
+err_vsi:
+	pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
+	i40e_vsi_clear(vsi);
 }
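
The reworked i40e_fdir_sb_setup() above switches from chained if (!err) calls to the usual goto-unwind style: each failing step jumps to a label that releases only what was already set up, in reverse order. A generic standalone sketch of that control flow; the setup_*/free_* helpers and label names are stand-ins, not driver functions (the driver's own labels differ slightly because its free routines tolerate partially allocated rings):

#include <stdio.h>

static int fail_at = 2;		/* which step fails, for demonstration */

static int setup_tx(void)  { puts("setup tx");  return fail_at == 1 ? -1 : 0; }
static int setup_rx(void)  { puts("setup rx");  return fail_at == 2 ? -1 : 0; }
static int setup_irq(void) { puts("setup irq"); return fail_at == 3 ? -1 : 0; }

static void free_tx(void)  { puts("free tx"); }
static void free_rx(void)  { puts("free rx"); }

static int bring_up(void)
{
	int err;

	err = setup_tx();
	if (err)
		goto err_out;
	err = setup_rx();
	if (err)
		goto err_free_tx;
	err = setup_irq();
	if (err)
		goto err_free_rx;

	puts("up and running");
	return 0;

	/* unwind only what was actually set up, in reverse order */
err_free_rx:
	free_rx();
err_free_tx:
	free_tx();
err_out:
	return err;
}

int main(void)
{
	return bring_up() ? 1 : 0;
}
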
 
 /**
@@ -4673,26 +5165,25 @@ static void i40e_fdir_teardown(struct i40e_pf *pf)
 }
 
 /**
- * i40e_handle_reset_warning - prep for the core to reset
+ * i40e_prep_for_reset - prep for the core to reset
  * @pf: board private structure
  *
- * Close up the VFs and other things in prep for a Core Reset,
- * then get ready to rebuild the world.
- **/
-static void i40e_handle_reset_warning(struct i40e_pf *pf)
+ * Close up the VFs and other things in prep for pf Reset.
+ **/
+static int i40e_prep_for_reset(struct i40e_pf *pf)
 {
-	struct i40e_driver_version dv;
 	struct i40e_hw *hw = &pf->hw;
 	i40e_status ret;
 	u32 v;
 
 	clear_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
 	if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
-		return;
+		return 0;
 
 	dev_info(&pf->pdev->dev, "Tearing down internal switch for reset\n");
 
-	i40e_vc_notify_reset(pf);
+	if (i40e_check_asq_alive(hw))
+		i40e_vc_notify_reset(pf);
 
 	/* quiesce the VSIs and their queues that are not already DOWN */
 	i40e_pf_quiesce_all_vsi(pf);
@@ -4704,6 +5195,27 @@ static void i40e_handle_reset_warning(struct i40e_pf *pf)
 
 	i40e_shutdown_adminq(&pf->hw);
 
+	/* call shutdown HMC */
+	ret = i40e_shutdown_lan_hmc(hw);
+	if (ret) {
+		dev_info(&pf->pdev->dev, "shutdown_lan_hmc failed: %d\n", ret);
+		clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state);
+	}
+	return ret;
+}
+
+/**
+ * i40e_reset_and_rebuild - reset and rebuild using a saved config
+ * @pf: board private structure
+ * @reinit: if the Main VSI needs to be re-initialized.
+ **/
+static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
+{
+	struct i40e_driver_version dv;
+	struct i40e_hw *hw = &pf->hw;
+	i40e_status ret;
+	u32 v;
+
 	/* Now we wait for GRST to settle out.
 	 * We don't have to delete the VEBs or VSIs from the hw switch
 	 * because the reset will make them disappear.
@@ -4731,13 +5243,6 @@ static void i40e_handle_reset_warning(struct i40e_pf *pf)
 		goto end_core_reset;
 	}
 
-	/* call shutdown HMC */
-	ret = i40e_shutdown_lan_hmc(hw);
-	if (ret) {
-		dev_info(&pf->pdev->dev, "shutdown_lan_hmc failed: %d\n", ret);
-		goto end_core_reset;
-	}
-
 	ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
 				hw->func_caps.num_rx_qp,
 				pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num);
@@ -4751,8 +5256,16 @@ static void i40e_handle_reset_warning(struct i40e_pf *pf)
 		goto end_core_reset;
 	}
 
+#ifdef CONFIG_I40E_DCB
+	ret = i40e_init_pf_dcb(pf);
+	if (ret) {
+		dev_info(&pf->pdev->dev, "init_pf_dcb failed: %d\n", ret);
+		goto end_core_reset;
+	}
+#endif /* CONFIG_I40E_DCB */
+
 	/* do basic switch setup */
-	ret = i40e_setup_pf_switch(pf);
+	ret = i40e_setup_pf_switch(pf, reinit);
 	if (ret)
 		goto end_core_reset;
 
@@ -4831,6 +5344,22 @@ end_core_reset:
 }
 
 /**
+ * i40e_handle_reset_warning - prep for the pf to reset, reset and rebuild
+ * @pf: board private structure
+ *
+ * Close up the VFs and other things in prep for a Core Reset,
+ * then get ready to rebuild the world.
+ **/
+static void i40e_handle_reset_warning(struct i40e_pf *pf)
+{
+	i40e_status ret;
+
+	ret = i40e_prep_for_reset(pf);
+	if (!ret)
+		i40e_reset_and_rebuild(pf, false);
+}
+
+/**
  * i40e_handle_mdd_event
  * @pf: pointer to the pf structure
  *
@@ -4911,6 +5440,52 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
 	i40e_flush(hw);
 }
 
+#ifdef CONFIG_I40E_VXLAN
+/**
+ * i40e_sync_vxlan_filters_subtask - Sync the pending VXLAN port filters with HW
+ * @pf: board private structure
+ **/
+static void i40e_sync_vxlan_filters_subtask(struct i40e_pf *pf)
+{
+	const int vxlan_hdr_qwords = 4;
+	struct i40e_hw *hw = &pf->hw;
+	i40e_status ret;
+	u8 filter_index;
+	__be16 port;
+	int i;
+
+	if (!(pf->flags & I40E_FLAG_VXLAN_FILTER_SYNC))
+		return;
+
+	pf->flags &= ~I40E_FLAG_VXLAN_FILTER_SYNC;
+
+	for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
+		if (pf->pending_vxlan_bitmap & (1 << i)) {
+			pf->pending_vxlan_bitmap &= ~(1 << i);
+			port = pf->vxlan_ports[i];
+			ret = port ?
+			      i40e_aq_add_udp_tunnel(hw, ntohs(port),
+						     vxlan_hdr_qwords,
+						     I40E_AQC_TUNNEL_TYPE_VXLAN,
+						     &filter_index, NULL)
+			      : i40e_aq_del_udp_tunnel(hw, i, NULL);
+
+			if (ret) {
+				dev_info(&pf->pdev->dev, "Failed to execute AQ command for %s port %d with index %d\n",
+					 port ? "adding" : "deleting",
+					 ntohs(port), port ? i : i);
+
+				pf->vxlan_ports[i] = 0;
+			} else {
+				dev_info(&pf->pdev->dev, "%s port %d with AQ command with index %d\n",
+					 port ? "Added" : "Deleted",
+					 ntohs(port), port ? i : filter_index);
+			}
+		}
+	}
+}
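
The VXLAN subtask above drains a pending bitmap: the add/del paths only record the port and set a bit, and the service task later clears each bit and issues the corresponding add/delete admin-queue command. A standalone sketch of that deferred-sync bookkeeping; ports[], request_port() and the printouts are stand-ins for the driver's vxlan_ports[] table and AQ calls:

#include <stdio.h>

#define MAX_PORTS 16

static unsigned short ports[MAX_PORTS];	/* 0 means "slot is/should be empty" */
static unsigned int pending_bitmap;

/* fast path: just record the change and mark the slot pending */
static void request_port(int idx, unsigned short port)
{
	ports[idx] = port;
	pending_bitmap |= 1u << idx;
}

/* service task: flush every pending slot to the "hardware" */
static void sync_ports(void)
{
	int i;

	for (i = 0; i < MAX_PORTS; i++) {
		if (!(pending_bitmap & (1u << i)))
			continue;
		pending_bitmap &= ~(1u << i);
		if (ports[i])
			printf("add filter: slot %d, port %u\n", i,
			       (unsigned)ports[i]);
		else
			printf("del filter: slot %d\n", i);
	}
}

int main(void)
{
	request_port(0, 4789);	/* IANA VXLAN UDP port, as an example */
	request_port(3, 0);	/* request deletion of slot 3 */
	sync_ports();
	return 0;
}
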
+
+#endif
 /**
  * i40e_service_task - Run the driver's async subtasks
  * @work: pointer to work_struct containing our data
@@ -4929,6 +5504,9 @@ static void i40e_service_task(struct work_struct *work)
 	i40e_fdir_reinit_subtask(pf);
 	i40e_check_hang_subtask(pf);
 	i40e_sync_filters_subtask(pf);
+#ifdef CONFIG_I40E_VXLAN
+	i40e_sync_vxlan_filters_subtask(pf);
+#endif
 	i40e_clean_adminq_subtask(pf);
 
 	i40e_service_event_complete(pf);
@@ -5006,6 +5584,42 @@ static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
 }
 
 /**
+ * i40e_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the vsi
+ * @vsi: VSI pointer
+ * @alloc_qvectors: a bool to specify if q_vectors need to be allocated.
+ *
+ * On error: returns error code (negative)
+ * On success: returns 0
+ **/
+static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors)
+{
+	int size;
+	int ret = 0;
+
+	/* allocate memory for both Tx and Rx ring pointers */
+	size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs * 2;
+	vsi->tx_rings = kzalloc(size, GFP_KERNEL);
+	if (!vsi->tx_rings)
+		return -ENOMEM;
+	vsi->rx_rings = &vsi->tx_rings[vsi->alloc_queue_pairs];
+
+	if (alloc_qvectors) {
+		/* allocate memory for q_vector pointers */
+		size = sizeof(struct i40e_q_vectors *) * vsi->num_q_vectors;
+		vsi->q_vectors = kzalloc(size, GFP_KERNEL);
+		if (!vsi->q_vectors) {
+			ret = -ENOMEM;
+			goto err_vectors;
+		}
+	}
+	return ret;
+
+err_vectors:
+	kfree(vsi->tx_rings);
+	return ret;
+}
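
i40e_vsi_alloc_arrays() makes a single allocation that covers both pointer arrays and points rx_rings at its second half, so the later kfree(vsi->tx_rings) in i40e_vsi_free_arrays() releases both. A standalone sketch of that layout, with calloc/free standing in for kzalloc/kfree:

#include <stdio.h>
#include <stdlib.h>

struct ring { int id; };

int main(void)
{
	unsigned int alloc_queue_pairs = 4;
	struct ring **tx_rings, **rx_rings;
	size_t size;

	/* one zeroed allocation covering both Tx and Rx pointer arrays */
	size = sizeof(struct ring *) * alloc_queue_pairs * 2;
	tx_rings = calloc(1, size);
	if (!tx_rings)
		return 1;
	rx_rings = &tx_rings[alloc_queue_pairs];	/* second half */

	printf("rx_rings starts %td pointers after tx_rings\n",
	       rx_rings - tx_rings);

	/* freeing tx_rings releases the rx half as well */
	free(tx_rings);
	return 0;
}
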
+
+/**
  * i40e_vsi_mem_alloc - Allocates the next available struct vsi in the PF
  * @pf: board private structure
  * @type: type of VSI
@@ -5017,8 +5631,6 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
 {
 	int ret = -ENODEV;
 	struct i40e_vsi *vsi;
-	int sz_vectors;
-	int sz_rings;
 	int vsi_idx;
 	int i;
 
@@ -5068,22 +5680,9 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
 	if (ret)
 		goto err_rings;
 
-	/* allocate memory for ring pointers */
-	sz_rings = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs * 2;
-	vsi->tx_rings = kzalloc(sz_rings, GFP_KERNEL);
-	if (!vsi->tx_rings) {
-		ret = -ENOMEM;
+	ret = i40e_vsi_alloc_arrays(vsi, true);
+	if (ret)
 		goto err_rings;
-	}
-	vsi->rx_rings = &vsi->tx_rings[vsi->alloc_queue_pairs];
-
-	/* allocate memory for q_vector pointers */
-	sz_vectors = sizeof(struct i40e_q_vectors *) * vsi->num_q_vectors;
-	vsi->q_vectors = kzalloc(sz_vectors, GFP_KERNEL);
-	if (!vsi->q_vectors) {
-		ret = -ENOMEM;
-		goto err_vectors;
-	}
 
 	/* Setup default MSIX irq handler for VSI */
 	i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);
@@ -5092,8 +5691,6 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
 	ret = vsi_idx;
 	goto unlock_pf;
 
-err_vectors:
- 	kfree(vsi->tx_rings);
 err_rings:
 	pf->next_vsi = i - 1;
 	kfree(vsi);
@@ -5103,6 +5700,26 @@ unlock_pf:
 }
 
 /**
+ * i40e_vsi_free_arrays - Free queue and vector pointer arrays for the VSI
+ * @vsi: VSI pointer
+ * @free_qvectors: a bool to specify if q_vectors need to be freed.
+ **/
+static void i40e_vsi_free_arrays(struct i40e_vsi *vsi, bool free_qvectors)
+{
+	/* free the ring and vector containers */
+	if (free_qvectors) {
+		kfree(vsi->q_vectors);
+		vsi->q_vectors = NULL;
+	}
+	kfree(vsi->tx_rings);
+	vsi->tx_rings = NULL;
+	vsi->rx_rings = NULL;
+}
+
+/**
  * i40e_vsi_clear - Deallocate the VSI provided
  * @vsi: the VSI being un-configured
  **/
@@ -5138,9 +5755,7 @@ static int i40e_vsi_clear(struct i40e_vsi *vsi)
 	i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
 	i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);
 
-	/* free the ring and vector containers */
-	kfree(vsi->q_vectors);
-	kfree(vsi->tx_rings);
+	i40e_vsi_free_arrays(vsi, true);
 
 	pf->vsi[vsi->idx] = NULL;
 	if (vsi->idx < pf->next_vsi)
@@ -5158,18 +5773,17 @@ free_vsi:
  * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
  * @vsi: the VSI being cleaned
  **/
-static s32 i40e_vsi_clear_rings(struct i40e_vsi *vsi)
+static void i40e_vsi_clear_rings(struct i40e_vsi *vsi)
 {
 	int i;
 
-	if (vsi->tx_rings[0])
+	if (vsi->tx_rings && vsi->tx_rings[0]) {
 		for (i = 0; i < vsi->alloc_queue_pairs; i++) {
 			kfree_rcu(vsi->tx_rings[i], rcu);
 			vsi->tx_rings[i] = NULL;
 			vsi->rx_rings[i] = NULL;
 		}
-
-	return 0;
+	}
 }
 
 /**
@@ -5186,6 +5800,7 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi)
 		struct i40e_ring *tx_ring;
 		struct i40e_ring *rx_ring;
 
+		/* allocate space for both Tx and Rx in one shot */
 		tx_ring = kzalloc(sizeof(struct i40e_ring) * 2, GFP_KERNEL);
 		if (!tx_ring)
 			goto err_out;
@@ -5289,19 +5904,22 @@ static int i40e_init_msix(struct i40e_pf *pf)
 	/* The number of vectors we'll request will be comprised of:
 	 *   - Add 1 for "other" cause for Admin Queue events, etc.
 	 *   - The number of LAN queue pairs
-	 *        already adjusted for the NUMA node
-	 *        assumes symmetric Tx/Rx pairing
+	 *	- Queues being used for RSS.
+	 *		We don't need as many as max_rss_size vectors.
+	 *		We use rss_size instead in the calculation since that
+	 *		is governed by the number of CPUs in the system.
+	 *	- assumes symmetric Tx/Rx pairing
 	 *   - The number of VMDq pairs
 	 * Once we count this up, try the request.
 	 *
 	 * If we can't get what we want, we'll simplify to nearly nothing
 	 * and try again.  If that still fails, we punt.
 	 */
-	pf->num_lan_msix = pf->num_lan_qps;
+	pf->num_lan_msix = pf->num_lan_qps - (pf->rss_size_max - pf->rss_size);
 	pf->num_vmdq_msix = pf->num_vmdq_qps;
 	v_budget = 1 + pf->num_lan_msix;
 	v_budget += (pf->num_vmdq_vsis * pf->num_vmdq_msix);
-	if (pf->flags & I40E_FLAG_FDIR_ENABLED)
+	if (pf->flags & I40E_FLAG_FD_SB_ENABLED)
 		v_budget++;
 
 	/* Scale down if necessary, and the rings will share vectors */
@@ -5437,14 +6055,13 @@ static void i40e_init_interrupt_scheme(struct i40e_pf *pf)
 	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
 		err = i40e_init_msix(pf);
 		if (err) {
-			pf->flags &= ~(I40E_FLAG_MSIX_ENABLED	   |
-					I40E_FLAG_RSS_ENABLED	   |
-					I40E_FLAG_MQ_ENABLED	   |
-					I40E_FLAG_DCB_ENABLED	   |
-					I40E_FLAG_SRIOV_ENABLED	   |
-					I40E_FLAG_FDIR_ENABLED	   |
-					I40E_FLAG_FDIR_ATR_ENABLED |
-					I40E_FLAG_VMDQ_ENABLED);
+			pf->flags &= ~(I40E_FLAG_MSIX_ENABLED	|
+				       I40E_FLAG_RSS_ENABLED	|
+				       I40E_FLAG_DCB_ENABLED	|
+				       I40E_FLAG_SRIOV_ENABLED	|
+				       I40E_FLAG_FD_SB_ENABLED	|
+				       I40E_FLAG_FD_ATR_ENABLED	|
+				       I40E_FLAG_VMDQ_ENABLED);
 
 			/* rework the queue expectations without MSIX */
 			i40e_determine_queue_usage(pf);
@@ -5513,15 +6130,15 @@ static int i40e_setup_misc_vector(struct i40e_pf *pf)
  **/
 static int i40e_config_rss(struct i40e_pf *pf)
 {
-	struct i40e_hw *hw = &pf->hw;
-	u32 lut = 0;
-	int i, j;
-	u64 hena;
 	/* Set of random keys generated using kernel random number generator */
 	static const u32 seed[I40E_PFQF_HKEY_MAX_INDEX + 1] = {0x41b01687,
 				0x183cfd8c, 0xce880440, 0x580cbc3c, 0x35897377,
 				0x328b25e1, 0x4fa98922, 0xb7d90c14, 0xd5bad70d,
 				0xcd15a2c1, 0xe8580225, 0x4a1e9d11, 0xfe5731be};
+	struct i40e_hw *hw = &pf->hw;
+	u32 lut = 0;
+	int i, j;
+	u64 hena;
 
 	/* Fill out hash function seed */
 	for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
@@ -5530,16 +6147,7 @@ static int i40e_config_rss(struct i40e_pf *pf)
 	/* By default we enable TCP/UDP with IPv4/IPv6 ptypes */
 	hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
 		((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32);
-	hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
-		((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) |
-		((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) |
-		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) |
-		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) |
-		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
-		((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) |
-		((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) |
-		((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4)|
-		((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
+	hena |= I40E_DEFAULT_RSS_HENA;
 	wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
 	wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
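
The RSS hunk collapses the per-ptype bit list into I40E_DEFAULT_RSS_HENA, but the 64-bit enable mask is still read from and written back to the hardware as two 32-bit halves. A standalone sketch of that split/recombine step; reg[], rd32() and wr32() are stand-ins, and the example mask is not the driver's real bit layout:

#include <stdio.h>
#include <stdint.h>

static uint32_t reg[2];				/* stand-in for PFQF_HENA(0/1) */

static uint32_t rd32(int idx)			{ return reg[idx]; }
static void     wr32(int idx, uint32_t val)	{ reg[idx] = val; }

int main(void)
{
	/* example default mask; not the driver's actual bit layout */
	const uint64_t default_hena = (1ULL << 31) | (1ULL << 33) | (1ULL << 41);
	uint64_t hena;

	/* recombine the two 32-bit halves, OR in the defaults, write back */
	hena = (uint64_t)rd32(0) | ((uint64_t)rd32(1) << 32);
	hena |= default_hena;
	wr32(0, (uint32_t)hena);
	wr32(1, (uint32_t)(hena >> 32));

	printf("HENA = 0x%016llx\n", (unsigned long long)hena);
	return 0;
}
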
 
@@ -5568,6 +6176,34 @@ static int i40e_config_rss(struct i40e_pf *pf)
 }
 
 /**
+ * i40e_reconfig_rss_queues - change number of queues for rss and rebuild
+ * @pf: board private structure
+ * @queue_count: the requested queue count for rss.
+ *
+ * Returns 0 if RSS is not enabled; otherwise returns the final RSS queue
+ * count, which may differ from the requested queue count.
+ **/
+int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
+{
+	if (!(pf->flags & I40E_FLAG_RSS_ENABLED))
+		return 0;
+
+	queue_count = min_t(int, queue_count, pf->rss_size_max);
+	queue_count = rounddown_pow_of_two(queue_count);
+
+	if (queue_count != pf->rss_size) {
+		i40e_prep_for_reset(pf);
+
+		pf->rss_size = queue_count;
+
+		i40e_reset_and_rebuild(pf, true);
+		i40e_config_rss(pf);
+	}
+	dev_info(&pf->pdev->dev, "RSS count:  %d\n", pf->rss_size);
+	return pf->rss_size;
+}
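As a worked example of the clamping above (the numbers are hypothetical): a request for 10 RSS queues on a function whose rss_size_max is 16 is first capped at 16 and then rounded down to the nearest power of two, so the PF is rebuilt with pf->rss_size = 8 and the function returns 8.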
+
+/**
  * i40e_sw_init - Initialize general software structures (struct i40e_pf)
  * @pf: board private structure to initialize
  *
@@ -5582,6 +6218,7 @@ static int i40e_sw_init(struct i40e_pf *pf)
 
 	pf->msg_enable = netif_msg_init(I40E_DEFAULT_MSG_ENABLE,
 				(NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK));
+	pf->hw.debug_mask = pf->msg_enable | I40E_DEBUG_DIAG;
 	if (debug != -1 && debug != I40E_DEFAULT_MSG_ENABLE) {
 		if (I40E_DEBUG_USER & debug)
 			pf->hw.debug_mask = debug;
@@ -5593,39 +6230,47 @@ static int i40e_sw_init(struct i40e_pf *pf)
 	pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
 		    I40E_FLAG_MSI_ENABLED     |
 		    I40E_FLAG_MSIX_ENABLED    |
-		    I40E_FLAG_RX_PS_ENABLED   |
-		    I40E_FLAG_MQ_ENABLED      |
 		    I40E_FLAG_RX_1BUF_ENABLED;
 
+	/* Depending on the PF configuration, the RSS maximum might end up
+	 * larger than the number of available queues
+	 */
 	pf->rss_size_max = 0x1 << pf->hw.func_caps.rss_table_entry_width;
+	pf->rss_size_max = min_t(int, pf->rss_size_max,
+				 pf->hw.func_caps.num_tx_qp);
 	if (pf->hw.func_caps.rss) {
 		pf->flags |= I40E_FLAG_RSS_ENABLED;
-		pf->rss_size = min_t(int, pf->rss_size_max,
-				     nr_cpus_node(numa_node_id()));
+		pf->rss_size = min_t(int, pf->rss_size_max, num_online_cpus());
+		pf->rss_size = rounddown_pow_of_two(pf->rss_size);
 	} else {
 		pf->rss_size = 1;
 	}
 
-	if (pf->hw.func_caps.dcb)
-		pf->num_tc_qps = I40E_DEFAULT_QUEUES_PER_TC;
-	else
-		pf->num_tc_qps = 0;
+	/* MFP mode enabled */
+	if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.mfp_mode_1) {
+		pf->flags |= I40E_FLAG_MFP_ENABLED;
+		dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
+	}
 
-	if (pf->hw.func_caps.fd) {
-		/* FW/NVM is not yet fixed in this regard */
-		if ((pf->hw.func_caps.fd_filters_guaranteed > 0) ||
-		    (pf->hw.func_caps.fd_filters_best_effort > 0)) {
-			pf->flags |= I40E_FLAG_FDIR_ATR_ENABLED;
-			dev_info(&pf->pdev->dev,
-				 "Flow Director ATR mode Enabled\n");
-			pf->flags |= I40E_FLAG_FDIR_ENABLED;
+	/* FW/NVM is not yet fixed in this regard */
+	if ((pf->hw.func_caps.fd_filters_guaranteed > 0) ||
+	    (pf->hw.func_caps.fd_filters_best_effort > 0)) {
+		pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
+		pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
+		dev_info(&pf->pdev->dev,
+			"Flow Director ATR mode Enabled\n");
+		if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) {
+			pf->flags |= I40E_FLAG_FD_SB_ENABLED;
 			dev_info(&pf->pdev->dev,
 				 "Flow Director Side Band mode Enabled\n");
-			pf->fdir_pf_filter_count =
-					 pf->hw.func_caps.fd_filters_guaranteed;
+		} else {
+			dev_info(&pf->pdev->dev,
+				 "Flow Director Side Band mode Disabled in MFP mode\n");
 		}
-	} else {
-		pf->fdir_pf_filter_count = 0;
+		pf->fdir_pf_filter_count =
+				 pf->hw.func_caps.fd_filters_guaranteed;
+		pf->hw.fdir_shared_filter_count =
+				 pf->hw.func_caps.fd_filters_best_effort;
 	}
 
 	if (pf->hw.func_caps.vmdq) {
@@ -5634,12 +6279,6 @@ static int i40e_sw_init(struct i40e_pf *pf)
 		pf->num_vmdq_qps = I40E_DEFAULT_QUEUES_PER_VMDQ;
 	}
 
-	/* MFP mode enabled */
-	if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.mfp_mode_1) {
-		pf->flags |= I40E_FLAG_MFP_ENABLED;
-		dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
-	}
-
 #ifdef CONFIG_PCI_IOV
 	if (pf->hw.func_caps.num_vfs) {
 		pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
@@ -5647,6 +6286,9 @@ static int i40e_sw_init(struct i40e_pf *pf)
 		pf->num_req_vfs = min_t(int,
 					pf->hw.func_caps.num_vfs,
 					I40E_MAX_VF_COUNT);
+		dev_info(&pf->pdev->dev,
+			 "Number of VFs being requested for PF[%d] = %d\n",
+			 pf->hw.pf_id, pf->num_req_vfs);
 	}
 #endif /* CONFIG_PCI_IOV */
 	pf->eeprom_version = 0xDEAD;
@@ -5701,6 +6343,104 @@ static int i40e_set_features(struct net_device *netdev,
 	return 0;
 }
 
+#ifdef CONFIG_I40E_VXLAN
+/**
+ * i40e_get_vxlan_port_idx - Look up the index of a possibly offloaded Rx UDP port
+ * @pf: board private structure
+ * @port: The UDP port to look up
+ *
+ * Returns the index number or I40E_MAX_PF_UDP_OFFLOAD_PORTS if port not found
+ **/
+static u8 i40e_get_vxlan_port_idx(struct i40e_pf *pf, __be16 port)
+{
+	u8 i;
+
+	for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
+		if (pf->vxlan_ports[i] == port)
+			return i;
+	}
+
+	return i;
+}
+
+/**
+ * i40e_add_vxlan_port - Get notifications about VXLAN ports that come up
+ * @netdev: This physical port's netdev
+ * @sa_family: Socket Family that VXLAN is notifying us about
+ * @port: New UDP port number that VXLAN started listening to
+ **/
+static void i40e_add_vxlan_port(struct net_device *netdev,
+				sa_family_t sa_family, __be16 port)
+{
+	struct i40e_netdev_priv *np = netdev_priv(netdev);
+	struct i40e_vsi *vsi = np->vsi;
+	struct i40e_pf *pf = vsi->back;
+	u8 next_idx;
+	u8 idx;
+
+	if (sa_family == AF_INET6)
+		return;
+
+	idx = i40e_get_vxlan_port_idx(pf, port);
+
+	/* Check if port already exists */
+	if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
+		netdev_info(netdev, "Port %d already offloaded\n", ntohs(port));
+		return;
+	}
+
+	/* Now check if there is space to add the new port */
+	next_idx = i40e_get_vxlan_port_idx(pf, 0);
+
+	if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
+		netdev_info(netdev, "Maximum number of UDP ports reached, not adding port %d\n",
+			    ntohs(port));
+		return;
+	}
+
+	/* New port: add it and mark its index in the bitmap */
+	pf->vxlan_ports[next_idx] = port;
+	pf->pending_vxlan_bitmap |= (1 << next_idx);
+
+	pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC;
+}
+
+/**
+ * i40e_del_vxlan_port - Get notifications about VXLAN ports that go away
+ * @netdev: This physical port's netdev
+ * @sa_family: Socket Family that VXLAN is notifying us about
+ * @port: UDP port number that VXLAN stopped listening to
+ **/
+static void i40e_del_vxlan_port(struct net_device *netdev,
+				sa_family_t sa_family, __be16 port)
+{
+	struct i40e_netdev_priv *np = netdev_priv(netdev);
+	struct i40e_vsi *vsi = np->vsi;
+	struct i40e_pf *pf = vsi->back;
+	u8 idx;
+
+	if (sa_family == AF_INET6)
+		return;
+
+	idx = i40e_get_vxlan_port_idx(pf, port);
+
+	/* Check if port already exists */
+	if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
+		/* if port exists, set it to 0 (mark for deletion)
+		 * and make it pending
+		 */
+		pf->vxlan_ports[idx] = 0;
+
+		pf->pending_vxlan_bitmap |= (1 << idx);
+
+		pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC;
+	} else {
+		netdev_warn(netdev, "Port %d was not found, not deleting\n",
+			    ntohs(port));
+	}
+}
+
+#endif
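The add/del paths above only record the port in vxlan_ports[], flag the slot in pending_vxlan_bitmap and set I40E_FLAG_VXLAN_FILTER_SYNC; the actual admin-queue programming is presumably deferred to the periodic service task. A rough sketch of how such a sync step could consume the bitmap, using the i40e_aq_add_udp_tunnel()/i40e_aq_del_udp_tunnel() prototypes added later in this patch (the function name and exact arguments below are illustrative, not the driver's actual implementation):

static void i40e_sync_vxlan_filters_sketch(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u8 filter_index;
	__be16 port;
	u8 i;

	if (!(pf->flags & I40E_FLAG_VXLAN_FILTER_SYNC))
		return;
	pf->flags &= ~I40E_FLAG_VXLAN_FILTER_SYNC;

	for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
		if (!(pf->pending_vxlan_bitmap & (1 << i)))
			continue;
		pf->pending_vxlan_bitmap &= ~(1 << i);
		port = pf->vxlan_ports[i];
		if (port)
			/* slot holds a new port: program a UDP tunnel filter;
			 * header_len and protocol_index values are placeholders
			 */
			i40e_aq_add_udp_tunnel(hw, ntohs(port), 0, 0,
					       &filter_index, NULL);
		else
			/* slot was zeroed by the del path: drop the filter */
			i40e_aq_del_udp_tunnel(hw, i, NULL);
	}
}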
 static const struct net_device_ops i40e_netdev_ops = {
 	.ndo_open		= i40e_open,
 	.ndo_stop		= i40e_close,
@@ -5710,6 +6450,7 @@ static const struct net_device_ops i40e_netdev_ops = {
 	.ndo_validate_addr	= eth_validate_addr,
 	.ndo_set_mac_address	= i40e_set_mac,
 	.ndo_change_mtu		= i40e_change_mtu,
+	.ndo_do_ioctl		= i40e_ioctl,
 	.ndo_tx_timeout		= i40e_tx_timeout,
 	.ndo_vlan_rx_add_vid	= i40e_vlan_rx_add_vid,
 	.ndo_vlan_rx_kill_vid	= i40e_vlan_rx_kill_vid,
@@ -5722,6 +6463,10 @@ static const struct net_device_ops i40e_netdev_ops = {
 	.ndo_set_vf_vlan	= i40e_ndo_set_vf_port_vlan,
 	.ndo_set_vf_tx_rate	= i40e_ndo_set_vf_bw,
 	.ndo_get_vf_config	= i40e_ndo_get_vf_config,
+#ifdef CONFIG_I40E_VXLAN
+	.ndo_add_vxlan_port	= i40e_add_vxlan_port,
+	.ndo_del_vxlan_port	= i40e_del_vxlan_port,
+#endif
 };
 
 /**
@@ -5732,6 +6477,7 @@ static const struct net_device_ops i40e_netdev_ops = {
  **/
 static int i40e_config_netdev(struct i40e_vsi *vsi)
 {
+	u8 brdcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
 	struct i40e_pf *pf = vsi->back;
 	struct i40e_hw *hw = &pf->hw;
 	struct i40e_netdev_priv *np;
@@ -5781,6 +6527,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
 		random_ether_addr(mac_addr);
 		i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, false);
 	}
+	i40e_add_filter(vsi, brdcast, I40E_VLAN_ANY, false, false);
 
 	memcpy(netdev->dev_addr, mac_addr, ETH_ALEN);
 	memcpy(netdev->perm_addr, mac_addr, ETH_ALEN);
@@ -5814,10 +6561,6 @@ static void i40e_vsi_delete(struct i40e_vsi *vsi)
 	if (vsi == vsi->back->vsi[vsi->back->lan_vsi])
 		return;
 
-	/* there is no HW VSI for FDIR */
-	if (vsi->type == I40E_VSI_FDIR)
-		return;
-
 	i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL);
 	return;
 }
@@ -5901,12 +6644,12 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
 		break;
 
 	case I40E_VSI_FDIR:
-		/* no queue mapping or actual HW VSI needed */
-		vsi->info.valid_sections = 0;
-		vsi->seid = 0;
-		vsi->id = 0;
+		ctxt.pf_num = hw->pf_id;
+		ctxt.vf_num = 0;
+		ctxt.uplink_seid = vsi->uplink_seid;
+		ctxt.connection_type = 0x1;     /* regular data port */
+		ctxt.flags = I40E_AQ_VSI_TYPE_PF;
 		i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
-		return 0;
 		break;
 
 	case I40E_VSI_VMDQ2:
@@ -6133,6 +6876,69 @@ vector_setup_out:
 }
 
 /**
+ * i40e_vsi_reinit_setup - release and reallocate resources for a VSI
+ * @vsi: pointer to the vsi.
+ *
+ * This re-allocates a vsi's queue resources.
+ *
+ * Returns a pointer to the reallocated and configured VSI sw struct on
+ * success, or NULL on failure.
+ **/
+static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
+{
+	struct i40e_pf *pf = vsi->back;
+	u8 enabled_tc;
+	int ret;
+
+	i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
+	i40e_vsi_clear_rings(vsi);
+
+	i40e_vsi_free_arrays(vsi, false);
+	i40e_set_num_rings_in_vsi(vsi);
+	ret = i40e_vsi_alloc_arrays(vsi, false);
+	if (ret)
+		goto err_vsi;
+
+	ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, vsi->idx);
+	if (ret < 0) {
+		dev_info(&pf->pdev->dev, "VSI %d get_lump failed %d\n",
+			 vsi->seid, ret);
+		goto err_vsi;
+	}
+	vsi->base_queue = ret;
+
+	/* Update the FW view of the VSI. Force a reset of TC and queue
+	 * layout configurations.
+	 */
+	enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
+	pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
+	pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
+	i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
+
+	/* assign it some queues */
+	ret = i40e_alloc_rings(vsi);
+	if (ret)
+		goto err_rings;
+
+	/* map all of the rings to the q_vectors */
+	i40e_vsi_map_rings_to_vectors(vsi);
+	return vsi;
+
+err_rings:
+	i40e_vsi_free_q_vectors(vsi);
+	if (vsi->netdev_registered) {
+		vsi->netdev_registered = false;
+		unregister_netdev(vsi->netdev);
+		free_netdev(vsi->netdev);
+		vsi->netdev = NULL;
+	}
+	i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
+err_vsi:
+	i40e_vsi_clear(vsi);
+	return NULL;
+}
+
+/**
  * i40e_vsi_setup - Set up a VSI by a given type
  * @pf: board private structure
  * @type: VSI type
@@ -6212,6 +7018,8 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
 	if (v_idx < 0)
 		goto err_alloc;
 	vsi = pf->vsi[v_idx];
+	if (!vsi)
+		goto err_alloc;
 	vsi->type = type;
 	vsi->veb_idx = (veb ? veb->idx : I40E_NO_VEB);
 
@@ -6220,7 +7028,8 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
 	else if (type == I40E_VSI_SRIOV)
 		vsi->vf_id = param1;
 	/* assign it some queues */
-	ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, vsi->idx);
+	ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs,
+				vsi->idx);
 	if (ret < 0) {
 		dev_info(&pf->pdev->dev, "VSI %d get_lump failed %d\n",
 			 vsi->seid, ret);
@@ -6246,6 +7055,10 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
 			goto err_netdev;
 		vsi->netdev_registered = true;
 		netif_carrier_off(vsi->netdev);
+#ifdef CONFIG_I40E_DCB
+		/* Setup DCB netlink interface */
+		i40e_dcbnl_setup(vsi);
+#endif /* CONFIG_I40E_DCB */
 		/* fall through */
 
 	case I40E_VSI_FDIR:
@@ -6503,12 +7316,14 @@ void i40e_veb_release(struct i40e_veb *veb)
  **/
 static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
 {
-	bool is_default = (vsi->idx == vsi->back->lan_vsi);
+	bool is_default = false;
+	bool is_cloud = false;
 	int ret;
 
 	/* get a VEB from the hardware */
 	ret = i40e_aq_add_veb(&veb->pf->hw, veb->uplink_seid, vsi->seid,
-			      veb->enabled_tc, is_default, &veb->seid, NULL);
+			      veb->enabled_tc, is_default,
+			      is_cloud, &veb->seid, NULL);
 	if (ret) {
 		dev_info(&veb->pf->pdev->dev,
 			 "couldn't add VEB, err %d, aq_err %d\n",
@@ -6773,11 +7588,13 @@ int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
 /**
  * i40e_setup_pf_switch - Setup the HW switch on startup or after reset
  * @pf: board private structure
+ * @reinit: if the Main VSI needs to be re-initialized.
  *
  * Returns 0 on success, negative value on failure
  **/
-static int i40e_setup_pf_switch(struct i40e_pf *pf)
+static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
 {
+	u32 rxfc = 0, txfc = 0, rxfc_reg;
 	int ret;
 
 	/* find out what's out there already */
@@ -6790,14 +7607,8 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf)
 	}
 	i40e_pf_reset_stats(pf);
 
-	/* fdir VSI must happen first to be sure it gets queue 0, but only
-	 * if there is enough room for the fdir VSI
-	 */
-	if (pf->num_lan_qps > 1)
-		i40e_fdir_setup(pf);
-
 	/* first time setup */
-	if (pf->lan_vsi == I40E_NO_VSI) {
+	if (pf->lan_vsi == I40E_NO_VSI || reinit) {
 		struct i40e_vsi *vsi = NULL;
 		u16 uplink_seid;
 
@@ -6808,19 +7619,15 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf)
 			uplink_seid = pf->veb[pf->lan_veb]->seid;
 		else
 			uplink_seid = pf->mac_seid;
-
-		vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0);
+		if (pf->lan_vsi == I40E_NO_VSI)
+			vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0);
+		else if (reinit)
+			vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]);
 		if (!vsi) {
 			dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n");
 			i40e_fdir_teardown(pf);
 			return -EAGAIN;
 		}
-		/* accommodate kcompat by copying the main VSI queue count
-		 * into the pf, since this newer code pushes the pf queue
-		 * info down a level into a VSI
-		 */
-		pf->num_rx_queues = vsi->alloc_queue_pairs;
-		pf->num_tx_queues = vsi->alloc_queue_pairs;
 	} else {
 		/* force a reset of TC and queue layout configurations */
 		u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
@@ -6830,6 +7637,8 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf)
 	}
 	i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]);
 
+	i40e_fdir_sb_setup(pf);
+
 	/* Setup static PF queue filter control settings */
 	ret = i40e_setup_pf_filter_control(pf);
 	if (ret) {
@@ -6848,37 +7657,68 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf)
 	i40e_aq_get_link_info(&pf->hw, true, NULL, NULL);
 	i40e_link_event(pf);
 
-	/* Initialize user-specifics link properties */
+	/* Initialize user-specific link properties */
 	pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info &
 				  I40E_AQ_AN_COMPLETED) ? true : false);
-	pf->hw.fc.requested_mode = I40E_FC_DEFAULT;
-	if (pf->hw.phy.link_info.an_info &
-	   (I40E_AQ_LINK_PAUSE_TX | I40E_AQ_LINK_PAUSE_RX))
+	/* requested_mode is set in probe or by ethtool */
+	if (!pf->fc_autoneg_status)
+		goto no_autoneg;
+
+	if ((pf->hw.phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX) &&
+	    (pf->hw.phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX))
 		pf->hw.fc.current_mode = I40E_FC_FULL;
 	else if (pf->hw.phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX)
 		pf->hw.fc.current_mode = I40E_FC_TX_PAUSE;
 	else if (pf->hw.phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX)
 		pf->hw.fc.current_mode = I40E_FC_RX_PAUSE;
 	else
-		pf->hw.fc.current_mode = I40E_FC_DEFAULT;
+		pf->hw.fc.current_mode = I40E_FC_NONE;
 
-	return ret;
-}
+	/* sync the flow control settings with the auto-neg values */
+	switch (pf->hw.fc.current_mode) {
+	case I40E_FC_FULL:
+		txfc = 1;
+		rxfc = 1;
+		break;
+	case I40E_FC_TX_PAUSE:
+		txfc = 1;
+		rxfc = 0;
+		break;
+	case I40E_FC_RX_PAUSE:
+		txfc = 0;
+		rxfc = 1;
+		break;
+	case I40E_FC_NONE:
+	case I40E_FC_DEFAULT:
+		txfc = 0;
+		rxfc = 0;
+		break;
+	case I40E_FC_PFC:
+		/* TBD */
+		break;
+	/* no default case, we have to handle all possibilities here */
+	}
 
-/**
- * i40e_set_rss_size - helper to set rss_size
- * @pf: board private structure
- * @queues_left: how many queues
- */
-static u16 i40e_set_rss_size(struct i40e_pf *pf, int queues_left)
-{
-	int num_tc0;
+	wr32(&pf->hw, I40E_PRTDCB_FCCFG, txfc << I40E_PRTDCB_FCCFG_TFCE_SHIFT);
+
+	rxfc_reg = rd32(&pf->hw, I40E_PRTDCB_MFLCN) &
+		   ~I40E_PRTDCB_MFLCN_RFCE_MASK;
+	rxfc_reg |= (rxfc << I40E_PRTDCB_MFLCN_RFCE_SHIFT);
+
+	wr32(&pf->hw, I40E_PRTDCB_MFLCN, rxfc_reg);
 
-	num_tc0 = min_t(int, queues_left, pf->rss_size_max);
-	num_tc0 = min_t(int, num_tc0, nr_cpus_node(numa_node_id()));
-	num_tc0 = rounddown_pow_of_two(num_tc0);
+	goto fc_complete;
 
-	return num_tc0;
+no_autoneg:
+	/* disable L2 flow control, user can turn it on if they wish */
+	wr32(&pf->hw, I40E_PRTDCB_FCCFG, 0);
+	wr32(&pf->hw, I40E_PRTDCB_MFLCN, rd32(&pf->hw, I40E_PRTDCB_MFLCN) &
+					 ~I40E_PRTDCB_MFLCN_RFCE_MASK);
+
+fc_complete:
+	i40e_ptp_init(pf);
+
+	return ret;
 }
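For clarity, the pause-autonegotiation handling above reduces to this mapping from the negotiated AQ pause bits (an_info) to the transmit/receive flow-control enables written to PRTDCB_FCCFG (TFCE) and PRTDCB_MFLCN (RFCE):

	negotiated pause bits            current_mode         TFCE  RFCE
	PAUSE_TX and PAUSE_RX            I40E_FC_FULL          1     1
	PAUSE_TX only                    I40E_FC_TX_PAUSE      1     0
	PAUSE_RX only                    I40E_FC_RX_PAUSE      0     1
	neither                          I40E_FC_NONE          0     0

If autonegotiation did not complete, both registers are instead cleared via the no_autoneg path, leaving L2 flow control disabled until the user enables it.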
 
 /**
@@ -6887,12 +7727,9 @@ static u16 i40e_set_rss_size(struct i40e_pf *pf, int queues_left)
  **/
 static void i40e_determine_queue_usage(struct i40e_pf *pf)
 {
-	int accum_tc_size;
 	int queues_left;
 
 	pf->num_lan_qps = 0;
-	pf->num_tc_qps = rounddown_pow_of_two(pf->num_tc_qps);
-	accum_tc_size = (I40E_MAX_TRAFFIC_CLASS - 1) * pf->num_tc_qps;
 
 	/* Find the max queues to be put into basic use.  We'll always be
 	 * using TC0, whether or not DCB is running, and TC0 will get the
@@ -6900,99 +7737,45 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
 	 */
 	queues_left = pf->hw.func_caps.num_tx_qp;
 
-	if   (!((pf->flags & I40E_FLAG_MSIX_ENABLED)		 &&
-		(pf->flags & I40E_FLAG_MQ_ENABLED))		 ||
-		!(pf->flags & (I40E_FLAG_RSS_ENABLED |
-		I40E_FLAG_FDIR_ENABLED | I40E_FLAG_DCB_ENABLED)) ||
-		(queues_left == 1)) {
-
+	if ((queues_left == 1) ||
+	    !(pf->flags & I40E_FLAG_MSIX_ENABLED) ||
+	    !(pf->flags & (I40E_FLAG_RSS_ENABLED | I40E_FLAG_FD_SB_ENABLED |
+			   I40E_FLAG_DCB_ENABLED))) {
 		/* one qp for PF, no queues for anything else */
 		queues_left = 0;
 		pf->rss_size = pf->num_lan_qps = 1;
 
 		/* make sure all the fancies are disabled */
-		pf->flags &= ~(I40E_FLAG_RSS_ENABLED       |
-				I40E_FLAG_MQ_ENABLED	   |
-				I40E_FLAG_FDIR_ENABLED	   |
-				I40E_FLAG_FDIR_ATR_ENABLED |
-				I40E_FLAG_DCB_ENABLED	   |
-				I40E_FLAG_SRIOV_ENABLED	   |
-				I40E_FLAG_VMDQ_ENABLED);
-
-	} else if (pf->flags & I40E_FLAG_RSS_ENABLED	  &&
-		   !(pf->flags & I40E_FLAG_FDIR_ENABLED)  &&
-		   !(pf->flags & I40E_FLAG_DCB_ENABLED)) {
-
-		pf->rss_size = i40e_set_rss_size(pf, queues_left);
-
-		queues_left -= pf->rss_size;
-		pf->num_lan_qps = pf->rss_size;
-
-	} else if (pf->flags & I40E_FLAG_RSS_ENABLED	  &&
-		   !(pf->flags & I40E_FLAG_FDIR_ENABLED)  &&
-		   (pf->flags & I40E_FLAG_DCB_ENABLED)) {
-
-		/* save num_tc_qps queues for TCs 1 thru 7 and the rest
-		 * are set up for RSS in TC0
-		 */
-		queues_left -= accum_tc_size;
-
-		pf->rss_size = i40e_set_rss_size(pf, queues_left);
-
-		queues_left -= pf->rss_size;
-		if (queues_left < 0) {
-			dev_info(&pf->pdev->dev, "not enough queues for DCB\n");
-			return;
-		}
-
-		pf->num_lan_qps = pf->rss_size + accum_tc_size;
-
-	} else if (pf->flags & I40E_FLAG_RSS_ENABLED   &&
-		  (pf->flags & I40E_FLAG_FDIR_ENABLED) &&
-		  !(pf->flags & I40E_FLAG_DCB_ENABLED)) {
-
-		queues_left -= 1; /* save 1 queue for FD */
-
-		pf->rss_size = i40e_set_rss_size(pf, queues_left);
-
-		queues_left -= pf->rss_size;
-		if (queues_left < 0) {
-			dev_info(&pf->pdev->dev, "not enough queues for Flow Director\n");
-			return;
+		pf->flags &= ~(I40E_FLAG_RSS_ENABLED	|
+			       I40E_FLAG_FD_SB_ENABLED	|
+			       I40E_FLAG_FD_ATR_ENABLED	|
+			       I40E_FLAG_DCB_ENABLED	|
+			       I40E_FLAG_SRIOV_ENABLED	|
+			       I40E_FLAG_VMDQ_ENABLED);
+	} else {
+		/* Not enough queues for all TCs */
+		if ((pf->flags & I40E_FLAG_DCB_ENABLED) &&
+		    (queues_left < I40E_MAX_TRAFFIC_CLASS)) {
+			pf->flags &= ~I40E_FLAG_DCB_ENABLED;
+			dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n");
 		}
+		pf->num_lan_qps = pf->rss_size_max;
+		queues_left -= pf->num_lan_qps;
+	}
 
-		pf->num_lan_qps = pf->rss_size;
-
-	} else if (pf->flags & I40E_FLAG_RSS_ENABLED   &&
-		  (pf->flags & I40E_FLAG_FDIR_ENABLED) &&
-		  (pf->flags & I40E_FLAG_DCB_ENABLED)) {
-
-		/* save 1 queue for TCs 1 thru 7,
-		 * 1 queue for flow director,
-		 * and the rest are set up for RSS in TC0
-		 */
-		queues_left -= 1;
-		queues_left -= accum_tc_size;
-
-		pf->rss_size = i40e_set_rss_size(pf, queues_left);
-		queues_left -= pf->rss_size;
-		if (queues_left < 0) {
-			dev_info(&pf->pdev->dev, "not enough queues for DCB and Flow Director\n");
-			return;
+	if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
+		if (queues_left > 1) {
+			queues_left -= 1; /* save 1 queue for FD */
+		} else {
+			pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
+			dev_info(&pf->pdev->dev, "not enough queues for Flow Director. Flow Director feature is disabled\n");
 		}
-
-		pf->num_lan_qps = pf->rss_size + accum_tc_size;
-
-	} else {
-		dev_info(&pf->pdev->dev,
-			 "Invalid configuration, flags=0x%08llx\n", pf->flags);
-		return;
 	}
 
 	if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
 	    pf->num_vf_qps && pf->num_req_vfs && queues_left) {
-		pf->num_req_vfs = min_t(int, pf->num_req_vfs, (queues_left /
-							       pf->num_vf_qps));
+		pf->num_req_vfs = min_t(int, pf->num_req_vfs,
+					(queues_left / pf->num_vf_qps));
 		queues_left -= (pf->num_req_vfs * pf->num_vf_qps);
 	}
 
@@ -7003,6 +7786,7 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
 		queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps);
 	}
 
+	pf->queues_left = queues_left;
 	return;
 }
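A hypothetical walk-through of the budgeting above (all numbers invented): with func_caps.num_tx_qp = 64, rss_size_max = 16, Flow Director sideband enabled, 4 requested VFs at num_vf_qps = 4, and 8 VMDq VSIs at num_vmdq_qps = 2, the LAN VSI claims num_lan_qps = 16 (48 left), the FD sideband ring reserves 1 (47 left), the VFs take 16 (31 left), VMDq takes 16, and the remaining 15 queue pairs are recorded in pf->queues_left.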
 
@@ -7024,7 +7808,7 @@ static int i40e_setup_pf_filter_control(struct i40e_pf *pf)
 	settings->hash_lut_size = I40E_HASH_LUT_SIZE_128;
 
 	/* Flow Director is enabled */
-	if (pf->flags & (I40E_FLAG_FDIR_ENABLED | I40E_FLAG_FDIR_ATR_ENABLED))
+	if (pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED))
 		settings->enable_fdir = true;
 
 	/* Ethtype and MACVLAN filters enabled for PF */
@@ -7053,6 +7837,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	struct i40e_driver_version dv;
 	struct i40e_pf *pf;
 	struct i40e_hw *hw;
+	static u16 pfs_found;
+	u16 link_status;
 	int err = 0;
 	u32 len;
 
@@ -7118,6 +7904,18 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	hw->subsystem_device_id = pdev->subsystem_device;
 	hw->bus.device = PCI_SLOT(pdev->devfn);
 	hw->bus.func = PCI_FUNC(pdev->devfn);
+	pf->instance = pfs_found;
+
+	/* do a special CORER for clearing PXE mode once at init */
+	if (hw->revision_id == 0 &&
+	    (rd32(hw, I40E_GLLAN_RCTL_0) & I40E_GLLAN_RCTL_0_PXE_MODE_MASK)) {
+		wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
+		i40e_flush(hw);
+		msleep(200);
+		pf->corer_count++;
+
+		i40e_clear_pxe_mode(hw);
+	}
 
 	/* Reset here to make sure all is clean and to define PF 'n' */
 	err = i40e_pf_reset(hw);
@@ -7142,8 +7940,18 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		goto err_pf_reset;
 	}
 
+	/* set up a default setting for link flow control */
+	pf->hw.fc.requested_mode = I40E_FC_NONE;
+
 	err = i40e_init_adminq(hw);
 	dev_info(&pdev->dev, "%s\n", i40e_fw_version_str(hw));
+	if (((hw->nvm.version & I40E_NVM_VERSION_HI_MASK)
+		 >> I40E_NVM_VERSION_HI_SHIFT) != I40E_CURRENT_NVM_VERSION_HI) {
+		dev_info(&pdev->dev,
+			 "warning: NVM version not supported, supported version: %02x.%02x\n",
+			 I40E_CURRENT_NVM_VERSION_HI,
+			 I40E_CURRENT_NVM_VERSION_LO);
+	}
 	if (err) {
 		dev_info(&pdev->dev,
 			 "init_adminq failed: %d expecting API %02x.%02x\n",
@@ -7152,6 +7960,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		goto err_pf_reset;
 	}
 
+	i40e_clear_pxe_mode(hw);
 	err = i40e_get_capabilities(pf);
 	if (err)
 		goto err_adminq_setup;
@@ -7178,7 +7987,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	}
 
 	i40e_get_mac_addr(hw, hw->mac.addr);
-	if (i40e_validate_mac_addr(hw->mac.addr)) {
+	if (!is_valid_ether_addr(hw->mac.addr)) {
 		dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr);
 		err = -EIO;
 		goto err_mac_addr;
@@ -7188,6 +7997,14 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	pci_set_drvdata(pdev, pf);
 	pci_save_state(pdev);
+#ifdef CONFIG_I40E_DCB
+	err = i40e_init_pf_dcb(pf);
+	if (err) {
+		dev_info(&pdev->dev, "init_pf_dcb failed: %d\n", err);
+		pf->flags &= ~I40E_FLAG_DCB_ENABLED;
+		goto err_init_dcb;
+	}
+#endif /* CONFIG_I40E_DCB */
 
 	/* set up periodic task facility */
 	setup_timer(&pf->service_timer, i40e_service_timer, (unsigned long)pf);
@@ -7198,6 +8015,10 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	pf->flags |= I40E_FLAG_NEED_LINK_UPDATE;
 	pf->link_check_timeout = jiffies;
 
+	/* WoL defaults to disabled */
+	pf->wol_en = false;
+	device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en);
+
 	/* set up the main switch operations */
 	i40e_determine_queue_usage(pf);
 	i40e_init_interrupt_scheme(pf);
@@ -7212,7 +8033,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		goto err_switch_setup;
 	}
 
-	err = i40e_setup_pf_switch(pf);
+	err = i40e_setup_pf_switch(pf, false);
 	if (err) {
 		dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
 		goto err_vsis;
@@ -7250,6 +8071,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		i40e_flush(hw);
 	}
 
+	pfs_found++;
+
 	i40e_dbg_pf_init(pf);
 
 	/* tell the firmware that we're starting */
@@ -7263,15 +8086,41 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	mod_timer(&pf->service_timer,
 		  round_jiffies(jiffies + pf->service_timer_period));
 
+	/* Get the negotiated link width and speed from PCI config space */
+	pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA, &link_status);
+
+	i40e_set_pci_config_data(hw, link_status);
+
+	dev_info(&pdev->dev, "PCI Express: %s %s\n",
+		(hw->bus.speed == i40e_bus_speed_8000 ? "Speed 8.0GT/s" :
+		 hw->bus.speed == i40e_bus_speed_5000 ? "Speed 5.0GT/s" :
+		 hw->bus.speed == i40e_bus_speed_2500 ? "Speed 2.5GT/s" :
+		 "Unknown"),
+		(hw->bus.width == i40e_bus_width_pcie_x8 ? "Width x8" :
+		 hw->bus.width == i40e_bus_width_pcie_x4 ? "Width x4" :
+		 hw->bus.width == i40e_bus_width_pcie_x2 ? "Width x2" :
+		 hw->bus.width == i40e_bus_width_pcie_x1 ? "Width x1" :
+		 "Unknown"));
+
+	if (hw->bus.width < i40e_bus_width_pcie_x8 ||
+	    hw->bus.speed < i40e_bus_speed_8000) {
+		dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n");
+		dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n");
+	}
+
 	return 0;
 
 	/* Unwind what we've done if something failed in the setup */
 err_vsis:
 	set_bit(__I40E_DOWN, &pf->state);
-err_switch_setup:
 	i40e_clear_interrupt_scheme(pf);
 	kfree(pf->vsi);
+err_switch_setup:
+	i40e_reset_interrupt_capability(pf);
 	del_timer_sync(&pf->service_timer);
+#ifdef CONFIG_I40E_DCB
+err_init_dcb:
+#endif /* CONFIG_I40E_DCB */
 err_mac_addr:
 err_configure_lan_hmc:
 	(void)i40e_shutdown_lan_hmc(hw);
@@ -7313,6 +8162,8 @@ static void i40e_remove(struct pci_dev *pdev)
 
 	i40e_dbg_pf_exit(pf);
 
+	i40e_ptp_stop(pf);
+
 	if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
 		i40e_free_vfs(pf);
 		pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
@@ -7356,7 +8207,6 @@ static void i40e_remove(struct pci_dev *pdev)
 			 "Failed to destroy the HMC resources: %d\n", ret_code);
 
 	/* shutdown the adminq */
-	i40e_aq_queue_shutdown(&pf->hw, true);
 	ret_code = i40e_shutdown_adminq(&pf->hw);
 	if (ret_code)
 		dev_warn(&pdev->dev,
@@ -7413,7 +8263,11 @@ static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
 	dev_info(&pdev->dev, "%s: error %d\n", __func__, error);
 
 	/* shutdown all operations */
-	i40e_pf_quiesce_all_vsi(pf);
+	if (!test_bit(__I40E_SUSPENDED, &pf->state)) {
+		rtnl_lock();
+		i40e_prep_for_reset(pf);
+		rtnl_unlock();
+	}
 
 	/* Request a slot reset */
 	return PCI_ERS_RESULT_NEED_RESET;
@@ -7476,9 +8330,103 @@ static void i40e_pci_error_resume(struct pci_dev *pdev)
 	struct i40e_pf *pf = pci_get_drvdata(pdev);
 
 	dev_info(&pdev->dev, "%s\n", __func__);
+	if (test_bit(__I40E_SUSPENDED, &pf->state))
+		return;
+
+	rtnl_lock();
 	i40e_handle_reset_warning(pf);
+	rtnl_unlock();
 }
 
+/**
+ * i40e_shutdown - PCI callback for shutting down
+ * @pdev: PCI device information struct
+ **/
+static void i40e_shutdown(struct pci_dev *pdev)
+{
+	struct i40e_pf *pf = pci_get_drvdata(pdev);
+	struct i40e_hw *hw = &pf->hw;
+
+	set_bit(__I40E_SUSPENDED, &pf->state);
+	set_bit(__I40E_DOWN, &pf->state);
+	rtnl_lock();
+	i40e_prep_for_reset(pf);
+	rtnl_unlock();
+
+	wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
+	wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
+
+	if (system_state == SYSTEM_POWER_OFF) {
+		pci_wake_from_d3(pdev, pf->wol_en);
+		pci_set_power_state(pdev, PCI_D3hot);
+	}
+}
+
+#ifdef CONFIG_PM
+/**
+ * i40e_suspend - PCI callback for moving to D3
+ * @pdev: PCI device information struct
+ **/
+static int i40e_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+	struct i40e_pf *pf = pci_get_drvdata(pdev);
+	struct i40e_hw *hw = &pf->hw;
+
+	set_bit(__I40E_SUSPENDED, &pf->state);
+	set_bit(__I40E_DOWN, &pf->state);
+	rtnl_lock();
+	i40e_prep_for_reset(pf);
+	rtnl_unlock();
+
+	wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
+	wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
+
+	pci_wake_from_d3(pdev, pf->wol_en);
+	pci_set_power_state(pdev, PCI_D3hot);
+
+	return 0;
+}
+
+/**
+ * i40e_resume - PCI callback for waking up from D3
+ * @pdev: PCI device information struct
+ **/
+static int i40e_resume(struct pci_dev *pdev)
+{
+	struct i40e_pf *pf = pci_get_drvdata(pdev);
+	u32 err;
+
+	pci_set_power_state(pdev, PCI_D0);
+	pci_restore_state(pdev);
+	/* pci_restore_state() clears dev->state_saved, so
+	 * call pci_save_state() again to restore it.
+	 */
+	pci_save_state(pdev);
+
+	err = pci_enable_device_mem(pdev);
+	if (err) {
+		dev_err(&pdev->dev,
+			"%s: Cannot enable PCI device from suspend\n",
+			__func__);
+		return err;
+	}
+	pci_set_master(pdev);
+
+	/* no wakeup events while running */
+	pci_wake_from_d3(pdev, false);
+
+	/* handling the reset will rebuild the device state */
+	if (test_and_clear_bit(__I40E_SUSPENDED, &pf->state)) {
+		clear_bit(__I40E_DOWN, &pf->state);
+		rtnl_lock();
+		i40e_reset_and_rebuild(pf, false);
+		rtnl_unlock();
+	}
+
+	return 0;
+}
+
+#endif
 static const struct pci_error_handlers i40e_err_handler = {
 	.error_detected = i40e_pci_error_detected,
 	.slot_reset = i40e_pci_error_slot_reset,
@@ -7490,6 +8438,11 @@ static struct pci_driver i40e_driver = {
 	.id_table = i40e_pci_tbl,
 	.probe    = i40e_probe,
 	.remove   = i40e_remove,
+#ifdef CONFIG_PM
+	.suspend  = i40e_suspend,
+	.resume   = i40e_resume,
+#endif
+	.shutdown = i40e_shutdown,
 	.err_handler = &i40e_err_handler,
 	.sriov_configure = i40e_pci_sriov_configure,
 };
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index 97e1bb30ef8a..73f95b081927 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
  * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
  *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
@@ -166,15 +165,15 @@ static i40e_status i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
 }
 
 /**
- *  i40e_read_nvm_srctl - Reads Shadow RAM.
+ *  i40e_read_nvm_word - Reads Shadow RAM
  *  @hw: pointer to the HW structure.
  *  @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
  *  @data: word read from the Shadow RAM.
  *
  *  Reads 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
  **/
-static i40e_status i40e_read_nvm_srctl(struct i40e_hw *hw, u16 offset,
-						 u16 *data)
+i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
+					 u16 *data)
 {
 	i40e_status ret_code = I40E_ERR_TIMEOUT;
 	u32 sr_reg;
@@ -211,29 +210,6 @@ read_nvm_exit:
 }
 
 /**
- *  i40e_read_nvm_word - Reads Shadow RAM word.
- *  @hw: pointer to the HW structure.
- *  @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
- *  @data: word read from the Shadow RAM.
- *
- *  Reads 16 bit word from the Shadow RAM. Each read is preceded
- *  with the NVM ownership taking and followed by the release.
- **/
-i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
-					 u16 *data)
-{
-	i40e_status ret_code = 0;
-
-	ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
-	if (!ret_code) {
-		ret_code = i40e_read_nvm_srctl(hw, offset, data);
-		i40e_release_nvm(hw);
-	}
-
-	return ret_code;
-}
-
-/**
  *  i40e_read_nvm_buffer - Reads Shadow RAM buffer.
  *  @hw: pointer to the HW structure.
  *  @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
@@ -250,36 +226,25 @@ i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
 {
 	i40e_status ret_code = 0;
 	u16 index, word;
-	u32 time;
 
-	ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
-	if (!ret_code) {
-		/* Loop thru the selected region. */
-		for (word = 0; word < *words; word++) {
-			index = offset + word;
-			ret_code = i40e_read_nvm_srctl(hw, index, &data[word]);
-			if (ret_code)
-				break;
-			/* Check if we didn't exceeded the semaphore timeout. */
-			time = rd32(hw, I40E_GLVFGEN_TIMER);
-			if (time >= hw->nvm.hw_semaphore_timeout) {
-				ret_code = I40E_ERR_TIMEOUT;
-				hw_dbg(hw, "NVM read error: timeout.\n");
-				break;
-			}
-		}
-		/* Update the number of words read from the Shadow RAM. */
-		*words = word;
-		/* Release the NVM ownership. */
-		i40e_release_nvm(hw);
+	/* Loop through the selected region. */
+	for (word = 0; word < *words; word++) {
+		index = offset + word;
+		ret_code = i40e_read_nvm_word(hw, index, &data[word]);
+		if (ret_code)
+			break;
 	}
 
+	/* Update the number of words read from the Shadow RAM. */
+	*words = word;
+
 	return ret_code;
 }
 
 /**
  *  i40e_calc_nvm_checksum - Calculates and returns the checksum
  *  @hw: pointer to hardware structure
+ *  @checksum: pointer to the checksum
  *
  *  This function calculate SW Checksum that covers the whole 64kB shadow RAM
  *  except the VPD and PCIe ALT Auto-load modules. The structure and size of VPD
@@ -297,14 +262,14 @@ static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw,
 	u32 i = 0;
 
 	/* read pointer to VPD area */
-	ret_code = i40e_read_nvm_srctl(hw, I40E_SR_VPD_PTR, &vpd_module);
+	ret_code = i40e_read_nvm_word(hw, I40E_SR_VPD_PTR, &vpd_module);
 	if (ret_code) {
 		ret_code = I40E_ERR_NVM_CHECKSUM;
 		goto i40e_calc_nvm_checksum_exit;
 	}
 
 	/* read pointer to PCIe Alt Auto-load module */
-	ret_code = i40e_read_nvm_srctl(hw, I40E_SR_PCIE_ALT_AUTO_LOAD_PTR,
+	ret_code = i40e_read_nvm_word(hw, I40E_SR_PCIE_ALT_AUTO_LOAD_PTR,
 				       &pcie_alt_module);
 	if (ret_code) {
 		ret_code = I40E_ERR_NVM_CHECKSUM;
@@ -331,7 +296,7 @@ static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw,
 				break;
 		}
 
-		ret_code = i40e_read_nvm_srctl(hw, (u16)i, &word);
+		ret_code = i40e_read_nvm_word(hw, (u16)i, &word);
 		if (ret_code) {
 			ret_code = I40E_ERR_NVM_CHECKSUM;
 			goto i40e_calc_nvm_checksum_exit;
@@ -358,7 +323,7 @@ i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
 {
 	i40e_status ret_code = 0;
 	u16 checksum_sr = 0;
-	u16 checksum_local;
+	u16 checksum_local = 0;
 
 	ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
 	if (ret_code)
@@ -371,7 +336,7 @@ i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
 	/* Do not use i40e_read_nvm_word() because we do not want to take
 	 * the synchronization semaphores twice here.
 	 */
-	i40e_read_nvm_srctl(hw, I40E_SR_SW_CHECKSUM_WORD, &checksum_sr);
+	i40e_read_nvm_word(hw, I40E_SR_SW_CHECKSUM_WORD, &checksum_sr);
 
 	/* Verify read checksum from EEPROM is the same as
 	 * calculated checksum
diff --git a/drivers/net/ethernet/intel/i40e/i40e_osdep.h b/drivers/net/ethernet/intel/i40e/i40e_osdep.h
index 702c81ba86e3..ecd0f0b663c9 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_osdep.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_osdep.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
  * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
  *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index f75bb9ccc900..ed91f93ede2b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
  * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
  *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
@@ -51,7 +50,6 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
 				void *buff, /* can be NULL */
 				u16  buff_size,
 				struct i40e_asq_cmd_details *cmd_details);
-bool i40e_asq_done(struct i40e_hw *hw);
 
 /* debug function for adminq */
 void i40e_debug_aq(struct i40e_hw *hw,
@@ -60,10 +58,11 @@ void i40e_debug_aq(struct i40e_hw *hw,
 		   void *buffer);
 
 void i40e_idle_aq(struct i40e_hw *hw);
-void i40e_resume_aq(struct i40e_hw *hw);
+bool i40e_check_asq_alive(struct i40e_hw *hw);
+i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw, bool unloading);
 
 u32 i40e_led_get(struct i40e_hw *hw);
-void i40e_led_set(struct i40e_hw *hw, u32 mode);
+void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink);
 
 /* admin send queue commands */
 
@@ -71,8 +70,6 @@ i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw,
 				u16 *fw_major_version, u16 *fw_minor_version,
 				u16 *api_major_version, u16 *api_minor_version,
 				struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw,
-					     bool unloading);
 i40e_status i40e_aq_set_phy_reset(struct i40e_hw *hw,
 				struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw, u16 vsi_id,
@@ -95,9 +92,9 @@ i40e_status i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
 				u16 vsi_id, bool set_filter,
 				struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
-				u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details);
+		u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
-				u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details);
+		u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw,
 				struct i40e_vsi_context *vsi_ctx,
 				struct i40e_asq_cmd_details *cmd_details);
@@ -106,7 +103,8 @@ i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw,
 				struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
 				u16 downlink_seid, u8 enabled_tc,
-				bool default_port, u16 *pveb_seid,
+				bool default_port, bool enable_l2_filtering,
+				u16 *pveb_seid,
 				struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_aq_get_veb_parameters(struct i40e_hw *hw,
 				u16 veb_seid, u16 *switch_id, bool *floating,
@@ -119,12 +117,6 @@ i40e_status i40e_aq_add_macvlan(struct i40e_hw *hw, u16 vsi_id,
 i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 vsi_id,
 			struct i40e_aqc_remove_macvlan_element_data *mv_list,
 			u16 count, struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_add_vlan(struct i40e_hw *hw, u16 vsi_id,
-			struct i40e_aqc_add_remove_vlan_element_data *v_list,
-			u8 count, struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_remove_vlan(struct i40e_hw *hw, u16 vsi_id,
-			struct i40e_aqc_add_remove_vlan_element_data *v_list,
-			u8 count, struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
 				u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen,
 				struct i40e_asq_cmd_details *cmd_details);
@@ -164,11 +156,19 @@ i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
 				struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_aq_start_lldp(struct i40e_hw *hw,
 				struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
+				u16 udp_port, u8 header_len,
+				u8 protocol_index, u8 *filter_index,
+				struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index,
+				struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
 				struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_aq_mac_address_write(struct i40e_hw *hw,
 				    u16 flags, u8 *mac_addr,
 				    struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_dcb_updated(struct i40e_hw *hw,
+				struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_aq_set_hmc_resource_profile(struct i40e_hw *hw,
 				enum i40e_aq_hmc_profile profile,
 				u8 pe_vf_enabled_count,
@@ -179,6 +179,15 @@ i40e_status i40e_aq_config_switch_comp_bw_limit(struct i40e_hw *hw,
 i40e_status i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw, u16 seid,
 			struct i40e_aqc_configure_vsi_tc_bw_data *bw_data,
 			struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_config_switch_comp_ets(struct i40e_hw *hw,
+		u16 seid,
+		struct i40e_aqc_configure_switching_comp_ets_data *ets_data,
+		enum i40e_admin_queue_opc opcode,
+		struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_config_switch_comp_bw_config(struct i40e_hw *hw,
+	u16 seid,
+	struct i40e_aqc_configure_switching_comp_bw_config_data *bw_data,
+	struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_aq_query_vsi_bw_config(struct i40e_hw *hw,
 			u16 seid,
 			struct i40e_aqc_query_vsi_bw_config_resp *bw_data,
@@ -207,8 +216,6 @@ bool i40e_get_link_status(struct i40e_hw *hw);
 i40e_status i40e_get_mac_addr(struct i40e_hw *hw,
 						u8 *mac_addr);
 i40e_status i40e_validate_mac_addr(u8 *mac_addr);
-i40e_status i40e_read_lldp_cfg(struct i40e_hw *hw,
-					struct i40e_lldp_variables *lldp_cfg);
 /* prototype for functions used for NVM access */
 i40e_status i40e_init_nvm(struct i40e_hw *hw);
 i40e_status i40e_acquire_nvm(struct i40e_hw *hw,
@@ -222,6 +229,7 @@ i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
 					   u16 *words, u16 *data);
 i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
 						 u16 *checksum);
+void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status);
 
 /* prototype for functions used for SW locks */
 
@@ -236,4 +244,9 @@ i40e_status i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
 				struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_set_filter_control(struct i40e_hw *hw,
 				struct i40e_filter_control_settings *settings);
+i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
+				u8 *mac_addr, u16 ethtype, u16 flags,
+				u16 vsi_seid, u16 queue, bool is_add,
+				struct i40e_control_filter_stats *stats,
+				struct i40e_asq_cmd_details *cmd_details);
 #endif /* _I40E_PROTOTYPE_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
new file mode 100644
index 000000000000..e33ec6c842b7
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -0,0 +1,662 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#include "i40e.h"
+#include <linux/export.h>
+#include <linux/ptp_classify.h>
+
+/* The XL710 timesync is very much like Intel's 82599 design when it comes to
+ * the fundamental clock design. However, the clock operations are much simpler
+ * in the XL710 because the device supports a full 64 bits of nanoseconds.
+ * Because the field is so wide, we can forgo the cycle counter and just
+ * operate with the nanosecond field directly without fear of overflow.
+ *
+ * Much like the 82599, the update period is dependent upon the link speed:
+ * At 40Gb link or no link, the period is 1.6ns.
+ * At 10Gb link, the period is multiplied by 2. (3.2ns)
+ * At 1Gb link, the period is multiplied by 20. (32ns)
+ * 1588 functionality is not supported at 100Mbps.
+ */
+#define I40E_PTP_40GB_INCVAL 0x0199999999ULL
+#define I40E_PTP_10GB_INCVAL 0x0333333333ULL
+#define I40E_PTP_1GB_INCVAL  0x2000000000ULL
+
+#define I40E_PRTTSYN_CTL1_TSYNTYPE_V1  (0x1 << \
+					I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
+#define I40E_PRTTSYN_CTL1_TSYNTYPE_V2  (0x2 << \
+					I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
+#define I40E_PTP_TX_TIMEOUT  (HZ * 15)
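A note on these constants (an interpretation, not stated in the driver): each INCVAL reads naturally as a 32.32 fixed-point nanoseconds-per-tick value, which matches the periods quoted in the comment above:

	0x0199999999 / 2^32 = 6871947673 / 4294967296   ~= 1.6 ns/tick  (40Gb or no link)
	0x0333333333 / 2^32 = 13743895347 / 4294967296  ~= 3.2 ns/tick  (10Gb)
	0x2000000000 / 2^32 = 137438953472 / 4294967296  = 32 ns/tick   (1Gb)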
+
+/**
+ * i40e_ptp_read - Read the PHC time from the device
+ * @pf: Board private structure
+ * @ts: timespec structure to hold the current time value
+ *
+ * This function reads the PRTTSYN_TIME registers. Since the registers hold
+ * 64 bits of nanoseconds, the value must be converted to a timespec before
+ * it can be returned.
+ **/
+static void i40e_ptp_read(struct i40e_pf *pf, struct timespec *ts)
+{
+	struct i40e_hw *hw = &pf->hw;
+	u32 hi, lo;
+	u64 ns;
+
+	/* The timer latches on the lowest register read. */
+	lo = rd32(hw, I40E_PRTTSYN_TIME_L);
+	hi = rd32(hw, I40E_PRTTSYN_TIME_H);
+
+	ns = (((u64)hi) << 32) | lo;
+
+	*ts = ns_to_timespec(ns);
+}
+
+/**
+ * i40e_ptp_write - Write the PHC time to the device
+ * @pf: Board private structure
+ * @ts: timespec structure that holds the new time value
+ *
+ * This function writes the PRTTSYN_TIME registers with the user value. Since
+ * we receive a timespec from the stack, we must convert that timespec into
+ * nanoseconds before programming the registers.
+ **/
+static void i40e_ptp_write(struct i40e_pf *pf, const struct timespec *ts)
+{
+	struct i40e_hw *hw = &pf->hw;
+	u64 ns = timespec_to_ns(ts);
+
+	/* The timer will not update until the high register is written, so
+	 * write the low register first.
+	 */
+	wr32(hw, I40E_PRTTSYN_TIME_L, ns & 0xFFFFFFFF);
+	wr32(hw, I40E_PRTTSYN_TIME_H, ns >> 32);
+}
+
+/**
+ * i40e_ptp_convert_to_hwtstamp - Convert device clock to system time
+ * @hwtstamps: Timestamp structure to update
+ * @timestamp: Timestamp from the hardware
+ *
+ * We need to convert the NIC clock value into a hwtstamp which can be used by
+ * the upper level timestamping functions. Since the timestamp is simply a 64-
+ * bit nanosecond value, we can call ns_to_ktime directly to handle this.
+ **/
+static void i40e_ptp_convert_to_hwtstamp(struct skb_shared_hwtstamps *hwtstamps,
+					 u64 timestamp)
+{
+	memset(hwtstamps, 0, sizeof(*hwtstamps));
+
+	hwtstamps->hwtstamp = ns_to_ktime(timestamp);
+}
+
+/**
+ * i40e_ptp_adjfreq - Adjust the PHC frequency
+ * @ptp: The PTP clock structure
+ * @ppb: Parts per billion adjustment from the base
+ *
+ * Adjust the frequency of the PHC by the indicated parts per billion from the
+ * base frequency.
+ **/
+static int i40e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+{
+	struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps);
+	struct i40e_hw *hw = &pf->hw;
+	u64 adj, freq, diff;
+	int neg_adj = 0;
+
+	if (ppb < 0) {
+		neg_adj = 1;
+		ppb = -ppb;
+	}
+
+	smp_mb(); /* Force any pending update before accessing. */
+	adj = ACCESS_ONCE(pf->ptp_base_adj);
+
+	freq = adj;
+	freq *= ppb;
+	diff = div_u64(freq, 1000000000ULL);
+
+	if (neg_adj)
+		adj -= diff;
+	else
+		adj += diff;
+
+	wr32(hw, I40E_PRTTSYN_INC_L, adj & 0xFFFFFFFF);
+	wr32(hw, I40E_PRTTSYN_INC_H, adj >> 32);
+
+	return 0;
+}
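Worked example for the adjustment math above: with the 40Gb base increment 0x0199999999 (6871947673) cached in ptp_base_adj and a request of ppb = +100, diff = (6871947673 * 100) / 1000000000 = 687, so the increment register is raised by 687, i.e. by 100 parts per billion of its nominal value; a negative ppb subtracts the same amount.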
+
+/**
+ * i40e_ptp_adjtime - Adjust the PHC time
+ * @ptp: The PTP clock structure
+ * @delta: Offset in nanoseconds to adjust the PHC time by
+ *
+ * Adjust the current time of the PHC by delta nanoseconds: the clock is read,
+ * the delta is applied, and the adjusted time is written back.
+ **/
+static int i40e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+	struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps);
+	struct timespec now, then = ns_to_timespec(delta);
+	unsigned long flags;
+
+	spin_lock_irqsave(&pf->tmreg_lock, flags);
+
+	i40e_ptp_read(pf, &now);
+	now = timespec_add(now, then);
+	i40e_ptp_write(pf, (const struct timespec *)&now);
+
+	spin_unlock_irqrestore(&pf->tmreg_lock, flags);
+
+	return 0;
+}
+
+/**
+ * i40e_ptp_gettime - Get the time of the PHC
+ * @ptp: The PTP clock structure
+ * @ts: timespec structure to hold the current time value
+ *
+ * Read the device clock and return the value in ns, after converting it into
+ * a timespec struct.
+ **/
+static int i40e_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
+{
+	struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps);
+	unsigned long flags;
+
+	spin_lock_irqsave(&pf->tmreg_lock, flags);
+	i40e_ptp_read(pf, ts);
+	spin_unlock_irqrestore(&pf->tmreg_lock, flags);
+
+	return 0;
+}
+
+/**
+ * i40e_ptp_settime - Set the time of the PHC
+ * @ptp: The PTP clock structure
+ * @ts: timespec structure that holds the new time value
+ *
+ * Set the device clock to the user input value. The conversion from timespec
+ * to ns happens in the write function.
+ **/
+static int i40e_ptp_settime(struct ptp_clock_info *ptp,
+			    const struct timespec *ts)
+{
+	struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps);
+	unsigned long flags;
+
+	spin_lock_irqsave(&pf->tmreg_lock, flags);
+	i40e_ptp_write(pf, ts);
+	spin_unlock_irqrestore(&pf->tmreg_lock, flags);
+
+	return 0;
+}
+
+/**
+ * i40e_ptp_tx_work - poll for the Tx timestamp event
+ * @work: pointer to work struct
+ *
+ * This work function polls the PRTTSYN_STAT_0.TXTIME bit to determine when a
+ * Tx timestamp event has occurred, in order to pass the Tx timestamp value up
+ * the stack in the skb.
+ **/
+static void i40e_ptp_tx_work(struct work_struct *work)
+{
+	struct i40e_pf *pf = container_of(work, struct i40e_pf,
+					  ptp_tx_work);
+	struct i40e_hw *hw = &pf->hw;
+	u32 prttsyn_stat_0;
+
+	if (!pf->ptp_tx_skb)
+		return;
+
+	if (time_is_before_jiffies(pf->ptp_tx_start +
+				   I40E_PTP_TX_TIMEOUT)) {
+		dev_kfree_skb_any(pf->ptp_tx_skb);
+		pf->ptp_tx_skb = NULL;
+		pf->tx_hwtstamp_timeouts++;
+		dev_warn(&pf->pdev->dev, "clearing Tx timestamp hang");
+		return;
+	}
+
+	prttsyn_stat_0 = rd32(hw, I40E_PRTTSYN_STAT_0);
+	if (prttsyn_stat_0 & I40E_PRTTSYN_STAT_0_TXTIME_MASK)
+		i40e_ptp_tx_hwtstamp(pf);
+	else
+		schedule_work(&pf->ptp_tx_work);
+}
+
+/**
+ * i40e_ptp_enable - Enable/disable ancillary features of the PHC subsystem
+ * @ptp: The PTP clock structure
+ * @rq: The requested feature to change
+ * @on: Enable/disable flag
+ *
+ * The XL710 does not support any of the ancillary features of the PHC
+ * subsystem, so this function just returns -EOPNOTSUPP.
+ **/
+static int i40e_ptp_enable(struct ptp_clock_info *ptp,
+			   struct ptp_clock_request *rq, int on)
+{
+	return -EOPNOTSUPP;
+}
+
+/**
+ * i40e_ptp_rx_hang - Detect error case when Rx timestamp registers are hung
+ * @vsi: The VSI with the rings relevant to 1588
+ *
+ * This watchdog task is scheduled to detect the error case where the hardware
+ * has dropped an Rx packet that was timestamped when the ring was full. This
+ * particular error is rare but leaves the device in a state where it is unable
+ * to timestamp any future packets.
+ **/
+void i40e_ptp_rx_hang(struct i40e_vsi *vsi)
+{
+	struct i40e_pf *pf = vsi->back;
+	struct i40e_hw *hw = &pf->hw;
+	struct i40e_ring *rx_ring;
+	unsigned long rx_event;
+	u32 prttsyn_stat;
+	int n;
+
+	if (!(pf->flags & I40E_FLAG_PTP))
+		return;
+
+	prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_1);
+
+	/* Unless all four receive timestamp registers are latched, we are not
+	 * concerned about a possible PTP Rx hang, so just update the timeout
+	 * counter and exit.
+	 */
+	if (!(prttsyn_stat & ((I40E_PRTTSYN_STAT_1_RXT0_MASK <<
+			       I40E_PRTTSYN_STAT_1_RXT0_SHIFT) |
+			      (I40E_PRTTSYN_STAT_1_RXT1_MASK <<
+			       I40E_PRTTSYN_STAT_1_RXT1_SHIFT) |
+			      (I40E_PRTTSYN_STAT_1_RXT2_MASK <<
+			       I40E_PRTTSYN_STAT_1_RXT2_SHIFT) |
+			      (I40E_PRTTSYN_STAT_1_RXT3_MASK <<
+			       I40E_PRTTSYN_STAT_1_RXT3_SHIFT)))) {
+		pf->last_rx_ptp_check = jiffies;
+		return;
+	}
+
+	/* Determine the most recent watchdog or rx_timestamp event. */
+	rx_event = pf->last_rx_ptp_check;
+	for (n = 0; n < vsi->num_queue_pairs; n++) {
+		rx_ring = vsi->rx_rings[n];
+		if (time_after(rx_ring->last_rx_timestamp, rx_event))
+			rx_event = rx_ring->last_rx_timestamp;
+	}
+
+	/* Only need to read the high RXSTMP register to clear the lock */
+	if (time_is_before_jiffies(rx_event + 5 * HZ)) {
+		rd32(hw, I40E_PRTTSYN_RXTIME_H(0));
+		rd32(hw, I40E_PRTTSYN_RXTIME_H(1));
+		rd32(hw, I40E_PRTTSYN_RXTIME_H(2));
+		rd32(hw, I40E_PRTTSYN_RXTIME_H(3));
+		pf->last_rx_ptp_check = jiffies;
+		pf->rx_hwtstamp_cleared++;
+		dev_warn(&vsi->back->pdev->dev,
+			 "%s: clearing Rx timestamp hang",
+			 __func__);
+	}
+}
+
+/**
+ * i40e_ptp_tx_hwtstamp - Utility function which delivers the Tx timestamp
+ * @pf: Board private structure
+ *
+ * Read the value of the Tx timestamp from the registers, convert it into a
+ * value consumable by the stack, and store the result in the shhwtstamps
+ * struct before passing it up the stack with skb_tstamp_tx().
+ **/
+void i40e_ptp_tx_hwtstamp(struct i40e_pf *pf)
+{
+	struct skb_shared_hwtstamps shhwtstamps;
+	struct i40e_hw *hw = &pf->hw;
+	u32 hi, lo;
+	u64 ns;
+
+	lo = rd32(hw, I40E_PRTTSYN_TXTIME_L);
+	hi = rd32(hw, I40E_PRTTSYN_TXTIME_H);
+
+	ns = (((u64)hi) << 32) | lo;
+
+	i40e_ptp_convert_to_hwtstamp(&shhwtstamps, ns);
+	skb_tstamp_tx(pf->ptp_tx_skb, &shhwtstamps);
+	dev_kfree_skb_any(pf->ptp_tx_skb);
+	pf->ptp_tx_skb = NULL;
+}
+
+/**
+ * i40e_ptp_rx_hwtstamp - Utility function which checks for an Rx timestamp
+ * @pf: Board private structure
+ * @skb: Particular skb to send timestamp with
+ * @index: Index into the receive timestamp registers for the timestamp
+ *
+ * The XL710 receives a notification in the receive descriptor with an offset
+ * into the set of RXTIME registers where the timestamp for that skb is held.
+ * This function fetches the receive timestamp from that offset, if a valid
+ * one exists, and converts the raw nanosecond value into a timestamp the
+ * stack can consume.
+ **/
+void i40e_ptp_rx_hwtstamp(struct i40e_pf *pf, struct sk_buff *skb, u8 index)
+{
+	u32 prttsyn_stat, hi, lo;
+	struct i40e_hw *hw;
+	u64 ns;
+
+	/* Since we cannot turn off the Rx timestamp logic if the device is
+	 * doing Tx timestamping, check if Rx timestamping is configured.
+	 */
+	if (!pf->ptp_rx)
+		return;
+
+	hw = &pf->hw;
+
+	prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_1);
+
+	if (!(prttsyn_stat & (1 << index)))
+		return;
+
+	lo = rd32(hw, I40E_PRTTSYN_RXTIME_L(index));
+	hi = rd32(hw, I40E_PRTTSYN_RXTIME_H(index));
+
+	ns = (((u64)hi) << 32) | lo;
+
+	i40e_ptp_convert_to_hwtstamp(skb_hwtstamps(skb), ns);
+}
+
+/**
+ * i40e_ptp_set_increment - Utility function to update clock increment rate
+ * @pf: Board private structure
+ *
+ * During a link change, the DMA frequency that drives the 1588 logic will
+ * change. In order to keep the PRTTSYN_TIME registers in units of nanoseconds,
+ * we must update the increment value per clock tick.
+ **/
+void i40e_ptp_set_increment(struct i40e_pf *pf)
+{
+	struct i40e_link_status *hw_link_info;
+	struct i40e_hw *hw = &pf->hw;
+	u64 incval;
+
+	hw_link_info = &hw->phy.link_info;
+
+	i40e_aq_get_link_info(&pf->hw, true, NULL, NULL);
+
+	switch (hw_link_info->link_speed) {
+	case I40E_LINK_SPEED_10GB:
+		incval = I40E_PTP_10GB_INCVAL;
+		break;
+	case I40E_LINK_SPEED_1GB:
+		incval = I40E_PTP_1GB_INCVAL;
+		break;
+	case I40E_LINK_SPEED_100MB:
+		dev_warn(&pf->pdev->dev,
+			 "%s: 1588 functionality is not supported at 100 Mbps. Stopping the PHC.\n",
+			 __func__);
+		incval = 0;
+		break;
+	case I40E_LINK_SPEED_40GB:
+	default:
+		incval = I40E_PTP_40GB_INCVAL;
+		break;
+	}
+
+	/* Write the new increment value into the increment register. The
+	 * hardware will not update the clock until both registers have been
+	 * written.
+	 */
+	wr32(hw, I40E_PRTTSYN_INC_L, incval & 0xFFFFFFFF);
+	wr32(hw, I40E_PRTTSYN_INC_H, incval >> 32);
+
+	/* Update the base adjustment value. */
+	ACCESS_ONCE(pf->ptp_base_adj) = incval;
+	smp_mb(); /* Force the above update. */
+}
+
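For context, the increment written above is a fixed-point count of nanoseconds advanced per 1588 clock tick, which is why it must be rescaled whenever the link speed, and with it the clock frequency, changes. A minimal user-space sketch of that relationship, assuming a hypothetical 32.32 fixed-point layout and an arbitrary example frequency rather than the XL710's real INCVAL encoding:

#include <stdint.h>
#include <stdio.h>

/* Illustration only: a 32.32 fixed-point "nanoseconds per tick" increment
 * derived from an assumed clock frequency. The I40E_PTP_*_INCVAL constants
 * express the same idea for the device's actual clock rates.
 */
static uint64_t example_incval(uint64_t clock_hz)
{
	/* ns per tick = 1e9 / clock_hz, scaled by 2^32 for the fraction */
	return ((uint64_t)1000000000 << 32) / clock_hz;
}

int main(void)
{
	/* 700 MHz is an arbitrary example, not a datasheet value */
	printf("incval = 0x%llx\n",
	       (unsigned long long)example_incval(700000000ULL));
	return 0;
}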
+/**
+ * i40e_ptp_get_ts_config - ioctl interface to read the HW timestamping config
+ * @pf: Board private structure
+ * @ifr: ioctl data
+ *
+ * Obtain the current hardware timestamping settings as requested. To do this,
+ * keep a shadow copy of the timestamp settings rather than attempting to
+ * deconstruct them from the registers.
+ **/
+int i40e_ptp_get_ts_config(struct i40e_pf *pf, struct ifreq *ifr)
+{
+	struct hwtstamp_config *config = &pf->tstamp_config;
+
+	return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
+		-EFAULT : 0;
+}
+
+/**
+ * i40e_ptp_set_ts_config - ioctl interface to control the HW timestamping
+ * @pf: Board private structure
+ * @ifr: ioctl data
+ *
+ * Respond to the user filter requests and make the appropriate hardware
+ * changes here. The XL710 cannot support splitting of the Tx/Rx timestamping
+ * logic, so keep track in software of whether to indicate these timestamps
+ * or not.
+ *
+ * It is permissible to "upgrade" the user request to a broader filter, as long
+ * as the user receives the timestamps they care about and is notified that the
+ * filter has been broadened.
+ **/
+int i40e_ptp_set_ts_config(struct i40e_pf *pf, struct ifreq *ifr)
+{
+	struct i40e_hw *hw = &pf->hw;
+	struct hwtstamp_config *config = &pf->tstamp_config;
+	u32 pf_id, tsyntype, regval;
+
+	if (copy_from_user(config, ifr->ifr_data, sizeof(*config)))
+		return -EFAULT;
+
+	/* Reserved for future extensions. */
+	if (config->flags)
+		return -EINVAL;
+
+	/* Confirm that 1588 is supported on this PF. */
+	pf_id = (rd32(hw, I40E_PRTTSYN_CTL0) & I40E_PRTTSYN_CTL0_PF_ID_MASK) >>
+		I40E_PRTTSYN_CTL0_PF_ID_SHIFT;
+	if (hw->pf_id != pf_id)
+		return -EINVAL;
+
+	switch (config->tx_type) {
+	case HWTSTAMP_TX_OFF:
+		pf->ptp_tx = false;
+		break;
+	case HWTSTAMP_TX_ON:
+		pf->ptp_tx = true;
+		break;
+	default:
+		return -ERANGE;
+	}
+
+	switch (config->rx_filter) {
+	case HWTSTAMP_FILTER_NONE:
+		pf->ptp_rx = false;
+		tsyntype = 0;
+		break;
+	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+		pf->ptp_rx = true;
+		tsyntype = I40E_PRTTSYN_CTL1_V1MESSTYPE0_MASK |
+			   I40E_PRTTSYN_CTL1_TSYNTYPE_V1 |
+			   I40E_PRTTSYN_CTL1_UDP_ENA_MASK;
+		config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
+		break;
+	case HWTSTAMP_FILTER_PTP_V2_EVENT:
+	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+	case HWTSTAMP_FILTER_PTP_V2_SYNC:
+	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+		pf->ptp_rx = true;
+		tsyntype = I40E_PRTTSYN_CTL1_V2MESSTYPE0_MASK |
+			   I40E_PRTTSYN_CTL1_TSYNTYPE_V2 |
+			   I40E_PRTTSYN_CTL1_UDP_ENA_MASK;
+		config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+		break;
+	case HWTSTAMP_FILTER_ALL:
+	default:
+		return -ERANGE;
+	}
+
+	/* Read the 1588 status and timestamp registers to unlatch any stale values. */
+	rd32(hw, I40E_PRTTSYN_STAT_0);
+	rd32(hw, I40E_PRTTSYN_TXTIME_H);
+	rd32(hw, I40E_PRTTSYN_RXTIME_H(0));
+	rd32(hw, I40E_PRTTSYN_RXTIME_H(1));
+	rd32(hw, I40E_PRTTSYN_RXTIME_H(2));
+	rd32(hw, I40E_PRTTSYN_RXTIME_H(3));
+
+	/* Enable/disable the Tx timestamp interrupt based on user input. */
+	regval = rd32(hw, I40E_PRTTSYN_CTL0);
+	if (pf->ptp_tx)
+		regval |= I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_MASK;
+	else
+		regval &= ~I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_MASK;
+	wr32(hw, I40E_PRTTSYN_CTL0, regval);
+
+	regval = rd32(hw, I40E_PFINT_ICR0_ENA);
+	if (pf->ptp_tx)
+		regval |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
+	else
+		regval &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
+	wr32(hw, I40E_PFINT_ICR0_ENA, regval);
+
+	/* There is no simple on/off switch for Rx. To "disable" Rx support,
+	 * ignore any received timestamps, rather than turn off the clock.
+	 */
+	if (pf->ptp_rx) {
+		regval = rd32(hw, I40E_PRTTSYN_CTL1);
+		/* clear everything but the enable bit */
+		regval &= I40E_PRTTSYN_CTL1_TSYNENA_MASK;
+		/* now enable bits for desired Rx timestamps */
+		regval |= tsyntype;
+		wr32(hw, I40E_PRTTSYN_CTL1, regval);
+	}
+
+	return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
+		-EFAULT : 0;
+}
+
+/**
+ * i40e_ptp_init - Initialize the 1588 support and register the PHC
+ * @pf: Board private structure
+ *
+ * This function registers the device clock as a PHC. If it is successful, it
+ * starts the clock in the hardware.
+ **/
+void i40e_ptp_init(struct i40e_pf *pf)
+{
+	struct i40e_hw *hw = &pf->hw;
+	struct net_device *netdev = pf->vsi[pf->lan_vsi]->netdev;
+
+	strncpy(pf->ptp_caps.name, "i40e", sizeof(pf->ptp_caps.name));
+	pf->ptp_caps.owner = THIS_MODULE;
+	pf->ptp_caps.max_adj = 999999999;
+	pf->ptp_caps.n_ext_ts = 0;
+	pf->ptp_caps.pps = 0;
+	pf->ptp_caps.adjfreq = i40e_ptp_adjfreq;
+	pf->ptp_caps.adjtime = i40e_ptp_adjtime;
+	pf->ptp_caps.gettime = i40e_ptp_gettime;
+	pf->ptp_caps.settime = i40e_ptp_settime;
+	pf->ptp_caps.enable = i40e_ptp_enable;
+
+	/* Attempt to register the clock before enabling the hardware. */
+	pf->ptp_clock = ptp_clock_register(&pf->ptp_caps, &pf->pdev->dev);
+	if (IS_ERR(pf->ptp_clock)) {
+		pf->ptp_clock = NULL;
+		dev_err(&pf->pdev->dev, "%s: ptp_clock_register failed\n",
+			__func__);
+	} else {
+		struct timespec ts;
+		u32 regval;
+
+		spin_lock_init(&pf->tmreg_lock);
+		INIT_WORK(&pf->ptp_tx_work, i40e_ptp_tx_work);
+
+		dev_info(&pf->pdev->dev, "%s: added PHC on %s\n", __func__,
+			 netdev->name);
+		pf->flags |= I40E_FLAG_PTP;
+
+		/* Ensure the clocks are running. */
+		regval = rd32(hw, I40E_PRTTSYN_CTL0);
+		regval |= I40E_PRTTSYN_CTL0_TSYNENA_MASK;
+		wr32(hw, I40E_PRTTSYN_CTL0, regval);
+		regval = rd32(hw, I40E_PRTTSYN_CTL1);
+		regval |= I40E_PRTTSYN_CTL1_TSYNENA_MASK;
+		wr32(hw, I40E_PRTTSYN_CTL1, regval);
+
+		/* Set the increment value per clock tick. */
+		i40e_ptp_set_increment(pf);
+
+		/* reset the tstamp_config */
+		memset(&pf->tstamp_config, 0, sizeof(pf->tstamp_config));
+
+		/* Set the clock value. */
+		ts = ktime_to_timespec(ktime_get_real());
+		i40e_ptp_settime(&pf->ptp_caps, &ts);
+	}
+}
+
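Once ptp_clock_register() succeeds, the new PHC is exposed to user space as a /dev/ptpN character device and can be read through the dynamic POSIX clock API. A small user-space sketch, assuming the device enumerated as /dev/ptp0 and using the conventional FD_TO_CLOCKID mapping:

#include <fcntl.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

#define CLOCKFD 3
#define FD_TO_CLOCKID(fd) ((~(clockid_t)(fd) << 3) | CLOCKFD)

int main(void)
{
	struct timespec ts;
	int fd = open("/dev/ptp0", O_RDONLY);	/* index depends on probe order */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (clock_gettime(FD_TO_CLOCKID(fd), &ts)) {
		perror("clock_gettime");
		close(fd);
		return 1;
	}
	printf("PHC time: %lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);
	close(fd);
	return 0;
}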
+/**
+ * i40e_ptp_stop - Disable the driver/hardware support and unregister the PHC
+ * @pf: Board private structure
+ *
+ * This function undoes the work done in i40e_ptp_init: it clears the PTP
+ * state, cancels the Tx timestamp work, frees any outstanding Tx timestamp
+ * skb, and unregisters the PHC.
+ **/
+void i40e_ptp_stop(struct i40e_pf *pf)
+{
+	pf->flags &= ~I40E_FLAG_PTP;
+	pf->ptp_tx = false;
+	pf->ptp_rx = false;
+
+	cancel_work_sync(&pf->ptp_tx_work);
+	if (pf->ptp_tx_skb) {
+		dev_kfree_skb_any(pf->ptp_tx_skb);
+		pf->ptp_tx_skb = NULL;
+	}
+
+	if (pf->ptp_clock) {
+		ptp_clock_unregister(pf->ptp_clock);
+		pf->ptp_clock = NULL;
+		dev_info(&pf->pdev->dev, "%s: removed PHC on %s\n", __func__,
+			 pf->vsi[pf->lan_vsi]->netdev->name);
+	}
+}
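For reference, the get/set handlers above sit behind the standard hardware timestamping ioctl. A user-space sketch of requesting Tx timestamps and PTPv2 event Rx filtering with SIOCSHWTSTAMP; the interface name is an example, and since the driver may broaden the Rx filter, the returned config should be re-read and checked:

#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

/* Enable hardware Tx timestamping and PTPv2 event Rx filtering on one
 * interface. The driver may upgrade rx_filter to a broader class, so the
 * caller should inspect cfg after the ioctl returns.
 */
static int enable_hwtstamp(int sock, const char *ifname)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;

	memset(&cfg, 0, sizeof(cfg));
	cfg.tx_type = HWTSTAMP_TX_ON;
	cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;

	return ioctl(sock, SIOCSHWTSTAMP, &ifr);
}

Any open socket, for example an AF_INET UDP socket, can carry the ioctl; only the ifreq contents matter.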
diff --git a/drivers/net/ethernet/intel/i40e/i40e_register.h b/drivers/net/ethernet/intel/i40e/i40e_register.h
index 6bd333cde28b..1d40f425acf1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_register.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_register.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
  * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
  *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
@@ -28,6 +27,10 @@
 #ifndef _I40E_REGISTER_H_
 #define _I40E_REGISTER_H_
 
+#define I40E_GL_GP_FUSE(_i) (0x0009400C + ((_i) * 4)) /* _i=0...28 */
+#define I40E_GL_GP_FUSE_MAX_INDEX 28
+#define I40E_GL_GP_FUSE_GL_GP_FUSE_SHIFT 0
+#define I40E_GL_GP_FUSE_GL_GP_FUSE_MASK (0xFFFFFFFF << I40E_GL_GP_FUSE_GL_GP_FUSE_SHIFT)
 #define I40E_GLPCI_PM_MUX_NPQ 0x0009C4F4
 #define I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_SHIFT 0
 #define I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_MASK (0x7 << I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_SHIFT)
@@ -38,6 +41,11 @@
 #define I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_MASK (0x1F << I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_SHIFT)
 #define I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_SHIFT 16
 #define I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_MASK (0x7 << I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_SHIFT)
+#define I40E_GLPCI_PQ_MAX_USED_SPC 0x0009C4EC
+#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_SHIFT 0
+#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_MASK (0xFF << I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_SHIFT)
+#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_SHIFT 8
+#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_MASK (0xFF << I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_SHIFT)
 #define I40E_GLPCI_SPARE_BITS_0 0x0009C4F8
 #define I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_SHIFT 0
 #define I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_MASK (0xFFFFFFFF << I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_SHIFT)
@@ -50,9 +58,14 @@
 #define I40E_PFPCI_VF_FLUSH_DONE 0x0009C600
 #define I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_SHIFT 0
 #define I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_MASK (0x1 << I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_SHIFT)
+#define I40E_PFPCI_VF_FLUSH_DONE1(_VF) (0x0009C600 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_PFPCI_VF_FLUSH_DONE1_MAX_INDEX 127
+#define I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_SHIFT 0
+#define I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_MASK (0x1 << I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_SHIFT)
 #define I40E_PFPCI_VM_FLUSH_DONE 0x0009C880
 #define I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_SHIFT 0
 #define I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_MASK (0x1 << I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_SHIFT)
+
 #define I40E_PF_ARQBAH 0x00080180
 #define I40E_PF_ARQBAH_ARQBAH_SHIFT 0
 #define I40E_PF_ARQBAH_ARQBAH_MASK (0xFFFFFFFF << I40E_PF_ARQBAH_ARQBAH_SHIFT)
@@ -837,7 +850,7 @@
 #define I40E_GLHMC_PEQ1FLCNT_FPMPEQ1FLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEQ1FLCNT_FPMPEQ1FLCNT_SHIFT)
 #define I40E_GLHMC_PEQ1FLMAX 0x000C2058
 #define I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_SHIFT 0
-#define I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_MASK (0x3FFFFF << I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_SHIFT)
+#define I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_MASK (0x3FFFFFF << I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_SHIFT)
 #define I40E_GLHMC_PEQ1MAX 0x000C2054
 #define I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_SHIFT 0
 #define I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_MASK (0x3FFFFFF << I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_SHIFT)
@@ -903,7 +916,7 @@
 #define I40E_GLHMC_PEXFFLCNT_FPMPEXFFLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEXFFLCNT_FPMPEXFFLCNT_SHIFT)
 #define I40E_GLHMC_PEXFFLMAX 0x000C204c
 #define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT 0
-#define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_MASK (0x3FFFFF << I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT)
+#define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_MASK (0x1FFFFFF << I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT)
 #define I40E_GLHMC_PEXFMAX 0x000C2048
 #define I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT 0
 #define I40E_GLHMC_PEXFMAX_PMPEXFMAX_MASK (0x3FFFFFF << I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT)
@@ -1636,7 +1649,7 @@
 #define I40E_VSILAN_QBASE_VSIQTABLE_ENA_SHIFT 11
 #define I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK (0x1 << I40E_VSILAN_QBASE_VSIQTABLE_ENA_SHIFT)
 #define I40E_VSILAN_QTABLE(_i, _VSI) (0x00200000 + ((_i) * 2048 + (_VSI) * 4))
-#define I40E_VSILAN_QTABLE_MAX_INDEX 15
+#define I40E_VSILAN_QTABLE_MAX_INDEX 7
 #define I40E_VSILAN_QTABLE_QINDEX_0_SHIFT 0
 #define I40E_VSILAN_QTABLE_QINDEX_0_MASK (0x7FF << I40E_VSILAN_QTABLE_QINDEX_0_SHIFT)
 #define I40E_VSILAN_QTABLE_QINDEX_1_SHIFT 16
@@ -1773,16 +1786,20 @@
 #define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_SHIFT 14
 #define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_SHIFT)
 #define I40E_GL_MNG_FWSM 0x000B6134
-#define I40E_GL_MNG_FWSM_FW_MODES_SHIFT 0
-#define I40E_GL_MNG_FWSM_FW_MODES_MASK (0x3FF << I40E_GL_MNG_FWSM_FW_MODES_SHIFT)
-#define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT 10
+#define I40E_GL_MNG_FWSM_FW_MODES_SHIFT 1
+#define I40E_GL_MNG_FWSM_FW_MODES_MASK (0x7 << I40E_GL_MNG_FWSM_FW_MODES_SHIFT)
+#define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT 6
 #define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_MASK (0x1 << I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT)
 #define I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_SHIFT 11
 #define I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_MASK (0xF << I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_SHIFT)
 #define I40E_GL_MNG_FWSM_FW_STATUS_VALID_SHIFT 15
 #define I40E_GL_MNG_FWSM_FW_STATUS_VALID_MASK (0x1 << I40E_GL_MNG_FWSM_FW_STATUS_VALID_SHIFT)
+#define I40E_GL_MNG_FWSM_RESET_CNT_SHIFT 16
+#define I40E_GL_MNG_FWSM_RESET_CNT_MASK (0x7 << I40E_GL_MNG_FWSM_RESET_CNT_SHIFT)
 #define I40E_GL_MNG_FWSM_EXT_ERR_IND_SHIFT 19
 #define I40E_GL_MNG_FWSM_EXT_ERR_IND_MASK (0x3F << I40E_GL_MNG_FWSM_EXT_ERR_IND_SHIFT)
+#define I40E_GL_MNG_FWSM_RSVD_SHIFT 25
+#define I40E_GL_MNG_FWSM_RSVD_MASK (0x1 << I40E_GL_MNG_FWSM_RSVD_SHIFT)
 #define I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_SHIFT 26
 #define I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_MASK (0x1 << I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_SHIFT)
 #define I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_SHIFT 27
@@ -2035,6 +2052,28 @@
 #define I40E_GLNVM_SRDATA_WRDATA_MASK (0xFFFF << I40E_GLNVM_SRDATA_WRDATA_SHIFT)
 #define I40E_GLNVM_SRDATA_RDDATA_SHIFT 16
 #define I40E_GLNVM_SRDATA_RDDATA_MASK (0xFFFF << I40E_GLNVM_SRDATA_RDDATA_SHIFT)
+#define I40E_GLNVM_ULD 0x000B6008
+#define I40E_GLNVM_ULD_CONF_PCIR_DONE_SHIFT 0
+#define I40E_GLNVM_ULD_CONF_PCIR_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_PCIR_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_PCIRTL_DONE_SHIFT 1
+#define I40E_GLNVM_ULD_CONF_PCIRTL_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_PCIRTL_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_LCB_DONE_SHIFT 2
+#define I40E_GLNVM_ULD_CONF_LCB_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_LCB_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_CORE_DONE_SHIFT 3
+#define I40E_GLNVM_ULD_CONF_CORE_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_CORE_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_GLOBAL_DONE_SHIFT 4
+#define I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_GLOBAL_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_POR_DONE_SHIFT 5
+#define I40E_GLNVM_ULD_CONF_POR_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_POR_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_SHIFT 6
+#define I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_SHIFT 7
+#define I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_EMP_DONE_SHIFT 8
+#define I40E_GLNVM_ULD_CONF_EMP_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_EMP_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_PCIALT_DONE_SHIFT 9
+#define I40E_GLNVM_ULD_CONF_PCIALT_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_PCIALT_DONE_SHIFT)
+
 #define I40E_GLPCI_BYTCTH 0x0009C484
 #define I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_SHIFT 0
 #define I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_MASK (0xFFFFFFFF << I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_SHIFT)
@@ -2170,6 +2209,12 @@
 #define I40E_GLPCI_PCIERR 0x000BE4FC
 #define I40E_GLPCI_PCIERR_PCIE_ERR_REP_SHIFT 0
 #define I40E_GLPCI_PCIERR_PCIE_ERR_REP_MASK (0xFFFFFFFF << I40E_GLPCI_PCIERR_PCIE_ERR_REP_SHIFT)
+#define I40E_GLPCI_PCITEST2 0x000BE4BC
+#define I40E_GLPCI_PCITEST2_IOV_TEST_MODE_SHIFT 0
+#define I40E_GLPCI_PCITEST2_IOV_TEST_MODE_MASK (0x1 << I40E_GLPCI_PCITEST2_IOV_TEST_MODE_SHIFT)
+#define I40E_GLPCI_PCITEST2_TAG_ALLOC_SHIFT 1
+#define I40E_GLPCI_PCITEST2_TAG_ALLOC_MASK (0x1 << I40E_GLPCI_PCITEST2_TAG_ALLOC_SHIFT)
+
 #define I40E_GLPCI_PKTCT 0x0009C4BC
 #define I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_SHIFT 0
 #define I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_MASK (0xFFFFFFFF << I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_SHIFT)
@@ -2380,8 +2425,7 @@
 #define I40E_PFPE_IPCONFIG0_PEIPID_MASK (0xFFFF << I40E_PFPE_IPCONFIG0_PEIPID_SHIFT)
 #define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16
 #define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_MASK (0x1 << I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT)
-#define I40E_PFPE_IPCONFIG0_USEUPPERIDRANGE_SHIFT 17
-#define I40E_PFPE_IPCONFIG0_USEUPPERIDRANGE_MASK (0x1 << I40E_PFPE_IPCONFIG0_USEUPPERIDRANGE_SHIFT)
+
 #define I40E_PFPE_MRTEIDXMASK 0x00008600
 #define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0
 #define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_MASK (0x1F << I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT)
@@ -2460,8 +2504,6 @@
 #define I40E_VFPE_IPCONFIG0_PEIPID_MASK (0xFFFF << I40E_VFPE_IPCONFIG0_PEIPID_SHIFT)
 #define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16
 #define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_MASK (0x1 << I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT)
-#define I40E_VFPE_IPCONFIG0_USEUPPERIDRANGE_SHIFT 17
-#define I40E_VFPE_IPCONFIG0_USEUPPERIDRANGE_MASK (0x1 << I40E_VFPE_IPCONFIG0_USEUPPERIDRANGE_SHIFT)
 #define I40E_VFPE_MRTEIDXMASK(_VF) (0x00003000 + ((_VF) * 4)) /* _i=0...127 */
 #define I40E_VFPE_MRTEIDXMASK_MAX_INDEX 127
 #define I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0
@@ -3141,30 +3183,6 @@
 #define I40E_GLPES_VFUDPTXPKTSLO_MAX_INDEX 31
 #define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT 0
 #define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT)
-#define I40E_GLPM_DMACR 0x000881F4
-#define I40E_GLPM_DMACR_DMACWT_SHIFT 0
-#define I40E_GLPM_DMACR_DMACWT_MASK (0xFFFF << I40E_GLPM_DMACR_DMACWT_SHIFT)
-#define I40E_GLPM_DMACR_EXIT_DC_SHIFT 29
-#define I40E_GLPM_DMACR_EXIT_DC_MASK (0x1 << I40E_GLPM_DMACR_EXIT_DC_SHIFT)
-#define I40E_GLPM_DMACR_LX_COALESCING_INDICATION_SHIFT 30
-#define I40E_GLPM_DMACR_LX_COALESCING_INDICATION_MASK (0x1 << I40E_GLPM_DMACR_LX_COALESCING_INDICATION_SHIFT)
-#define I40E_GLPM_DMACR_DMAC_EN_SHIFT 31
-#define I40E_GLPM_DMACR_DMAC_EN_MASK (0x1 << I40E_GLPM_DMACR_DMAC_EN_SHIFT)
-#define I40E_GLPM_LTRC 0x000BE500
-#define I40E_GLPM_LTRC_SLTRV_SHIFT 0
-#define I40E_GLPM_LTRC_SLTRV_MASK (0x3FF << I40E_GLPM_LTRC_SLTRV_SHIFT)
-#define I40E_GLPM_LTRC_SSCALE_SHIFT 10
-#define I40E_GLPM_LTRC_SSCALE_MASK (0x7 << I40E_GLPM_LTRC_SSCALE_SHIFT)
-#define I40E_GLPM_LTRC_LTRS_REQUIREMENT_SHIFT 15
-#define I40E_GLPM_LTRC_LTRS_REQUIREMENT_MASK (0x1 << I40E_GLPM_LTRC_LTRS_REQUIREMENT_SHIFT)
-#define I40E_GLPM_LTRC_NSLTRV_SHIFT 16
-#define I40E_GLPM_LTRC_NSLTRV_MASK (0x3FF << I40E_GLPM_LTRC_NSLTRV_SHIFT)
-#define I40E_GLPM_LTRC_NSSCALE_SHIFT 26
-#define I40E_GLPM_LTRC_NSSCALE_MASK (0x7 << I40E_GLPM_LTRC_NSSCALE_SHIFT)
-#define I40E_GLPM_LTRC_LTR_SEND_SHIFT 30
-#define I40E_GLPM_LTRC_LTR_SEND_MASK (0x1 << I40E_GLPM_LTRC_LTR_SEND_SHIFT)
-#define I40E_GLPM_LTRC_LTRNS_REQUIREMENT_SHIFT 31
-#define I40E_GLPM_LTRC_LTRNS_REQUIREMENT_MASK (0x1 << I40E_GLPM_LTRC_LTRNS_REQUIREMENT_SHIFT)
 #define I40E_PRTPM_EEE_STAT 0x001E4320
 #define I40E_PRTPM_EEE_STAT_EEE_NEG_SHIFT 29
 #define I40E_PRTPM_EEE_STAT_EEE_NEG_MASK (0x1 << I40E_PRTPM_EEE_STAT_EEE_NEG_SHIFT)
@@ -3201,9 +3219,6 @@
 #define I40E_PRTPM_GC_LCDMP_MASK (0x1 << I40E_PRTPM_GC_LCDMP_SHIFT)
 #define I40E_PRTPM_GC_LPLU_ASSERTED_SHIFT 31
 #define I40E_PRTPM_GC_LPLU_ASSERTED_MASK (0x1 << I40E_PRTPM_GC_LPLU_ASSERTED_SHIFT)
-#define I40E_PRTPM_HPTC 0x000AC800
-#define I40E_PRTPM_HPTC_HIGH_PRI_TC_SHIFT 0
-#define I40E_PRTPM_HPTC_HIGH_PRI_TC_MASK (0xFF << I40E_PRTPM_HPTC_HIGH_PRI_TC_SHIFT)
 #define I40E_PRTPM_RLPIC 0x001E43A0
 #define I40E_PRTPM_RLPIC_ERLPIC_SHIFT 0
 #define I40E_PRTPM_RLPIC_ERLPIC_MASK (0xFFFFFFFF << I40E_PRTPM_RLPIC_ERLPIC_SHIFT)
@@ -3265,8 +3280,8 @@
 #define I40E_GLQF_CTL_HTOEP_FCOE_MASK (0x1 << I40E_GLQF_CTL_HTOEP_FCOE_SHIFT)
 #define I40E_GLQF_CTL_PCNT_ALLOC_SHIFT 3
 #define I40E_GLQF_CTL_PCNT_ALLOC_MASK (0x7 << I40E_GLQF_CTL_PCNT_ALLOC_SHIFT)
-#define I40E_GLQF_CTL_DDPLPEN_SHIFT 7
-#define I40E_GLQF_CTL_DDPLPEN_MASK (0x1 << I40E_GLQF_CTL_DDPLPEN_SHIFT)
+#define I40E_GLQF_CTL_RSVD_SHIFT 7
+#define I40E_GLQF_CTL_RSVD_MASK (0x1 << I40E_GLQF_CTL_RSVD_SHIFT)
 #define I40E_GLQF_CTL_MAXPEBLEN_SHIFT 8
 #define I40E_GLQF_CTL_MAXPEBLEN_MASK (0x7 << I40E_GLQF_CTL_MAXPEBLEN_SHIFT)
 #define I40E_GLQF_CTL_MAXFCBLEN_SHIFT 11
@@ -3416,9 +3431,9 @@
 #define I40E_PRTQF_FLX_PIT(_i) (0x00255200 + ((_i) * 32)) /* _i=0...8 */
 #define I40E_PRTQF_FLX_PIT_MAX_INDEX 8
 #define I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT 0
-#define I40E_PRTQF_FLX_PIT_SOURCE_OFF_MASK (0x3F << I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT)
-#define I40E_PRTQF_FLX_PIT_FSIZE_SHIFT 6
-#define I40E_PRTQF_FLX_PIT_FSIZE_MASK (0xF << I40E_PRTQF_FLX_PIT_FSIZE_SHIFT)
+#define I40E_PRTQF_FLX_PIT_SOURCE_OFF_MASK (0x1F << I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT)
+#define I40E_PRTQF_FLX_PIT_FSIZE_SHIFT 5
+#define I40E_PRTQF_FLX_PIT_FSIZE_MASK (0x1F << I40E_PRTQF_FLX_PIT_FSIZE_SHIFT)
 #define I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT 10
 #define I40E_PRTQF_FLX_PIT_DEST_OFF_MASK (0x3F << I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT)
 #define I40E_VFQF_HENA1(_i, _VF) (0x00230800 + ((_i) * 1024 + (_VF) * 4))
@@ -3504,7 +3519,7 @@
 #define I40E_VSIQF_CTL_PEMFRAG_ENA_SHIFT 5
 #define I40E_VSIQF_CTL_PEMFRAG_ENA_MASK (0x1 << I40E_VSIQF_CTL_PEMFRAG_ENA_SHIFT)
 #define I40E_VSIQF_TCREGION(_i, _VSI) (0x00206000 + ((_i) * 2048 + (_VSI) * 4))
-#define I40E_VSIQF_TCREGION_MAX_INDEX 7
+#define I40E_VSIQF_TCREGION_MAX_INDEX 3
 #define I40E_VSIQF_TCREGION_TC_OFFSET_SHIFT 0
 #define I40E_VSIQF_TCREGION_TC_OFFSET_MASK (0x1FF << I40E_VSIQF_TCREGION_TC_OFFSET_SHIFT)
 #define I40E_VSIQF_TCREGION_TC_SIZE_SHIFT 9
@@ -3521,10 +3536,7 @@
 #define I40E_GL_FCOEDDPC_MAX_INDEX 143
 #define I40E_GL_FCOEDDPC_FCOEDDPC_SHIFT 0
 #define I40E_GL_FCOEDDPC_FCOEDDPC_MASK (0xFFFFFFFF << I40E_GL_FCOEDDPC_FCOEDDPC_SHIFT)
-#define I40E_GL_FCOEDDPEC(_i) (0x00314900 + ((_i) * 8)) /* _i=0...143 */
-#define I40E_GL_FCOEDDPEC_MAX_INDEX 143
-#define I40E_GL_FCOEDDPEC_CFOEDDPEC_SHIFT 0
-#define I40E_GL_FCOEDDPEC_CFOEDDPEC_MASK (0xFFFFFFFF << I40E_GL_FCOEDDPEC_CFOEDDPEC_SHIFT)
+/* _i=0...143 */
 #define I40E_GL_FCOEDIFEC(_i) (0x00318480 + ((_i) * 8)) /* _i=0...143 */
 #define I40E_GL_FCOEDIFEC_MAX_INDEX 143
 #define I40E_GL_FCOEDIFEC_FCOEDIFRC_SHIFT 0
@@ -4276,46 +4288,10 @@
 #define I40E_PFPM_APM 0x000B8080
 #define I40E_PFPM_APM_APME_SHIFT 0
 #define I40E_PFPM_APM_APME_MASK (0x1 << I40E_PFPM_APM_APME_SHIFT)
-#define I40E_PFPM_FHFT_DATA(_i, _j) (0x00060000 + ((_i) * 4096 + (_j) * 128))
-#define I40E_PFPM_FHFT_DATA_MAX_INDEX 7
-#define I40E_PFPM_FHFT_DATA_DWORD_SHIFT 0
-#define I40E_PFPM_FHFT_DATA_DWORD_MASK (0xFFFFFFFF << I40E_PFPM_FHFT_DATA_DWORD_SHIFT)
 #define I40E_PFPM_FHFT_LENGTH(_i) (0x0006A000 + ((_i) * 128)) /* _i=0...7 */
 #define I40E_PFPM_FHFT_LENGTH_MAX_INDEX 7
 #define I40E_PFPM_FHFT_LENGTH_LENGTH_SHIFT 0
 #define I40E_PFPM_FHFT_LENGTH_LENGTH_MASK (0xFF << I40E_PFPM_FHFT_LENGTH_LENGTH_SHIFT)
-#define I40E_PFPM_FHFT_MASK(_i, _j) (0x00068000 + ((_i) * 1024 + (_j) * 128))
-#define I40E_PFPM_FHFT_MASK_MAX_INDEX 7
-#define I40E_PFPM_FHFT_MASK_MASK_SHIFT 0
-#define I40E_PFPM_FHFT_MASK_MASK_MASK (0xFFFF << I40E_PFPM_FHFT_MASK_MASK_SHIFT)
-#define I40E_PFPM_PROXYFC 0x00245A80
-#define I40E_PFPM_PROXYFC_PPROXYE_SHIFT 0
-#define I40E_PFPM_PROXYFC_PPROXYE_MASK (0x1 << I40E_PFPM_PROXYFC_PPROXYE_SHIFT)
-#define I40E_PFPM_PROXYFC_EX_SHIFT 1
-#define I40E_PFPM_PROXYFC_EX_MASK (0x1 << I40E_PFPM_PROXYFC_EX_SHIFT)
-#define I40E_PFPM_PROXYFC_ARP_SHIFT 4
-#define I40E_PFPM_PROXYFC_ARP_MASK (0x1 << I40E_PFPM_PROXYFC_ARP_SHIFT)
-#define I40E_PFPM_PROXYFC_ARP_DIRECTED_SHIFT 5
-#define I40E_PFPM_PROXYFC_ARP_DIRECTED_MASK (0x1 << I40E_PFPM_PROXYFC_ARP_DIRECTED_SHIFT)
-#define I40E_PFPM_PROXYFC_NS_SHIFT 9
-#define I40E_PFPM_PROXYFC_NS_MASK (0x1 << I40E_PFPM_PROXYFC_NS_SHIFT)
-#define I40E_PFPM_PROXYFC_NS_DIRECTED_SHIFT 10
-#define I40E_PFPM_PROXYFC_NS_DIRECTED_MASK (0x1 << I40E_PFPM_PROXYFC_NS_DIRECTED_SHIFT)
-#define I40E_PFPM_PROXYFC_MLD_SHIFT 12
-#define I40E_PFPM_PROXYFC_MLD_MASK (0x1 << I40E_PFPM_PROXYFC_MLD_SHIFT)
-#define I40E_PFPM_PROXYS 0x00245B80
-#define I40E_PFPM_PROXYS_EX_SHIFT 1
-#define I40E_PFPM_PROXYS_EX_MASK (0x1 << I40E_PFPM_PROXYS_EX_SHIFT)
-#define I40E_PFPM_PROXYS_ARP_SHIFT 4
-#define I40E_PFPM_PROXYS_ARP_MASK (0x1 << I40E_PFPM_PROXYS_ARP_SHIFT)
-#define I40E_PFPM_PROXYS_ARP_DIRECTED_SHIFT 5
-#define I40E_PFPM_PROXYS_ARP_DIRECTED_MASK (0x1 << I40E_PFPM_PROXYS_ARP_DIRECTED_SHIFT)
-#define I40E_PFPM_PROXYS_NS_SHIFT 9
-#define I40E_PFPM_PROXYS_NS_MASK (0x1 << I40E_PFPM_PROXYS_NS_SHIFT)
-#define I40E_PFPM_PROXYS_NS_DIRECTED_SHIFT 10
-#define I40E_PFPM_PROXYS_NS_DIRECTED_MASK (0x1 << I40E_PFPM_PROXYS_NS_DIRECTED_SHIFT)
-#define I40E_PFPM_PROXYS_MLD_SHIFT 12
-#define I40E_PFPM_PROXYS_MLD_MASK (0x1 << I40E_PFPM_PROXYS_MLD_SHIFT)
 #define I40E_PFPM_WUC 0x0006B200
 #define I40E_PFPM_WUC_EN_APM_D0_SHIFT 5
 #define I40E_PFPM_WUC_EN_APM_D0_MASK (0x1 << I40E_PFPM_WUC_EN_APM_D0_SHIFT)
@@ -4536,21 +4512,21 @@
 #define I40E_VFMSIX_PBA 0x00002000
 #define I40E_VFMSIX_PBA_PENBIT_SHIFT 0
 #define I40E_VFMSIX_PBA_PENBIT_MASK (0xFFFFFFFF << I40E_VFMSIX_PBA_PENBIT_SHIFT)
-#define I40E_VFMSIX_TADD(_i) (0x00000008 + ((_i) * 16)) /* _i=0...16 */
+#define I40E_VFMSIX_TADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...16 */
 #define I40E_VFMSIX_TADD_MAX_INDEX 16
 #define I40E_VFMSIX_TADD_MSIXTADD10_SHIFT 0
 #define I40E_VFMSIX_TADD_MSIXTADD10_MASK (0x3 << I40E_VFMSIX_TADD_MSIXTADD10_SHIFT)
 #define I40E_VFMSIX_TADD_MSIXTADD_SHIFT 2
 #define I40E_VFMSIX_TADD_MSIXTADD_MASK (0x3FFFFFFF << I40E_VFMSIX_TADD_MSIXTADD_SHIFT)
-#define I40E_VFMSIX_TMSG(_i) (0x0000000C + ((_i) * 16)) /* _i=0...16 */
+#define I40E_VFMSIX_TMSG(_i) (0x00000008 + ((_i) * 16)) /* _i=0...16 */
 #define I40E_VFMSIX_TMSG_MAX_INDEX 16
 #define I40E_VFMSIX_TMSG_MSIXTMSG_SHIFT 0
 #define I40E_VFMSIX_TMSG_MSIXTMSG_MASK (0xFFFFFFFF << I40E_VFMSIX_TMSG_MSIXTMSG_SHIFT)
-#define I40E_VFMSIX_TUADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...16 */
+#define I40E_VFMSIX_TUADD(_i) (0x00000004 + ((_i) * 16)) /* _i=0...16 */
 #define I40E_VFMSIX_TUADD_MAX_INDEX 16
 #define I40E_VFMSIX_TUADD_MSIXTUADD_SHIFT 0
 #define I40E_VFMSIX_TUADD_MSIXTUADD_MASK (0xFFFFFFFF << I40E_VFMSIX_TUADD_MSIXTUADD_SHIFT)
-#define I40E_VFMSIX_TVCTRL(_i) (0x00000004 + ((_i) * 16)) /* _i=0...16 */
+#define I40E_VFMSIX_TVCTRL(_i) (0x0000000C + ((_i) * 16)) /* _i=0...16 */
 #define I40E_VFMSIX_TVCTRL_MAX_INDEX 16
 #define I40E_VFMSIX_TVCTRL_MASK_SHIFT 0
 #define I40E_VFMSIX_TVCTRL_MASK_MASK (0x1 << I40E_VFMSIX_TVCTRL_MASK_SHIFT)
@@ -4610,8 +4586,6 @@
 #define I40E_VFPE_IPCONFIG01_PEIPID_MASK (0xFFFF << I40E_VFPE_IPCONFIG01_PEIPID_SHIFT)
 #define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT 16
 #define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_MASK (0x1 << I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT)
-#define I40E_VFPE_IPCONFIG01_USEUPPERIDRANGE_SHIFT 17
-#define I40E_VFPE_IPCONFIG01_USEUPPERIDRANGE_MASK (0x1 << I40E_VFPE_IPCONFIG01_USEUPPERIDRANGE_SHIFT)
 #define I40E_VFPE_MRTEIDXMASK1 0x00009000
 #define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT 0
 #define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_MASK (0x1F << I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT)
@@ -4684,5 +4658,13 @@
 #define I40E_VFQF_HREGION_OVERRIDE_ENA_7_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT)
 #define I40E_VFQF_HREGION_REGION_7_SHIFT 29
 #define I40E_VFQF_HREGION_REGION_7_MASK (0x7 << I40E_VFQF_HREGION_REGION_7_SHIFT)
-
+#define I40E_RCU_PST_FOC_ACCESS_STATUS 0x00270110
+#define I40E_RCU_PST_FOC_ACCESS_STATUS_WR_ACCESS_CNT_SHIFT 0
+#define I40E_RCU_PST_FOC_ACCESS_STATUS_WR_ACCESS_CNT_MASK (0xFF << I40E_RCU_PST_FOC_ACCESS_STATUS_WR_ACCESS_CNT_SHIFT)
+#define I40E_RCU_PST_FOC_ACCESS_STATUS_RD_ACCESS_CNT_SHIFT 8
+#define I40E_RCU_PST_FOC_ACCESS_STATUS_RD_ACCESS_CNT_MASK (0xFF << I40E_RCU_PST_FOC_ACCESS_STATUS_RD_ACCESS_CNT_SHIFT)
+#define I40E_RCU_PST_FOC_ACCESS_STATUS_ERR_CNT_SHIFT 16
+#define I40E_RCU_PST_FOC_ACCESS_STATUS_ERR_CNT_MASK (0xFF << I40E_RCU_PST_FOC_ACCESS_STATUS_ERR_CNT_SHIFT)
+#define I40E_RCU_PST_FOC_ACCESS_STATUS_LAST_ERR_CODE_SHIFT 24
+#define I40E_RCU_PST_FOC_ACCESS_STATUS_LAST_ERR_CODE_MASK (0x7 << I40E_RCU_PST_FOC_ACCESS_STATUS_LAST_ERR_CODE_SHIFT)
 #endif
diff --git a/drivers/net/ethernet/intel/i40e/i40e_status.h b/drivers/net/ethernet/intel/i40e/i40e_status.h
index 5e5bcddac573..5f9cac55aa55 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_status.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_status.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
  * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
  *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index f1f03bc5c729..d4bb482b1a7f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
  * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
  *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
@@ -77,7 +76,6 @@ int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data,
 	/* grab the next descriptor */
 	i = tx_ring->next_to_use;
 	fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
-	tx_buf = &tx_ring->tx_bi[i];
 
 	tx_ring->next_to_use = (i + 1 < tx_ring->count) ? i + 1 : 0;
 
@@ -129,15 +127,23 @@ int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data,
 	/* Now program a dummy descriptor */
 	i = tx_ring->next_to_use;
 	tx_desc = I40E_TX_DESC(tx_ring, i);
+	tx_buf = &tx_ring->tx_bi[i];
 
 	tx_ring->next_to_use = (i + 1 < tx_ring->count) ? i + 1 : 0;
 
+	/* record length and DMA address */
+	dma_unmap_len_set(tx_buf, len, I40E_FDIR_MAX_RAW_PACKET_LOOKUP);
+	dma_unmap_addr_set(tx_buf, dma, dma);
+
 	tx_desc->buffer_addr = cpu_to_le64(dma);
 	td_cmd = I40E_TXD_CMD | I40E_TX_DESC_CMD_DUMMY;
 
 	tx_desc->cmd_type_offset_bsz =
 		build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_LOOKUP, 0);
 
+	/* set the timestamp */
+	tx_buf->time_stamp = jiffies;
+
 	/* Force memory writes to complete before letting h/w
 	 * know there are new descriptors to fetch.  (Only
 	 * applicable for weak-ordered memory model archs,
@@ -768,7 +774,7 @@ void i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
 			skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
 							rx_ring->rx_buf_len);
 			if (!skb) {
-				rx_ring->rx_stats.alloc_rx_buff_failed++;
+				rx_ring->rx_stats.alloc_buff_failed++;
 				goto no_buffers;
 			}
 			/* initialize queue mapping */
@@ -782,7 +788,7 @@ void i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
 						 rx_ring->rx_buf_len,
 						 DMA_FROM_DEVICE);
 			if (dma_mapping_error(rx_ring->dev, bi->dma)) {
-				rx_ring->rx_stats.alloc_rx_buff_failed++;
+				rx_ring->rx_stats.alloc_buff_failed++;
 				bi->dma = 0;
 				goto no_buffers;
 			}
@@ -792,7 +798,7 @@ void i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
 			if (!bi->page) {
 				bi->page = alloc_page(GFP_ATOMIC);
 				if (!bi->page) {
-					rx_ring->rx_stats.alloc_rx_page_failed++;
+					rx_ring->rx_stats.alloc_page_failed++;
 					goto no_buffers;
 				}
 			}
@@ -807,7 +813,7 @@ void i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
 							    DMA_FROM_DEVICE);
 				if (dma_mapping_error(rx_ring->dev,
 						      bi->page_dma)) {
-					rx_ring->rx_stats.alloc_rx_page_failed++;
+					rx_ring->rx_stats.alloc_page_failed++;
 					bi->page_dma = 0;
 					goto no_buffers;
 				}
@@ -860,12 +866,25 @@ static void i40e_receive_skb(struct i40e_ring *rx_ring,
  * @skb: skb currently being received and modified
  * @rx_status: status value of last descriptor in packet
  * @rx_error: error value of last descriptor in packet
+ * @rx_ptype: ptype value of last descriptor in packet
  **/
 static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
 				    struct sk_buff *skb,
 				    u32 rx_status,
-				    u32 rx_error)
+				    u32 rx_error,
+				    u16 rx_ptype)
 {
+	bool ipv4_tunnel, ipv6_tunnel;
+	__wsum rx_udp_csum;
+	__sum16 csum;
+	struct iphdr *iph;
+
+	ipv4_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT4_MAC_PAY3) &&
+		      (rx_ptype < I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4);
+	ipv6_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT6_MAC_PAY3) &&
+		      (rx_ptype < I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4);
+
+	skb->encapsulation = ipv4_tunnel || ipv6_tunnel;
 	skb->ip_summed = CHECKSUM_NONE;
 
 	/* Rx csum enabled and ip headers found? */
@@ -873,13 +892,47 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
 	      rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
 		return;
 
-	/* IP or L4 checksum error */
+	/* likely incorrect csum if alternate IP extension headers found */
+	if (rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
+		return;
+
+	/* IP or L4 or outermost IP checksum error */
 	if (rx_error & ((1 << I40E_RX_DESC_ERROR_IPE_SHIFT) |
-			(1 << I40E_RX_DESC_ERROR_L4E_SHIFT))) {
+			(1 << I40E_RX_DESC_ERROR_L4E_SHIFT) |
+			(1 << I40E_RX_DESC_ERROR_EIPE_SHIFT))) {
 		vsi->back->hw_csum_rx_error++;
 		return;
 	}
 
+	if (ipv4_tunnel &&
+	    !(rx_status & (1 << I40E_RX_DESC_STATUS_UDP_0_SHIFT))) {
+		/* If VXLAN traffic has an outer UDPv4 checksum we need to check
+		 * it in the driver; hardware does not do it for us.
+		 * Since the L3L4P bit was set, we assume a valid IHL value
+		 * (>= 5), so the total IPv4 header length is IHL * 4 bytes.
+		 */
+		skb->transport_header = skb->mac_header +
+					sizeof(struct ethhdr) +
+					(ip_hdr(skb)->ihl * 4);
+
+		/* Add 4 bytes for VLAN tagged packets */
+		skb->transport_header += (skb->protocol == htons(ETH_P_8021Q) ||
+					  skb->protocol == htons(ETH_P_8021AD))
+					  ? VLAN_HLEN : 0;
+
+		rx_udp_csum = udp_csum(skb);
+		iph = ip_hdr(skb);
+		csum = csum_tcpudp_magic(
+				iph->saddr, iph->daddr,
+				(skb->len - skb_transport_offset(skb)),
+				IPPROTO_UDP, rx_udp_csum);
+
+		if (udp_hdr(skb)->check != csum) {
+			vsi->back->hw_csum_rx_error++;
+			return;
+		}
+	}
+
 	skb->ip_summed = CHECKSUM_UNNECESSARY;
 }
 
@@ -891,13 +944,15 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
 static inline u32 i40e_rx_hash(struct i40e_ring *ring,
 			       union i40e_rx_desc *rx_desc)
 {
-	if (ring->netdev->features & NETIF_F_RXHASH) {
-		if ((le64_to_cpu(rx_desc->wb.qword1.status_error_len) >>
-		     I40E_RX_DESC_STATUS_FLTSTAT_SHIFT) &
-		    I40E_RX_DESC_FLTSTAT_RSS_HASH)
-			return le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
-	}
-	return 0;
+	const __le64 rss_mask =
+		cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
+			    I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
+
+	if ((ring->netdev->features & NETIF_F_RXHASH) &&
+	    (rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask)
+		return le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
+	else
+		return 0;
 }
 
 /**
@@ -918,11 +973,12 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
 	union i40e_rx_desc *rx_desc;
 	u32 rx_error, rx_status;
 	u64 qword;
+	u16 rx_ptype;
 
 	rx_desc = I40E_RX_DESC(rx_ring, i);
 	qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
-	rx_status = (qword & I40E_RXD_QW1_STATUS_MASK)
-				>> I40E_RXD_QW1_STATUS_SHIFT;
+	rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
+		    I40E_RXD_QW1_STATUS_SHIFT;
 
 	while (rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) {
 		union i40e_rx_desc *next_rxd;
@@ -938,18 +994,20 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
 		skb = rx_bi->skb;
 		prefetch(skb->data);
 
-		rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK)
-					      >> I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
-		rx_header_len = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK)
-					      >> I40E_RXD_QW1_LENGTH_HBUF_SHIFT;
-		rx_sph = (qword & I40E_RXD_QW1_LENGTH_SPH_MASK)
-					      >> I40E_RXD_QW1_LENGTH_SPH_SHIFT;
+		rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
+				I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
+		rx_header_len = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK) >>
+				I40E_RXD_QW1_LENGTH_HBUF_SHIFT;
+		rx_sph = (qword & I40E_RXD_QW1_LENGTH_SPH_MASK) >>
+			 I40E_RXD_QW1_LENGTH_SPH_SHIFT;
 
-		rx_error = (qword & I40E_RXD_QW1_ERROR_MASK)
-					      >> I40E_RXD_QW1_ERROR_SHIFT;
+		rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
+			   I40E_RXD_QW1_ERROR_SHIFT;
 		rx_hbo = rx_error & (1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
 		rx_error &= ~(1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
 
+		rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
+			   I40E_RXD_QW1_PTYPE_SHIFT;
 		rx_bi->skb = NULL;
 
 		/* This memory barrier is needed to keep us from reading
@@ -1030,13 +1088,21 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
 		}
 
 		skb->rxhash = i40e_rx_hash(rx_ring, rx_desc);
-		i40e_rx_checksum(vsi, skb, rx_status, rx_error);
+		if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) {
+			i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status &
+					   I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
+					   I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT);
+			rx_ring->last_rx_timestamp = jiffies;
+		}
 
 		/* probably a little skewed due to removing CRC */
 		total_rx_bytes += skb->len;
 		total_rx_packets++;
 
 		skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+
+		i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
+
 		vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
 			 ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
 			 : 0;
@@ -1059,8 +1125,8 @@ next_desc:
 		/* use prefetched values */
 		rx_desc = next_rxd;
 		qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
-		rx_status = (qword & I40E_RXD_QW1_STATUS_MASK)
-						>> I40E_RXD_QW1_STATUS_SHIFT;
+		rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
+			    I40E_RXD_QW1_STATUS_SHIFT;
 	}
 
 	rx_ring->next_to_clean = i;
@@ -1173,7 +1239,7 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
 	u16 i;
 
 	/* make sure ATR is enabled */
-	if (!(pf->flags & I40E_FLAG_FDIR_ATR_ENABLED))
+	if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED))
 		return;
 
 	/* if sampling is disabled do nothing */
@@ -1268,7 +1334,7 @@ static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
 		tx_flags |= vlan_tx_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
 		tx_flags |= I40E_TX_FLAGS_HW_VLAN;
 	/* else if it is a SW VLAN, check the next protocol and store the tag */
-	} else if (protocol == __constant_htons(ETH_P_8021Q)) {
+	} else if (protocol == htons(ETH_P_8021Q)) {
 		struct vlan_hdr *vhdr, _vhdr;
 		vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
 		if (!vhdr)
@@ -1333,7 +1399,7 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
 			return err;
 	}
 
-	if (protocol == __constant_htons(ETH_P_IP)) {
+	if (protocol == htons(ETH_P_IP)) {
 		iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
 		tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
 		iph->tot_len = 0;
@@ -1359,10 +1425,50 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
 	cd_cmd = I40E_TX_CTX_DESC_TSO;
 	cd_tso_len = skb->len - *hdr_len;
 	cd_mss = skb_shinfo(skb)->gso_size;
-	*cd_type_cmd_tso_mss |= ((u64)cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT)
-			     | ((u64)cd_tso_len
-				<< I40E_TXD_CTX_QW1_TSO_LEN_SHIFT)
-			     | ((u64)cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
+	*cd_type_cmd_tso_mss |= ((u64)cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
+				((u64)cd_tso_len <<
+				 I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
+				((u64)cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
+	return 1;
+}
+
+/**
+ * i40e_tsyn - set up the tsyn context descriptor
+ * @tx_ring:  ptr to the ring to send
+ * @skb:      ptr to the skb we're sending
+ * @tx_flags: the collected send information
+ * @cd_type_cmd_tso_mss: context descriptor field to receive the TSYN command
+ *
+ * Returns 1 if a Tx hardware timestamp will be taken for this packet, 0 if not
+ **/
+static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
+		     u32 tx_flags, u64 *cd_type_cmd_tso_mss)
+{
+	struct i40e_pf *pf;
+
+	if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
+		return 0;
+
+	/* Tx timestamps cannot be sampled when doing TSO */
+	if (tx_flags & I40E_TX_FLAGS_TSO)
+		return 0;
+
+	/* only timestamp the outbound packet if the user has requested it and
+	 * we are not already transmitting a packet to be timestamped
+	 */
+	pf = i40e_netdev_to_pf(tx_ring->netdev);
+	if (pf->ptp_tx && !pf->ptp_tx_skb) {
+		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+		pf->ptp_tx_skb = skb_get(skb);
+	} else {
+		return 0;
+	}
+
+	*cd_type_cmd_tso_mss |= (u64)I40E_TX_CTX_DESC_TSYN <<
+				I40E_TXD_CTX_QW1_CMD_SHIFT;
+
+	pf->ptp_tx_start = jiffies;
+	schedule_work(&pf->ptp_tx_work);
+
 	return 1;
 }
 
@@ -1660,6 +1766,7 @@ dma_error:
 static inline int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
 {
 	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
+	/* Memory barrier before checking head and tail */
 	smp_mb();
 
 	/* Check again in a case another CPU has just made room available. */
@@ -1741,6 +1848,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
 	__be16 protocol;
 	u32 td_cmd = 0;
 	u8 hdr_len = 0;
+	int tsyn;
 	int tso;
 	if (0 == i40e_xmit_descriptor_count(skb, tx_ring))
 		return NETDEV_TX_BUSY;
@@ -1756,9 +1864,9 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
 	first = &tx_ring->tx_bi[tx_ring->next_to_use];
 
 	/* setup IPv4/IPv6 offloads */
-	if (protocol == __constant_htons(ETH_P_IP))
+	if (protocol == htons(ETH_P_IP))
 		tx_flags |= I40E_TX_FLAGS_IPV4;
-	else if (protocol == __constant_htons(ETH_P_IPV6))
+	else if (protocol == htons(ETH_P_IPV6))
 		tx_flags |= I40E_TX_FLAGS_IPV6;
 
 	tso = i40e_tso(tx_ring, skb, tx_flags, protocol, &hdr_len,
@@ -1771,6 +1879,11 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
 
 	skb_tx_timestamp(skb);
 
+	tsyn = i40e_tsyn(tx_ring, skb, tx_flags, &cd_type_cmd_tso_mss);
+
+	if (tsyn)
+		tx_flags |= I40E_TX_FLAGS_TSYN;
+
 	/* always enable CRC insertion offload */
 	td_cmd |= I40E_TX_DESC_CMD_ICRC;
 
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index db55d9947f15..d5349698e513 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
  * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
  *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
@@ -25,11 +24,13 @@
  *
  ******************************************************************************/
 
+#ifndef _I40E_TXRX_H_
+#define _I40E_TXRX_H_
+
 /* Interrupt Throttling and Rate Limiting (storm control) Goodies */
 
-#define I40E_MAX_ITR               0x07FF
-#define I40E_MIN_ITR               0x0001
-#define I40E_ITR_USEC_RESOLUTION   2
+#define I40E_MAX_ITR               0x0FF0  /* reg uses 2 usec resolution */
+#define I40E_MIN_ITR               0x0004  /* reg uses 2 usec resolution */
 #define I40E_MAX_IRATE             0x03F
 #define I40E_MIN_IRATE             0x001
 #define I40E_IRATE_USEC_RESOLUTION 4
@@ -49,10 +50,43 @@
 
 #define I40E_QUEUE_END_OF_LIST 0x7FF
 
-#define I40E_ITR_NONE  3
-#define I40E_RX_ITR    0
-#define I40E_TX_ITR    1
-#define I40E_PE_ITR    2
+/* This enum matches the hardware's ITR index encoding and is meant to be used
+ * with the DYN_CTLN and QINT registers, or more generally anywhere the manual
+ * mentions ITR_INDX. ITR_NONE cannot be used as an index 'n' into any
+ * register; it is a special value meaning "don't update" ITR0/1/2.
+ */
+enum i40e_dyn_idx_t {
+	I40E_IDX_ITR0 = 0,
+	I40E_IDX_ITR1 = 1,
+	I40E_IDX_ITR2 = 2,
+	I40E_ITR_NONE = 3	/* ITR_NONE must not be used as an index */
+};
+
+/* these are indexes into ITRN registers */
+#define I40E_RX_ITR    I40E_IDX_ITR0
+#define I40E_TX_ITR    I40E_IDX_ITR1
+#define I40E_PE_ITR    I40E_IDX_ITR2
+
+/* Supported RSS offloads */
+#define I40E_DEFAULT_RSS_HENA ( \
+	((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
+	((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
+	((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
+	((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
+	((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN) | \
+	((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
+	((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
+	((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) | \
+	((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
+	((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) | \
+	((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
+	((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN) | \
+	((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
+	((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
+	((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
+	((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) | \
+	((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD))
+
 /* Supported Rx Buffer Sizes */
 #define I40E_RXBUFFER_512   512    /* Used for packet split */
 #define I40E_RXBUFFER_2048  2048
@@ -102,6 +136,7 @@
 #define I40E_TX_FLAGS_IPV6		(u32)(1 << 5)
 #define I40E_TX_FLAGS_FCCRC		(u32)(1 << 6)
 #define I40E_TX_FLAGS_FSO		(u32)(1 << 7)
+#define I40E_TX_FLAGS_TSYN		(u32)(1 << 8)
 #define I40E_TX_FLAGS_VLAN_MASK		0xffff0000
 #define I40E_TX_FLAGS_VLAN_PRIO_MASK	0xe0000000
 #define I40E_TX_FLAGS_VLAN_PRIO_SHIFT	29
@@ -139,8 +174,8 @@ struct i40e_tx_queue_stats {
 
 struct i40e_rx_queue_stats {
 	u64 non_eop_descs;
-	u64 alloc_rx_page_failed;
-	u64 alloc_rx_buff_failed;
+	u64 alloc_page_failed;
+	u64 alloc_buff_failed;
 };
 
 enum i40e_ring_state_t {
@@ -214,6 +249,8 @@ struct i40e_ring {
 	u8 atr_sample_rate;
 	u8 atr_count;
 
+	unsigned long last_rx_timestamp;
+
 	bool ring_active;		/* is ring online or not */
 
 	/* stats structs */
@@ -262,3 +299,4 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring);
 void i40e_free_tx_resources(struct i40e_ring *tx_ring);
 void i40e_free_rx_resources(struct i40e_ring *rx_ring);
 int i40e_napi_poll(struct napi_struct *napi, int budget);
+#endif /* _I40E_TXRX_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index f3f22b20f02f..181a825d3160 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
  * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
  *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
@@ -36,38 +35,31 @@
 #include "i40e_lan_hmc.h"
 
 /* Device IDs */
-#define I40E_SFP_XL710_DEVICE_ID	0x1572
-#define I40E_SFP_X710_DEVICE_ID		0x1573
-#define I40E_QEMU_DEVICE_ID		0x1574
-#define I40E_KX_A_DEVICE_ID		0x157F
-#define I40E_KX_B_DEVICE_ID		0x1580
-#define I40E_KX_C_DEVICE_ID		0x1581
-#define I40E_KX_D_DEVICE_ID		0x1582
-#define I40E_QSFP_A_DEVICE_ID		0x1583
-#define I40E_QSFP_B_DEVICE_ID		0x1584
-#define I40E_QSFP_C_DEVICE_ID		0x1585
-#define I40E_VF_DEVICE_ID		0x154C
-#define I40E_VF_HV_DEVICE_ID		0x1571
-
-#define I40E_FW_API_VERSION_MAJOR  0x0001
-#define I40E_FW_API_VERSION_MINOR  0x0000
+#define I40E_DEV_ID_SFP_XL710		0x1572
+#define I40E_DEV_ID_SFP_X710		0x1573
+#define I40E_DEV_ID_QEMU		0x1574
+#define I40E_DEV_ID_KX_A		0x157F
+#define I40E_DEV_ID_KX_B		0x1580
+#define I40E_DEV_ID_KX_C		0x1581
+#define I40E_DEV_ID_KX_D		0x1582
+#define I40E_DEV_ID_QSFP_A		0x1583
+#define I40E_DEV_ID_QSFP_B		0x1584
+#define I40E_DEV_ID_QSFP_C		0x1585
+#define I40E_DEV_ID_VF			0x154C
+#define I40E_DEV_ID_VF_HV		0x1571
+
+#define i40e_is_40G_device(d)		((d) == I40E_DEV_ID_QSFP_A  || \
+					 (d) == I40E_DEV_ID_QSFP_B  || \
+					 (d) == I40E_DEV_ID_QSFP_C)
 
 #define I40E_MAX_VSI_QP			16
 #define I40E_MAX_VF_VSI			3
 #define I40E_MAX_CHAINED_RX_BUFFERS	5
+#define I40E_MAX_PF_UDP_OFFLOAD_PORTS	16
 
 /* Max default timeout in ms, */
 #define I40E_MAX_NVM_TIMEOUT		18000
 
-/* Check whether address is multicast.  This is little-endian specific check.*/
-#define I40E_IS_MULTICAST(address)	\
-	(bool)(((u8 *)(address))[0] & ((u8)0x01))
-
-/* Check whether an address is broadcast. */
-#define I40E_IS_BROADCAST(address)	\
-	((((u8 *)(address))[0] == ((u8)0xff)) && \
-	(((u8 *)(address))[1] == ((u8)0xff)))
-
 /* Switch from mc to the 2usec global time (this is the GTIME resolution) */
 #define I40E_MS_TO_GTIME(time)		(((time) * 1000) / 2)
 
@@ -75,8 +67,6 @@
 struct i40e_hw;
 typedef void (*I40E_ADMINQ_CALLBACK)(struct i40e_hw *, struct i40e_aq_desc *);
 
-#define I40E_ETH_LENGTH_OF_ADDRESS	6
-
 /* Data type manipulation macros. */
 
 #define I40E_DESC_UNUSED(R)	\
@@ -85,9 +75,10 @@ typedef void (*I40E_ADMINQ_CALLBACK)(struct i40e_hw *, struct i40e_aq_desc *);
 
 /* bitfields for Tx queue mapping in QTX_CTL */
 #define I40E_QTX_CTL_VF_QUEUE	0x0
+#define I40E_QTX_CTL_VM_QUEUE	0x1
 #define I40E_QTX_CTL_PF_QUEUE	0x2
 
-/* debug masks */
+/* debug masks - set these bits in hw->debug_mask to control output */
 enum i40e_debug_mask {
 	I40E_DEBUG_INIT			= 0x00000001,
 	I40E_DEBUG_RELEASE		= 0x00000002,
@@ -101,10 +92,10 @@ enum i40e_debug_mask {
 	I40E_DEBUG_DCB			= 0x00000400,
 	I40E_DEBUG_DIAG			= 0x00000800,
 
-	I40E_DEBUG_AQ_MESSAGE		= 0x01000000, /* for i40e_debug() */
+	I40E_DEBUG_AQ_MESSAGE		= 0x01000000,
 	I40E_DEBUG_AQ_DESCRIPTOR	= 0x02000000,
 	I40E_DEBUG_AQ_DESC_BUFFER	= 0x04000000,
-	I40E_DEBUG_AQ_COMMAND		= 0x06000000, /* for i40e_debug_aq() */
+	I40E_DEBUG_AQ_COMMAND		= 0x06000000,
 	I40E_DEBUG_AQ			= 0x0F000000,
 
 	I40E_DEBUG_USER			= 0xF0000000,
@@ -134,6 +125,7 @@ enum i40e_media_type {
 	I40E_MEDIA_TYPE_BASET,
 	I40E_MEDIA_TYPE_BACKPLANE,
 	I40E_MEDIA_TYPE_CX4,
+	I40E_MEDIA_TYPE_DA,
 	I40E_MEDIA_TYPE_VIRTUAL
 };
 
@@ -171,6 +163,7 @@ struct i40e_link_status {
 	u8 link_info;
 	u8 an_info;
 	u8 ext_info;
+	u8 loopback;
 	/* is Link Status Event notification to SW enabled */
 	bool lse_enable;
 };
@@ -236,9 +229,9 @@ struct i40e_hw_capabilities {
 
 struct i40e_mac_info {
 	enum i40e_mac_type type;
-	u8 addr[I40E_ETH_LENGTH_OF_ADDRESS];
-	u8 perm_addr[I40E_ETH_LENGTH_OF_ADDRESS];
-	u8 san_addr[I40E_ETH_LENGTH_OF_ADDRESS];
+	u8 addr[ETH_ALEN];
+	u8 perm_addr[ETH_ALEN];
+	u8 san_addr[ETH_ALEN];
 	u16 max_fcoeq;
 };
 
@@ -500,18 +493,26 @@ enum i40e_rx_desc_status_bits {
 	I40E_RX_DESC_STATUS_L2TAG1P_SHIFT	= 2,
 	I40E_RX_DESC_STATUS_L3L4P_SHIFT		= 3,
 	I40E_RX_DESC_STATUS_CRCP_SHIFT		= 4,
-	I40E_RX_DESC_STATUS_TSYNINDX_SHIFT	= 5, /* 3 BITS */
+	I40E_RX_DESC_STATUS_TSYNINDX_SHIFT	= 5, /* 2 BITS */
+	I40E_RX_DESC_STATUS_TSYNVALID_SHIFT	= 7,
 	I40E_RX_DESC_STATUS_PIF_SHIFT		= 8,
 	I40E_RX_DESC_STATUS_UMBCAST_SHIFT	= 9, /* 2 BITS */
 	I40E_RX_DESC_STATUS_FLM_SHIFT		= 11,
 	I40E_RX_DESC_STATUS_FLTSTAT_SHIFT	= 12, /* 2 BITS */
-	I40E_RX_DESC_STATUS_LPBK_SHIFT		= 14
+	I40E_RX_DESC_STATUS_LPBK_SHIFT		= 14,
+	I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT	= 15,
+	I40E_RX_DESC_STATUS_RESERVED_SHIFT	= 16, /* 2 BITS */
+	I40E_RX_DESC_STATUS_UDP_0_SHIFT		= 18
 };
 
 #define I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT   I40E_RX_DESC_STATUS_TSYNINDX_SHIFT
-#define I40E_RXD_QW1_STATUS_TSYNINDX_MASK	(0x7UL << \
+#define I40E_RXD_QW1_STATUS_TSYNINDX_MASK	(0x3UL << \
 					     I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT)
 
+#define I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT  I40E_RX_DESC_STATUS_TSYNVALID_SHIFT
+#define I40E_RXD_QW1_STATUS_TSYNVALID_MASK	(0x1UL << \
+					 I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT)
+
 enum i40e_rx_desc_fltstat_values {
 	I40E_RX_DESC_FLTSTAT_NO_DATA	= 0,
 	I40E_RX_DESC_FLTSTAT_RSV_FD_ID	= 1, /* 16byte desc? FD_ID : RSV */
@@ -547,28 +548,32 @@ enum i40e_rx_desc_error_l3l4e_fcoe_masks {
 
 /* Packet type non-ip values */
 enum i40e_rx_l2_ptype {
-	I40E_RX_PTYPE_L2_RESERVED		= 0,
-	I40E_RX_PTYPE_L2_MAC_PAY2		= 1,
-	I40E_RX_PTYPE_L2_TIMESYNC_PAY2		= 2,
-	I40E_RX_PTYPE_L2_FIP_PAY2		= 3,
-	I40E_RX_PTYPE_L2_OUI_PAY2		= 4,
-	I40E_RX_PTYPE_L2_MACCNTRL_PAY2		= 5,
-	I40E_RX_PTYPE_L2_LLDP_PAY2		= 6,
-	I40E_RX_PTYPE_L2_ECP_PAY2		= 7,
-	I40E_RX_PTYPE_L2_EVB_PAY2		= 8,
-	I40E_RX_PTYPE_L2_QCN_PAY2		= 9,
-	I40E_RX_PTYPE_L2_EAPOL_PAY2		= 10,
-	I40E_RX_PTYPE_L2_ARP			= 11,
-	I40E_RX_PTYPE_L2_FCOE_PAY3		= 12,
-	I40E_RX_PTYPE_L2_FCOE_FCDATA_PAY3	= 13,
-	I40E_RX_PTYPE_L2_FCOE_FCRDY_PAY3	= 14,
-	I40E_RX_PTYPE_L2_FCOE_FCRSP_PAY3	= 15,
-	I40E_RX_PTYPE_L2_FCOE_FCOTHER_PA	= 16,
-	I40E_RX_PTYPE_L2_FCOE_VFT_PAY3		= 17,
-	I40E_RX_PTYPE_L2_FCOE_VFT_FCDATA	= 18,
-	I40E_RX_PTYPE_L2_FCOE_VFT_FCRDY		= 19,
-	I40E_RX_PTYPE_L2_FCOE_VFT_FCRSP		= 20,
-	I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER	= 21
+	I40E_RX_PTYPE_L2_RESERVED			= 0,
+	I40E_RX_PTYPE_L2_MAC_PAY2			= 1,
+	I40E_RX_PTYPE_L2_TIMESYNC_PAY2			= 2,
+	I40E_RX_PTYPE_L2_FIP_PAY2			= 3,
+	I40E_RX_PTYPE_L2_OUI_PAY2			= 4,
+	I40E_RX_PTYPE_L2_MACCNTRL_PAY2			= 5,
+	I40E_RX_PTYPE_L2_LLDP_PAY2			= 6,
+	I40E_RX_PTYPE_L2_ECP_PAY2			= 7,
+	I40E_RX_PTYPE_L2_EVB_PAY2			= 8,
+	I40E_RX_PTYPE_L2_QCN_PAY2			= 9,
+	I40E_RX_PTYPE_L2_EAPOL_PAY2			= 10,
+	I40E_RX_PTYPE_L2_ARP				= 11,
+	I40E_RX_PTYPE_L2_FCOE_PAY3			= 12,
+	I40E_RX_PTYPE_L2_FCOE_FCDATA_PAY3		= 13,
+	I40E_RX_PTYPE_L2_FCOE_FCRDY_PAY3		= 14,
+	I40E_RX_PTYPE_L2_FCOE_FCRSP_PAY3		= 15,
+	I40E_RX_PTYPE_L2_FCOE_FCOTHER_PA		= 16,
+	I40E_RX_PTYPE_L2_FCOE_VFT_PAY3			= 17,
+	I40E_RX_PTYPE_L2_FCOE_VFT_FCDATA		= 18,
+	I40E_RX_PTYPE_L2_FCOE_VFT_FCRDY			= 19,
+	I40E_RX_PTYPE_L2_FCOE_VFT_FCRSP			= 20,
+	I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER		= 21,
+	I40E_RX_PTYPE_GRENAT4_MAC_PAY3			= 58,
+	I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4	= 87,
+	I40E_RX_PTYPE_GRENAT6_MAC_PAY3			= 124,
+	I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4	= 153
 };
 
 struct i40e_rx_ptype_decoded {
@@ -852,10 +857,7 @@ struct i40e_filter_program_desc {
 
 /* Packet Classifier Types for filters */
 enum i40e_filter_pctype {
-	/* Note: Value 0-25 are reserved for future use */
-	I40E_FILTER_PCTYPE_IPV4_TEREDO_UDP		= 26,
-	I40E_FILTER_PCTYPE_IPV6_TEREDO_UDP		= 27,
-	I40E_FILTER_PCTYPE_NONF_IPV4_1588_UDP		= 28,
+	/* Note: Values 0-28 are reserved for future use */
 	I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP	= 29,
 	I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP	= 30,
 	I40E_FILTER_PCTYPE_NONF_IPV4_UDP		= 31,
@@ -864,8 +866,7 @@ enum i40e_filter_pctype {
 	I40E_FILTER_PCTYPE_NONF_IPV4_SCTP		= 34,
 	I40E_FILTER_PCTYPE_NONF_IPV4_OTHER		= 35,
 	I40E_FILTER_PCTYPE_FRAG_IPV4			= 36,
-	/* Note: Value 37 is reserved for future use */
-	I40E_FILTER_PCTYPE_NONF_IPV6_1588_UDP		= 38,
+	/* Note: Values 37-38 are reserved for future use */
 	I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP	= 39,
 	I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP	= 40,
 	I40E_FILTER_PCTYPE_NONF_IPV6_UDP		= 41,
@@ -877,7 +878,8 @@ enum i40e_filter_pctype {
 	/* Note: Value 47 is reserved for future use */
 	I40E_FILTER_PCTYPE_FCOE_OX			= 48,
 	I40E_FILTER_PCTYPE_FCOE_RX			= 49,
-	/* Note: Value 50-62 are reserved for future use */
+	I40E_FILTER_PCTYPE_FCOE_OTHER			= 50,
+	/* Note: Values 51-62 are reserved for future use */
 	I40E_FILTER_PCTYPE_L2_PAYLOAD			= 63,
 };
 
@@ -1014,6 +1016,7 @@ struct i40e_hw_port_stats {
 #define I40E_SR_NVM_CONTROL_WORD		0x00
 #define I40E_SR_EMP_MODULE_PTR			0x0F
 #define I40E_SR_NVM_IMAGE_VERSION		0x18
+#define I40E_SR_NVM_WAKE_ON_LAN			0x19
 #define I40E_SR_ALTERNATE_SAN_MAC_ADDRESS_PTR	0x27
 #define I40E_SR_NVM_EETRACK_LO			0x2D
 #define I40E_SR_NVM_EETRACK_HI			0x2E
@@ -1138,17 +1141,4 @@ enum i40e_reset_type {
 	I40E_RESET_GLOBR	= 2,
 	I40E_RESET_EMPR		= 3,
 };
-
-/* IEEE 802.1AB LLDP Agent Variables from NVM */
-#define I40E_NVM_LLDP_CFG_PTR		0xF
-struct i40e_lldp_variables {
-	u16 length;
-	u16 adminstatus;
-	u16 msgfasttx;
-	u16 msgtxinterval;
-	u16 txparams;
-	u16 timers;
-	u16 crc8;
-};
-
 #endif /* _I40E_TYPE_H_ */
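A minimal usage sketch for the new i40e_is_40G_device() macro (illustrative only; sketch_supports_40g() is a hypothetical helper, and hw->device_id is assumed to carry the PCI device ID):

#include "i40e_type.h"

/* Hypothetical helper, not part of the patch: true only for the
 * QSFP_A/B/C device IDs grouped by i40e_is_40G_device() above.
 */
static bool sketch_supports_40g(struct i40e_hw *hw)
{
	return i40e_is_40G_device(hw->device_id);
}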
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
index cc6654f1dac7..22a1b69cd646 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
  * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
  *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
@@ -142,7 +141,7 @@ struct i40e_virtchnl_vsi_resource {
 	u16 num_queue_pairs;
 	enum i40e_vsi_type vsi_type;
 	u16 qset_handle;
-	u8 default_mac_addr[I40E_ETH_LENGTH_OF_ADDRESS];
+	u8 default_mac_addr[ETH_ALEN];
 };
 /* VF offload flags */
 #define I40E_VIRTCHNL_VF_OFFLOAD_L2	0x00000001
@@ -265,7 +264,7 @@ struct i40e_virtchnl_queue_select {
  */
 
 struct i40e_virtchnl_ether_addr {
-	u8 addr[I40E_ETH_LENGTH_OF_ADDRESS];
+	u8 addr[ETH_ALEN];
 	u8 pad[2];
 };
 
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 07596982a477..b9d1c1c8ca5a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
  * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
  *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
@@ -70,7 +69,7 @@ static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u8 vector_id)
 {
 	struct i40e_pf *pf = vf->pf;
 
-	return vector_id < pf->hw.func_caps.num_msix_vectors_vf;
+	return vector_id <= pf->hw.func_caps.num_msix_vectors_vf;
 }
 
 /***********************vf resource mgmt routines*****************/
@@ -102,130 +101,6 @@ static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u8 vsi_idx,
 }
 
 /**
- * i40e_ctrl_vsi_tx_queue
- * @vf: pointer to the vf info
- * @vsi_idx: index of VSI in PF struct
- * @vsi_queue_id: vsi relative queue index
- * @ctrl: control flags
- *
- * enable/disable/enable check/disable check
- **/
-static int i40e_ctrl_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_idx,
-				  u16 vsi_queue_id,
-				  enum i40e_queue_ctrl ctrl)
-{
-	struct i40e_pf *pf = vf->pf;
-	struct i40e_hw *hw = &pf->hw;
-	bool writeback = false;
-	u16 pf_queue_id;
-	int ret = 0;
-	u32 reg;
-
-	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);
-	reg = rd32(hw, I40E_QTX_ENA(pf_queue_id));
-
-	switch (ctrl) {
-	case I40E_QUEUE_CTRL_ENABLE:
-		reg |= I40E_QTX_ENA_QENA_REQ_MASK;
-		writeback = true;
-		break;
-	case I40E_QUEUE_CTRL_ENABLECHECK:
-		ret = (reg & I40E_QTX_ENA_QENA_STAT_MASK) ? 0 : -EPERM;
-		break;
-	case I40E_QUEUE_CTRL_DISABLE:
-		reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
-		writeback = true;
-		break;
-	case I40E_QUEUE_CTRL_DISABLECHECK:
-		ret = (reg & I40E_QTX_ENA_QENA_STAT_MASK) ? -EPERM : 0;
-		break;
-	case I40E_QUEUE_CTRL_FASTDISABLE:
-		reg |= I40E_QTX_ENA_FAST_QDIS_MASK;
-		writeback = true;
-		break;
-	case I40E_QUEUE_CTRL_FASTDISABLECHECK:
-		ret = (reg & I40E_QTX_ENA_QENA_STAT_MASK) ? -EPERM : 0;
-		if (!ret) {
-			reg &= ~I40E_QTX_ENA_FAST_QDIS_MASK;
-			writeback = true;
-		}
-		break;
-	default:
-		ret = -EINVAL;
-		break;
-	}
-
-	if (writeback) {
-		wr32(hw, I40E_QTX_ENA(pf_queue_id), reg);
-		i40e_flush(hw);
-	}
-
-	return ret;
-}
-
-/**
- * i40e_ctrl_vsi_rx_queue
- * @vf: pointer to the vf info
- * @vsi_idx: index of VSI in PF struct
- * @vsi_queue_id: vsi relative queue index
- * @ctrl: control flags
- *
- * enable/disable/enable check/disable check
- **/
-static int i40e_ctrl_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_idx,
-				  u16 vsi_queue_id,
-				  enum i40e_queue_ctrl ctrl)
-{
-	struct i40e_pf *pf = vf->pf;
-	struct i40e_hw *hw = &pf->hw;
-	bool writeback = false;
-	u16 pf_queue_id;
-	int ret = 0;
-	u32 reg;
-
-	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);
-	reg = rd32(hw, I40E_QRX_ENA(pf_queue_id));
-
-	switch (ctrl) {
-	case I40E_QUEUE_CTRL_ENABLE:
-		reg |= I40E_QRX_ENA_QENA_REQ_MASK;
-		writeback = true;
-		break;
-	case I40E_QUEUE_CTRL_ENABLECHECK:
-		ret = (reg & I40E_QRX_ENA_QENA_STAT_MASK) ? 0 : -EPERM;
-		break;
-	case I40E_QUEUE_CTRL_DISABLE:
-		reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
-		writeback = true;
-		break;
-	case I40E_QUEUE_CTRL_DISABLECHECK:
-		ret = (reg & I40E_QRX_ENA_QENA_STAT_MASK) ? -EPERM : 0;
-		break;
-	case I40E_QUEUE_CTRL_FASTDISABLE:
-		reg |= I40E_QRX_ENA_FAST_QDIS_MASK;
-		writeback = true;
-		break;
-	case I40E_QUEUE_CTRL_FASTDISABLECHECK:
-		ret = (reg & I40E_QRX_ENA_QENA_STAT_MASK) ? -EPERM : 0;
-		if (!ret) {
-			reg &= ~I40E_QRX_ENA_FAST_QDIS_MASK;
-			writeback = true;
-		}
-		break;
-	default:
-		ret = -EINVAL;
-		break;
-	}
-
-	if (writeback) {
-		wr32(hw, I40E_QRX_ENA(pf_queue_id), reg);
-		i40e_flush(hw);
-	}
-
-	return ret;
-}
-
-/**
  * i40e_config_irq_link_list
  * @vf: pointer to the vf info
  * @vsi_idx: index of VSI in PF struct
@@ -260,23 +135,17 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_idx,
 		goto irq_list_done;
 	}
 	tempmap = vecmap->rxq_map;
-	vsi_queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
-	while (vsi_queue_id < I40E_MAX_VSI_QP) {
+	for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
 		linklistmap |= (1 <<
 				(I40E_VIRTCHNL_SUPPORTED_QTYPES *
 				 vsi_queue_id));
-		vsi_queue_id =
-		    find_next_bit(&tempmap, I40E_MAX_VSI_QP, vsi_queue_id + 1);
 	}
 
 	tempmap = vecmap->txq_map;
-	vsi_queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
-	while (vsi_queue_id < I40E_MAX_VSI_QP) {
+	for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
 		linklistmap |= (1 <<
 				(I40E_VIRTCHNL_SUPPORTED_QTYPES * vsi_queue_id
 				 + 1));
-		vsi_queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
-					     vsi_queue_id + 1);
 	}
 
 	next_q = find_first_bit(&linklistmap,
@@ -307,7 +176,8 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_idx,
 				       (I40E_MAX_VSI_QP *
 					I40E_VIRTCHNL_SUPPORTED_QTYPES),
 				       next_q + 1);
-		if (next_q < (I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES)) {
+		if (next_q <
+		    (I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES)) {
 			vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
 			qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
 			pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx,
@@ -499,7 +369,6 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
 {
 	struct i40e_mac_filter *f = NULL;
 	struct i40e_pf *pf = vf->pf;
-	struct i40e_hw *hw = &pf->hw;
 	struct i40e_vsi *vsi;
 	int ret = 0;
 
@@ -513,14 +382,32 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
 		goto error_alloc_vsi_res;
 	}
 	if (type == I40E_VSI_SRIOV) {
+		u8 brdcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
 		vf->lan_vsi_index = vsi->idx;
 		vf->lan_vsi_id = vsi->id;
 		dev_info(&pf->pdev->dev,
-			 "LAN VSI index %d, VSI id %d\n",
-			 vsi->idx, vsi->id);
+			 "VF %d assigned LAN VSI index %d, VSI id %d\n",
+			 vf->vf_id, vsi->idx, vsi->id);
+		/* If the port VLAN has been configured and then the
+		 * VF driver was removed, the VSI port VLAN
+		 * configuration was destroyed.  Check if there is
+		 * a port VLAN and restore the VSI configuration if
+		 * needed.
+		 */
+		if (vf->port_vlan_id)
+			i40e_vsi_add_pvid(vsi, vf->port_vlan_id);
 		f = i40e_add_filter(vsi, vf->default_lan_addr.addr,
-				    0, true, false);
+				    vf->port_vlan_id, true, false);
+		if (!f)
+			dev_info(&pf->pdev->dev,
+				 "Could not allocate VF MAC addr\n");
+		f = i40e_add_filter(vsi, brdcast, vf->port_vlan_id,
+				    true, false);
+		if (!f)
+			dev_info(&pf->pdev->dev,
+				 "Could not allocate VF broadcast filter\n");
 	}
+
 	if (!f) {
 		dev_err(&pf->pdev->dev, "Unable to add ucast filter\n");
 		ret = -ENOMEM;
@@ -534,150 +421,11 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
 		goto error_alloc_vsi_res;
 	}
 
-	/* accept bcast pkts. by default */
-	ret = i40e_aq_set_vsi_broadcast(hw, vsi->seid, true, NULL);
-	if (ret) {
-		dev_err(&pf->pdev->dev,
-			"set vsi bcast failed for vf %d, vsi %d, aq_err %d\n",
-			vf->vf_id, vsi->idx, pf->hw.aq.asq_last_status);
-		ret = -EINVAL;
-	}
-
 error_alloc_vsi_res:
 	return ret;
 }
 
 /**
- * i40e_reset_vf
- * @vf: pointer to the vf structure
- * @flr: VFLR was issued or not
- *
- * reset the vf
- **/
-int i40e_reset_vf(struct i40e_vf *vf, bool flr)
-{
-	int ret = -ENOENT;
-	struct i40e_pf *pf = vf->pf;
-	struct i40e_hw *hw = &pf->hw;
-	u32 reg, reg_idx, msix_vf;
-	bool rsd = false;
-	u16 pf_queue_id;
-	int i, j;
-
-	/* warn the VF */
-	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_INPROGRESS);
-
-	clear_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);
-
-	/* PF triggers VFR only when VF requests, in case of
-	 * VFLR, HW triggers VFR
-	 */
-	if (!flr) {
-		/* reset vf using VPGEN_VFRTRIG reg */
-		reg = I40E_VPGEN_VFRTRIG_VFSWR_MASK;
-		wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
-		i40e_flush(hw);
-	}
-
-	/* poll VPGEN_VFRSTAT reg to make sure
-	 * that reset is complete
-	 */
-	for (i = 0; i < 4; i++) {
-		/* vf reset requires driver to first reset the
-		 * vf & than poll the status register to make sure
-		 * that the requested op was completed
-		 * successfully
-		 */
-		udelay(10);
-		reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
-		if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) {
-			rsd = true;
-			break;
-		}
-	}
-
-	if (!rsd)
-		dev_err(&pf->pdev->dev, "VF reset check timeout %d\n",
-			vf->vf_id);
-
-	/* fast disable qps */
-	for (j = 0; j < pf->vsi[vf->lan_vsi_index]->num_queue_pairs; j++) {
-		ret = i40e_ctrl_vsi_tx_queue(vf, vf->lan_vsi_index, j,
-					     I40E_QUEUE_CTRL_FASTDISABLE);
-		ret = i40e_ctrl_vsi_rx_queue(vf, vf->lan_vsi_index, j,
-					     I40E_QUEUE_CTRL_FASTDISABLE);
-	}
-
-	/* Queue enable/disable requires driver to
-	 * first reset the vf & than poll the status register
-	 * to make sure that the requested op was completed
-	 * successfully
-	 */
-	udelay(10);
-	for (j = 0; j < pf->vsi[vf->lan_vsi_index]->num_queue_pairs; j++) {
-		ret = i40e_ctrl_vsi_tx_queue(vf, vf->lan_vsi_index, j,
-					     I40E_QUEUE_CTRL_FASTDISABLECHECK);
-		if (ret)
-			dev_info(&pf->pdev->dev,
-				 "Queue control check failed on Tx queue %d of VSI %d VF %d\n",
-				 vf->lan_vsi_index, j, vf->vf_id);
-		ret = i40e_ctrl_vsi_rx_queue(vf, vf->lan_vsi_index, j,
-					     I40E_QUEUE_CTRL_FASTDISABLECHECK);
-		if (ret)
-			dev_info(&pf->pdev->dev,
-				 "Queue control check failed on Rx queue %d of VSI %d VF %d\n",
-				 vf->lan_vsi_index, j, vf->vf_id);
-	}
-
-	/* clear the irq settings */
-	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
-	for (i = 0; i < msix_vf; i++) {
-		/* format is same for both registers */
-		if (0 == i)
-			reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
-		else
-			reg_idx = I40E_VPINT_LNKLSTN(((msix_vf - 1) *
-						      (vf->vf_id))
-						     + (i - 1));
-		reg = (I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
-		       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
-		wr32(hw, reg_idx, reg);
-		i40e_flush(hw);
-	}
-	/* disable interrupts so the VF starts in a known state */
-	for (i = 0; i < msix_vf; i++) {
-		/* format is same for both registers */
-		if (0 == i)
-			reg_idx = I40E_VFINT_DYN_CTL0(vf->vf_id);
-		else
-			reg_idx = I40E_VFINT_DYN_CTLN(((msix_vf - 1) *
-						      (vf->vf_id))
-						     + (i - 1));
-		wr32(hw, reg_idx, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
-		i40e_flush(hw);
-	}
-
-	/* set the defaults for the rqctl & tqctl registers */
-	reg = (I40E_QINT_RQCTL_NEXTQ_INDX_MASK | I40E_QINT_RQCTL_ITR_INDX_MASK |
-	       I40E_QINT_RQCTL_NEXTQ_TYPE_MASK);
-	for (j = 0; j < pf->vsi[vf->lan_vsi_index]->num_queue_pairs; j++) {
-		pf_queue_id = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index, j);
-		wr32(hw, I40E_QINT_RQCTL(pf_queue_id), reg);
-		wr32(hw, I40E_QINT_TQCTL(pf_queue_id), reg);
-	}
-
-	/* clear the reset bit in the VPGEN_VFRTRIG reg */
-	reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
-	reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
-	wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
-	/* tell the VF the reset is done */
-	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_COMPLETED);
-	i40e_flush(hw);
-
-	return ret;
-}
-
-/**
  * i40e_enable_vf_mappings
  * @vf: pointer to the vf info
  *
@@ -756,6 +504,9 @@ static void i40e_disable_vf_mappings(struct i40e_vf *vf)
 static void i40e_free_vf_res(struct i40e_vf *vf)
 {
 	struct i40e_pf *pf = vf->pf;
+	struct i40e_hw *hw = &pf->hw;
+	u32 reg_idx, reg;
+	int i, msix_vf;
 
 	/* free vsi & disconnect it from the parent uplink */
 	if (vf->lan_vsi_index) {
@@ -763,6 +514,34 @@ static void i40e_free_vf_res(struct i40e_vf *vf)
 		vf->lan_vsi_index = 0;
 		vf->lan_vsi_id = 0;
 	}
+	msix_vf = pf->hw.func_caps.num_msix_vectors_vf + 1;
+	/* disable interrupts so the VF starts in a known state */
+	for (i = 0; i < msix_vf; i++) {
+		/* format is same for both registers */
+		if (0 == i)
+			reg_idx = I40E_VFINT_DYN_CTL0(vf->vf_id);
+		else
+			reg_idx = I40E_VFINT_DYN_CTLN(((msix_vf - 1) *
+						      (vf->vf_id))
+						     + (i - 1));
+		wr32(hw, reg_idx, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
+		i40e_flush(hw);
+	}
+
+	/* clear the irq settings */
+	for (i = 0; i < msix_vf; i++) {
+		/* format is same for both registers */
+		if (0 == i)
+			reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
+		else
+			reg_idx = I40E_VPINT_LNKLSTN(((msix_vf - 1) *
+						      (vf->vf_id))
+						     + (i - 1));
+		reg = (I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
+		       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
+		wr32(hw, reg_idx, reg);
+		i40e_flush(hw);
+	}
 	/* reset some of the state variables keeping
 	 * track of the resources
 	 */
@@ -804,6 +583,111 @@ error_alloc:
 	return ret;
 }
 
+#define VF_DEVICE_STATUS 0xAA
+#define VF_TRANS_PENDING_MASK 0x20
+/**
+ * i40e_quiesce_vf_pci
+ * @vf: pointer to the vf structure
+ *
+ * Wait for VF PCI transactions to be cleared after reset. Returns -EIO
+ * if the transactions never clear.
+ **/
+static int i40e_quiesce_vf_pci(struct i40e_vf *vf)
+{
+	struct i40e_pf *pf = vf->pf;
+	struct i40e_hw *hw = &pf->hw;
+	int vf_abs_id, i;
+	u32 reg;
+
+	vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;
+
+	wr32(hw, I40E_PF_PCI_CIAA,
+	     VF_DEVICE_STATUS | (vf_abs_id << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
+	for (i = 0; i < 100; i++) {
+		reg = rd32(hw, I40E_PF_PCI_CIAD);
+		if ((reg & VF_TRANS_PENDING_MASK) == 0)
+			return 0;
+		udelay(1);
+	}
+	return -EIO;
+}
+
+/**
+ * i40e_reset_vf
+ * @vf: pointer to the vf structure
+ * @flr: VFLR was issued or not
+ *
+ * reset the vf
+ **/
+void i40e_reset_vf(struct i40e_vf *vf, bool flr)
+{
+	struct i40e_pf *pf = vf->pf;
+	struct i40e_hw *hw = &pf->hw;
+	bool rsd = false;
+	int i;
+	u32 reg;
+
+	/* warn the VF */
+	clear_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);
+
+	/* In the case of a VFLR, the HW has already reset the VF and we
+	 * just need to clean up, so don't hit the VFRTRIG register.
+	 */
+	if (!flr) {
+		/* reset vf using VPGEN_VFRTRIG reg */
+		reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
+		reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
+		wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
+		i40e_flush(hw);
+	}
+
+	if (i40e_quiesce_vf_pci(vf))
+		dev_err(&pf->pdev->dev, "VF %d PCI transactions stuck\n",
+			vf->vf_id);
+
+	/* poll VPGEN_VFRSTAT reg to make sure
+	 * that reset is complete
+	 */
+	for (i = 0; i < 100; i++) {
+		/* vf reset requires driver to first reset the
+	 * vf & then poll the status register to make sure
+		 * that the requested op was completed
+		 * successfully
+		 */
+		udelay(10);
+		reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
+		if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) {
+			rsd = true;
+			break;
+		}
+	}
+
+	if (!rsd)
+		dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
+			vf->vf_id);
+	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_COMPLETED);
+	/* clear the reset bit in the VPGEN_VFRTRIG reg */
+	reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
+	reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
+	wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
+
+	/* On initial reset, we won't have any queues */
+	if (vf->lan_vsi_index == 0)
+		goto complete_reset;
+
+	i40e_vsi_control_rings(pf->vsi[vf->lan_vsi_index], false);
+complete_reset:
+	/* reallocate vf resources to reset the VSI state */
+	i40e_free_vf_res(vf);
+	mdelay(10);
+	i40e_alloc_vf_res(vf);
+	i40e_enable_vf_mappings(vf);
+
+	/* tell the VF the reset is done */
+	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE);
+	i40e_flush(hw);
+}
+
 /**
  * i40e_vfs_are_assigned
  * @pf: pointer to the pf structure
@@ -816,7 +700,7 @@ static bool i40e_vfs_are_assigned(struct i40e_pf *pf)
 	struct pci_dev *vfdev;
 
 	/* loop through all the VFs to see if we own any that are assigned */
-	vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, I40E_VF_DEVICE_ID , NULL);
+	vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, I40E_DEV_ID_VF, NULL);
 	while (vfdev) {
 		/* if we don't own it we don't care */
 		if (vfdev->is_virtfn && pci_physfn(vfdev) == pdev) {
@@ -826,12 +710,82 @@ static bool i40e_vfs_are_assigned(struct i40e_pf *pf)
 		}
 
 		vfdev = pci_get_device(PCI_VENDOR_ID_INTEL,
-				       I40E_VF_DEVICE_ID,
+				       I40E_DEV_ID_VF,
 				       vfdev);
 	}
 
 	return false;
 }
+#ifdef CONFIG_PCI_IOV
+
+/**
+ * i40e_enable_pf_switch_lb
+ * @pf: pointer to the pf structure
+ *
+ * enable switch loopback or die - no point in a return value
+ **/
+static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
+{
+	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+	struct i40e_vsi_context ctxt;
+	int aq_ret;
+
+	ctxt.seid = pf->main_vsi_seid;
+	ctxt.pf_num = pf->hw.pf_id;
+	ctxt.vf_num = 0;
+	aq_ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
+	if (aq_ret) {
+		dev_info(&pf->pdev->dev,
+			 "%s couldn't get pf vsi config, err %d, aq_err %d\n",
+			 __func__, aq_ret, pf->hw.aq.asq_last_status);
+		return;
+	}
+	ctxt.flags = I40E_AQ_VSI_TYPE_PF;
+	ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
+	ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
+
+	aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+	if (aq_ret) {
+		dev_info(&pf->pdev->dev,
+			 "%s: update vsi switch failed, aq_err=%d\n",
+			 __func__, vsi->back->hw.aq.asq_last_status);
+	}
+}
+#endif
+
+/**
+ * i40e_disable_pf_switch_lb
+ * @pf: pointer to the pf structure
+ *
+ * disable switch loopback or die - no point in a return value
+ **/
+static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
+{
+	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+	struct i40e_vsi_context ctxt;
+	int aq_ret;
+
+	ctxt.seid = pf->main_vsi_seid;
+	ctxt.pf_num = pf->hw.pf_id;
+	ctxt.vf_num = 0;
+	aq_ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
+	if (aq_ret) {
+		dev_info(&pf->pdev->dev,
+			 "%s couldn't get pf vsi config, err %d, aq_err %d\n",
+			 __func__, aq_ret, pf->hw.aq.asq_last_status);
+		return;
+	}
+	ctxt.flags = I40E_AQ_VSI_TYPE_PF;
+	ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
+	ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
+
+	aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+	if (aq_ret) {
+		dev_info(&pf->pdev->dev,
+			 "%s: update vsi switch failed, aq_err=%d\n",
+			 __func__, vsi->back->hw.aq.asq_last_status);
+	}
+}
 
 /**
  * i40e_free_vfs
@@ -842,17 +796,20 @@ static bool i40e_vfs_are_assigned(struct i40e_pf *pf)
 void i40e_free_vfs(struct i40e_pf *pf)
 {
 	struct i40e_hw *hw = &pf->hw;
-	int i;
+	u32 reg_idx, bit_idx;
+	int i, tmp, vf_id;
 
 	if (!pf->vf)
 		return;
 
 	/* Disable interrupt 0 so we don't try to handle the VFLR. */
-	wr32(hw, I40E_PFINT_DYN_CTL0, 0);
-	i40e_flush(hw);
+	i40e_irq_dynamic_disable_icr0(pf);
 
+	mdelay(10); /* let any messages in transit get finished up */
 	/* free up vf resources */
-	for (i = 0; i < pf->num_alloc_vfs; i++) {
+	tmp = pf->num_alloc_vfs;
+	pf->num_alloc_vfs = 0;
+	for (i = 0; i < tmp; i++) {
 		if (test_bit(I40E_VF_STAT_INIT, &pf->vf[i].vf_states))
 			i40e_free_vf_res(&pf->vf[i]);
 		/* disable qp mappings */
@@ -861,20 +818,25 @@ void i40e_free_vfs(struct i40e_pf *pf)
 
 	kfree(pf->vf);
 	pf->vf = NULL;
-	pf->num_alloc_vfs = 0;
 
-	if (!i40e_vfs_are_assigned(pf))
+	if (!i40e_vfs_are_assigned(pf)) {
 		pci_disable_sriov(pf->pdev);
-	else
+		/* Acknowledge VFLR for all VFs. Without this, VFs will fail to
+		 * work correctly when SR-IOV gets re-enabled.
+		 */
+		for (vf_id = 0; vf_id < tmp; vf_id++) {
+			reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
+			bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
+			wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx));
+		}
+		i40e_disable_pf_switch_lb(pf);
+	} else {
 		dev_warn(&pf->pdev->dev,
 			 "unable to disable SR-IOV because VFs are assigned.\n");
+	}
 
 	/* Re-enable interrupt 0. */
-	wr32(hw, I40E_PFINT_DYN_CTL0,
-	     I40E_PFINT_DYN_CTL0_INTENA_MASK |
-	     I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
-	     (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT));
-	i40e_flush(hw);
+	i40e_irq_dynamic_enable_icr0(pf);
 }
 
 #ifdef CONFIG_PCI_IOV
@@ -890,6 +852,9 @@ static int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
 	struct i40e_vf *vfs;
 	int i, ret = 0;
 
+	/* Disable interrupt 0 so we don't try to handle the VFLR. */
+	i40e_irq_dynamic_disable_icr0(pf);
+
 	ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
 	if (ret) {
 		dev_err(&pf->pdev->dev,
@@ -913,11 +878,8 @@ static int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
 
 		/* assign default capabilities */
 		set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
-
-		ret = i40e_alloc_vf_res(&vfs[i]);
-		i40e_reset_vf(&vfs[i], true);
-		if (ret)
-			break;
+		/* vf resources get allocated during reset */
+		i40e_reset_vf(&vfs[i], false);
 
 		/* enable vf vplan_qtable mappings */
 		i40e_enable_vf_mappings(&vfs[i]);
@@ -925,10 +887,13 @@ static int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
 	pf->vf = vfs;
 	pf->num_alloc_vfs = num_alloc_vfs;
 
+	i40e_enable_pf_switch_lb(pf);
 err_alloc:
 	if (ret)
 		i40e_free_vfs(pf);
 err_iov:
+	/* Re-enable interrupt 0. */
+	i40e_irq_dynamic_enable_icr0(pf);
 	return ret;
 }
 
@@ -1009,6 +974,7 @@ static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
 {
 	struct i40e_pf *pf = vf->pf;
 	struct i40e_hw *hw = &pf->hw;
+	int true_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
 	i40e_status aq_ret;
 
 	/* single place to detect unsuccessful return values */
@@ -1028,8 +994,8 @@ static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
 		vf->num_valid_msgs++;
 	}
 
-	aq_ret = i40e_aq_send_msg_to_vf(hw, vf->vf_id, v_opcode, v_retval,
-				     msg, msglen, NULL);
+	aq_ret = i40e_aq_send_msg_to_vf(hw, true_vf_id, v_opcode, v_retval,
+					msg, msglen, NULL);
 	if (aq_ret) {
 		dev_err(&pf->pdev->dev,
 			"Unable to send the message to VF %d aq_err %d\n",
@@ -1144,12 +1110,10 @@ err:
  * unlike other virtchnl messages, pf driver
  * doesn't send the response back to the vf
  **/
-static int i40e_vc_reset_vf_msg(struct i40e_vf *vf)
+static void i40e_vc_reset_vf_msg(struct i40e_vf *vf)
 {
-	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states))
-		return -ENOENT;
-
-	return i40e_reset_vf(vf, false);
+	if (test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states))
+		i40e_reset_vf(vf, false);
 }
 
 /**
@@ -1291,27 +1255,21 @@ static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
 
 		/* lookout for the invalid queue index */
 		tempmap = map->rxq_map;
-		vsi_queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
-		while (vsi_queue_id < I40E_MAX_VSI_QP) {
+		for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
 			if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
 						      vsi_queue_id)) {
 				aq_ret = I40E_ERR_PARAM;
 				goto error_param;
 			}
-			vsi_queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
-						     vsi_queue_id + 1);
 		}
 
 		tempmap = map->txq_map;
-		vsi_queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
-		while (vsi_queue_id < I40E_MAX_VSI_QP) {
+		for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
 			if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
 						      vsi_queue_id)) {
 				aq_ret = I40E_ERR_PARAM;
 				goto error_param;
 			}
-			vsi_queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
-						     vsi_queue_id + 1);
 		}
 
 		i40e_config_irq_link_list(vf, vsi_id, map);
@@ -1337,8 +1295,6 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
 	struct i40e_pf *pf = vf->pf;
 	u16 vsi_id = vqs->vsi_id;
 	i40e_status aq_ret = 0;
-	unsigned long tempmap;
-	u16 queue_id;
 
 	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
 		aq_ret = I40E_ERR_PARAM;
@@ -1354,66 +1310,8 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
 		aq_ret = I40E_ERR_PARAM;
 		goto error_param;
 	}
-
-	tempmap = vqs->rx_queues;
-	queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
-	while (queue_id < I40E_MAX_VSI_QP) {
-		if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id)) {
-			aq_ret = I40E_ERR_PARAM;
-			goto error_param;
-		}
-		i40e_ctrl_vsi_rx_queue(vf, vsi_id, queue_id,
-				       I40E_QUEUE_CTRL_ENABLE);
-
-		queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
-					 queue_id + 1);
-	}
-
-	tempmap = vqs->tx_queues;
-	queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
-	while (queue_id < I40E_MAX_VSI_QP) {
-		if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id)) {
-			aq_ret = I40E_ERR_PARAM;
-			goto error_param;
-		}
-		i40e_ctrl_vsi_tx_queue(vf, vsi_id, queue_id,
-				       I40E_QUEUE_CTRL_ENABLE);
-
-		queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
-					 queue_id + 1);
-	}
-
-	/* Poll the status register to make sure that the
-	 * requested op was completed successfully
-	 */
-	udelay(10);
-
-	tempmap = vqs->rx_queues;
-	queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
-	while (queue_id < I40E_MAX_VSI_QP) {
-		if (i40e_ctrl_vsi_rx_queue(vf, vsi_id, queue_id,
-					   I40E_QUEUE_CTRL_ENABLECHECK)) {
-			dev_err(&pf->pdev->dev,
-				"Queue control check failed on RX queue %d of VSI %d VF %d\n",
-				queue_id, vsi_id, vf->vf_id);
-		}
-		queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
-					 queue_id + 1);
-	}
-
-	tempmap = vqs->tx_queues;
-	queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
-	while (queue_id < I40E_MAX_VSI_QP) {
-		if (i40e_ctrl_vsi_tx_queue(vf, vsi_id, queue_id,
-					   I40E_QUEUE_CTRL_ENABLECHECK)) {
-			dev_err(&pf->pdev->dev,
-				"Queue control check failed on TX queue %d of VSI %d VF %d\n",
-				queue_id, vsi_id, vf->vf_id);
-		}
-		queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
-					 queue_id + 1);
-	}
-
+	if (i40e_vsi_control_rings(pf->vsi[vsi_id], true))
+		aq_ret = I40E_ERR_TIMEOUT;
 error_param:
 	/* send the response to the vf */
 	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
@@ -1436,8 +1334,6 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
 	struct i40e_pf *pf = vf->pf;
 	u16 vsi_id = vqs->vsi_id;
 	i40e_status aq_ret = 0;
-	unsigned long tempmap;
-	u16 queue_id;
 
 	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
 		aq_ret = I40E_ERR_PARAM;
@@ -1453,65 +1349,8 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
 		aq_ret = I40E_ERR_PARAM;
 		goto error_param;
 	}
-
-	tempmap = vqs->rx_queues;
-	queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
-	while (queue_id < I40E_MAX_VSI_QP) {
-		if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id)) {
-			aq_ret = I40E_ERR_PARAM;
-			goto error_param;
-		}
-		i40e_ctrl_vsi_rx_queue(vf, vsi_id, queue_id,
-				       I40E_QUEUE_CTRL_DISABLE);
-
-		queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
-					 queue_id + 1);
-	}
-
-	tempmap = vqs->tx_queues;
-	queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
-	while (queue_id < I40E_MAX_VSI_QP) {
-		if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id)) {
-			aq_ret = I40E_ERR_PARAM;
-			goto error_param;
-		}
-		i40e_ctrl_vsi_tx_queue(vf, vsi_id, queue_id,
-				       I40E_QUEUE_CTRL_DISABLE);
-
-		queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
-					 queue_id + 1);
-	}
-
-	/* Poll the status register to make sure that the
-	 * requested op was completed successfully
-	 */
-	udelay(10);
-
-	tempmap = vqs->rx_queues;
-	queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
-	while (queue_id < I40E_MAX_VSI_QP) {
-		if (i40e_ctrl_vsi_rx_queue(vf, vsi_id, queue_id,
-					   I40E_QUEUE_CTRL_DISABLECHECK)) {
-			dev_err(&pf->pdev->dev,
-				"Queue control check failed on RX queue %d of VSI %d VF %d\n",
-				queue_id, vsi_id, vf->vf_id);
-		}
-		queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
-					 queue_id + 1);
-	}
-
-	tempmap = vqs->tx_queues;
-	queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
-	while (queue_id < I40E_MAX_VSI_QP) {
-		if (i40e_ctrl_vsi_tx_queue(vf, vsi_id, queue_id,
-					   I40E_QUEUE_CTRL_DISABLECHECK)) {
-			dev_err(&pf->pdev->dev,
-				"Queue control check failed on TX queue %d of VSI %d VF %d\n",
-				queue_id, vsi_id, vf->vf_id);
-		}
-		queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
-					 queue_id + 1);
-	}
+	if (i40e_vsi_control_rings(pf->vsi[vsi_id], false))
+		aq_ret = I40E_ERR_TIMEOUT;
 
 error_param:
 	/* send the response to the vf */
@@ -1554,7 +1393,7 @@ static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
 		goto error_param;
 	}
 	i40e_update_eth_stats(vsi);
-	memcpy(&stats, &vsi->eth_stats, sizeof(struct i40e_eth_stats));
+	stats = vsi->eth_stats;
 
 error_param:
 	/* send the response back to the vf */
@@ -1563,6 +1402,40 @@ error_param:
 }
 
 /**
+ * i40e_check_vf_permission
+ * @vf: pointer to the vf info
+ * @macaddr: pointer to the MAC Address being checked
+ *
+ * Check if the VF has permission to add or delete unicast MAC address
+ * filters and return error code -EPERM if not.  Then check if the
+ * address filter requested is broadcast or zero and if so return
+ * an invalid MAC address error code.
+ **/
+static inline int i40e_check_vf_permission(struct i40e_vf *vf, u8 *macaddr)
+{
+	struct i40e_pf *pf = vf->pf;
+	int ret = 0;
+
+	if (is_broadcast_ether_addr(macaddr) ||
+	    is_zero_ether_addr(macaddr)) {
+		dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n", macaddr);
+		ret = I40E_ERR_INVALID_MAC_ADDR;
+	} else if (vf->pf_set_mac && !is_multicast_ether_addr(macaddr) &&
+		   !ether_addr_equal(macaddr, vf->default_lan_addr.addr)) {
+		/* If the host VMM administrator has set the VF MAC address
+		 * administratively via the ndo_set_vf_mac command then deny
+		 * permission to the VF to add or delete unicast MAC addresses.
+		 * The VF may request to set the MAC address filter already
+		 * assigned to it so do not return an error in that case.
+		 */
+		dev_err(&pf->pdev->dev,
+			"VF attempting to override administratively set MAC address\nPlease reload the VF driver to resume normal operation\n");
+		ret = -EPERM;
+	}
+	return ret;
+}
+
+/**
  * i40e_vc_add_mac_addr_msg
  * @vf: pointer to the vf info
  * @msg: pointer to the msg buffer
@@ -1577,24 +1450,20 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
 	struct i40e_pf *pf = vf->pf;
 	struct i40e_vsi *vsi = NULL;
 	u16 vsi_id = al->vsi_id;
-	i40e_status aq_ret = 0;
+	i40e_status ret = 0;
 	int i;
 
 	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
 	    !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
 	    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
-		aq_ret = I40E_ERR_PARAM;
+		ret = I40E_ERR_PARAM;
 		goto error_param;
 	}
 
 	for (i = 0; i < al->num_elements; i++) {
-		if (is_broadcast_ether_addr(al->list[i].addr) ||
-		    is_zero_ether_addr(al->list[i].addr)) {
-			dev_err(&pf->pdev->dev, "invalid VF MAC addr %pMAC\n",
-				al->list[i].addr);
-			aq_ret = I40E_ERR_PARAM;
+		ret = i40e_check_vf_permission(vf, al->list[i].addr);
+		if (ret)
 			goto error_param;
-		}
 	}
 	vsi = pf->vsi[vsi_id];
 
@@ -1603,7 +1472,7 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
 		struct i40e_mac_filter *f;
 
 		f = i40e_find_mac(vsi, al->list[i].addr, true, false);
-		if (f) {
+		if (!f) {
 			if (i40e_is_vsi_in_vlan(vsi))
 				f = i40e_put_mac_in_vlan(vsi, al->list[i].addr,
 							 true, false);
@@ -1615,7 +1484,7 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
 		if (!f) {
 			dev_err(&pf->pdev->dev,
 				"Unable to add VF MAC filter\n");
-			aq_ret = I40E_ERR_PARAM;
+			ret = I40E_ERR_PARAM;
 			goto error_param;
 		}
 	}
@@ -1627,7 +1496,7 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
 error_param:
 	/* send the response to the vf */
 	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
-				       aq_ret);
+				       ret);
 }
 
 /**
@@ -1645,15 +1514,25 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
 	struct i40e_pf *pf = vf->pf;
 	struct i40e_vsi *vsi = NULL;
 	u16 vsi_id = al->vsi_id;
-	i40e_status aq_ret = 0;
+	i40e_status ret = 0;
 	int i;
 
 	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
 	    !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
 	    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
-		aq_ret = I40E_ERR_PARAM;
+		ret = I40E_ERR_PARAM;
 		goto error_param;
 	}
+
+	for (i = 0; i < al->num_elements; i++) {
+		if (is_broadcast_ether_addr(al->list[i].addr) ||
+		    is_zero_ether_addr(al->list[i].addr)) {
+			dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n",
+				al->list[i].addr);
+			ret = I40E_ERR_INVALID_MAC_ADDR;
+			goto error_param;
+		}
+	}
 	vsi = pf->vsi[vsi_id];
 
 	/* delete addresses from the list */
@@ -1668,7 +1547,7 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
 error_param:
 	/* send the response to the vf */
 	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
-				       aq_ret);
+				       ret);
 }
 
 /**
@@ -1777,30 +1656,6 @@ error_param:
 }
 
 /**
- * i40e_vc_fcoe_msg
- * @vf: pointer to the vf info
- * @msg: pointer to the msg buffer
- * @msglen: msg length
- *
- * called from the vf for the fcoe msgs
- **/
-static int i40e_vc_fcoe_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
-{
-	i40e_status aq_ret = 0;
-
-	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
-	    !test_bit(I40E_VF_STAT_FCOEENA, &vf->vf_states)) {
-		aq_ret = I40E_ERR_PARAM;
-		goto error_param;
-	}
-	aq_ret = I40E_ERR_NOT_IMPLEMENTED;
-
-error_param:
-	/* send the response to the vf */
-	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_FCOE, aq_ret);
-}
-
-/**
  * i40e_vc_validate_vf_msg
  * @vf: pointer to the vf info
  * @msg: pointer to the msg buffer
@@ -1920,19 +1775,24 @@ static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode,
 int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
 			   u32 v_retval, u8 *msg, u16 msglen)
 {
-	struct i40e_vf *vf = &(pf->vf[vf_id]);
 	struct i40e_hw *hw = &pf->hw;
+	int local_vf_id = vf_id - hw->func_caps.vf_base_id;
+	struct i40e_vf *vf;
 	int ret;
 
 	pf->vf_aq_requests++;
+	if (local_vf_id >= pf->num_alloc_vfs)
+		return -EINVAL;
+	vf = &(pf->vf[local_vf_id]);
 	/* perform basic checks on the msg */
 	ret = i40e_vc_validate_vf_msg(vf, v_opcode, v_retval, msg, msglen);
 
 	if (ret) {
-		dev_err(&pf->pdev->dev, "invalid message from vf %d\n", vf_id);
+		dev_err(&pf->pdev->dev, "Invalid message from vf %d, opcode %d, len %d\n",
+			local_vf_id, v_opcode, msglen);
 		return ret;
 	}
-	wr32(hw, I40E_VFGEN_RSTAT1(vf_id), I40E_VFR_VFACTIVE);
+
 	switch (v_opcode) {
 	case I40E_VIRTCHNL_OP_VERSION:
 		ret = i40e_vc_get_version_msg(vf);
@@ -1941,7 +1801,8 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
 		ret = i40e_vc_get_vf_resources_msg(vf);
 		break;
 	case I40E_VIRTCHNL_OP_RESET_VF:
-		ret = i40e_vc_reset_vf_msg(vf);
+		i40e_vc_reset_vf_msg(vf);
+		ret = 0;
 		break;
 	case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
 		ret = i40e_vc_config_promiscuous_mode_msg(vf, msg, msglen);
@@ -1973,13 +1834,10 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
 	case I40E_VIRTCHNL_OP_GET_STATS:
 		ret = i40e_vc_get_stats_msg(vf, msg, msglen);
 		break;
-	case I40E_VIRTCHNL_OP_FCOE:
-		ret = i40e_vc_fcoe_msg(vf, msg, msglen);
-		break;
 	case I40E_VIRTCHNL_OP_UNKNOWN:
 	default:
-		dev_err(&pf->pdev->dev,
-			"Unsupported opcode %d from vf %d\n", v_opcode, vf_id);
+		dev_err(&pf->pdev->dev, "Unsupported opcode %d from vf %d\n",
+			v_opcode, local_vf_id);
 		ret = i40e_vc_send_resp_to_vf(vf, v_opcode,
 					      I40E_ERR_NOT_IMPLEMENTED);
 		break;
@@ -2015,19 +1873,7 @@ int i40e_vc_process_vflr_event(struct i40e_pf *pf)
 			/* clear the bit in GLGEN_VFLRSTAT */
 			wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx));
 
-			if (i40e_reset_vf(vf, true))
-				dev_err(&pf->pdev->dev,
-					"Unable to reset the VF %d\n", vf_id);
-			/* free up vf resources to destroy vsi state */
-			i40e_free_vf_res(vf);
-
-			/* allocate new vf resources with the default state */
-			if (i40e_alloc_vf_res(vf))
-				dev_err(&pf->pdev->dev,
-					"Unable to allocate VF resources %d\n",
-					vf_id);
-
-			i40e_enable_vf_mappings(vf);
+			i40e_reset_vf(vf, true);
 		}
 	}
 
@@ -2183,6 +2029,7 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
 		goto error_param;
 	}
 	memcpy(vf->default_lan_addr.addr, mac, ETH_ALEN);
+	vf->pf_set_mac = true;
 	dev_info(&pf->pdev->dev, "Reload the VF driver to make this change effective.\n");
 	ret = 0;
 
@@ -2229,6 +2076,20 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
 		goto error_pvid;
 	}
 
+	if (vsi->info.pvid == 0 && i40e_is_vsi_in_vlan(vsi))
+		dev_err(&pf->pdev->dev,
+			"VF %d has already configured VLAN filters and the administrator is requesting a port VLAN override.\nPlease unload and reload the VF driver for this change to take effect.\n",
+			vf_id);
+
+	/* Check for the condition where a port VLAN ID filter was already
+	 * set and is now being deleted by setting it to zero.  Before
+	 * deleting all the old VLAN filters we must add new ones with
+	 * -1 (I40E_VLAN_ANY), otherwise we're left with all our
+	 * MAC addresses deleted.
+	 */
+	if (!(vlan_id || qos) && vsi->info.pvid)
+		ret = i40e_vsi_add_vlan(vsi, I40E_VLAN_ANY);
+
 	if (vsi->info.pvid) {
 		/* kill old VLAN */
 		ret = i40e_vsi_kill_vlan(vsi, (le16_to_cpu(vsi->info.pvid) &
@@ -2243,7 +2104,7 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
 		ret = i40e_vsi_add_pvid(vsi,
 				vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT));
 	else
-		i40e_vlan_stripping_disable(vsi);
+		i40e_vsi_remove_pvid(vsi);
 
 	if (vlan_id) {
 		dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n",
@@ -2257,12 +2118,20 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
 				 vsi->back->hw.aq.asq_last_status);
 			goto error_pvid;
 		}
+		/* Kill non-vlan MAC filters - ignore error return since
+		 * there might not be any non-vlan MAC filters.
+		 */
+		i40e_vsi_kill_vlan(vsi, I40E_VLAN_ANY);
 	}
 
 	if (ret) {
 		dev_err(&pf->pdev->dev, "Unable to update VF vsi context\n");
 		goto error_pvid;
 	}
+	/* The Port VLAN needs to be saved across resets the same as the
+	 * default LAN MAC address.
+	 */
+	vf->port_vlan_id = le16_to_cpu(vsi->info.pvid);
 	ret = 0;
 
 error_pvid:
@@ -2294,7 +2163,6 @@ int i40e_ndo_get_vf_config(struct net_device *netdev,
 			   int vf_id, struct ifla_vf_info *ivi)
 {
 	struct i40e_netdev_priv *np = netdev_priv(netdev);
-	struct i40e_mac_filter *f, *ftmp;
 	struct i40e_vsi *vsi = np->vsi;
 	struct i40e_pf *pf = vsi->back;
 	struct i40e_vf *vf;
@@ -2318,11 +2186,7 @@ int i40e_ndo_get_vf_config(struct net_device *netdev,
 
 	ivi->vf = vf_id;
 
-	/* first entry of the list is the default ethernet address */
-	list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
-		memcpy(&ivi->mac, f->macaddr, I40E_ETH_LENGTH_OF_ADDRESS);
-		break;
-	}
+	memcpy(&ivi->mac, vf->default_lan_addr.addr, ETH_ALEN);
 
 	ivi->tx_rate = 0;
 	ivi->vlan = le16_to_cpu(vsi->info.pvid) & I40E_VLAN_MASK;
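Several hunks above replace open-coded find_first_bit()/find_next_bit() loops with for_each_set_bit(); a minimal sketch of the idiom (hypothetical helper, illustrative only):

#include <linux/bitops.h>	/* for_each_set_bit() */
#include <linux/printk.h>

/* Hypothetical walker, not from the patch: visits only the set bits of a
 * queue bitmap, in ascending order, stopping at the I40E_MAX_VSI_QP limit.
 */
static void sketch_walk_queue_map(unsigned long qmap)
{
	unsigned int q;

	for_each_set_bit(q, &qmap, I40E_MAX_VSI_QP)
		pr_debug("queue %u is mapped\n", q);
}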
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index 360382cf3040..cc1feee36e12 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
  * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
  *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
@@ -82,6 +81,8 @@ struct i40e_vf {
 
 	struct i40e_virtchnl_ether_addr default_lan_addr;
 	struct i40e_virtchnl_ether_addr default_fcoe_addr;
+	u16 port_vlan_id;
+	bool pf_set_mac;	/* The VMM admin set the VF MAC address */
 
 	/* VSI indices - actual VSI pointers are maintained in the PF structure
 	 * When assigned, these will be non-zero, because VSI 0 is always
@@ -104,7 +105,7 @@ int i40e_pci_sriov_configure(struct pci_dev *dev, int num_vfs);
 int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
 			   u32 v_retval, u8 *msg, u16 msglen);
 int i40e_vc_process_vflr_event(struct i40e_pf *pf);
-int i40e_reset_vf(struct i40e_vf *vf, bool flr);
+void i40e_reset_vf(struct i40e_vf *vf, bool flr);
 void i40e_vc_notify_vf_reset(struct i40e_vf *vf);
 
 /* vf configuration related iplink handlers */
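i40e_quiesce_vf_pci() and the VPGEN_VFRSTAT wait in i40e_reset_vf() both use the same bounded-poll pattern; a generic sketch under that assumption (hypothetical helper, not from the patch):

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/delay.h>	/* udelay() */

/* Poll a condition up to 'tries' times with a short delay between reads.
 * The callback stands in for an rd32()-style register check.
 */
static int sketch_poll_until(bool (*done)(void *ctx), void *ctx,
			     unsigned int tries, unsigned int delay_us)
{
	unsigned int i;

	for (i = 0; i < tries; i++) {
		if (done(ctx))
			return 0;
		udelay(delay_us);
	}
	return -EIO;	/* condition never became true */
}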
diff --git a/drivers/net/ethernet/intel/i40evf/Makefile b/drivers/net/ethernet/intel/i40evf/Makefile
new file mode 100644
index 000000000000..e09be37a07a8
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/Makefile
@@ -0,0 +1,33 @@
+################################################################################
+#
+# Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+# Copyright(c) 2013 Intel Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+# more details.
+#
+# The full GNU General Public License is included in this distribution in
+# the file called "COPYING".
+#
+# Contact Information:
+# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+#
+################################################################################
+
+#
+# Makefile for the Intel(R) 40GbE VF driver
+#
+
+obj-$(CONFIG_I40EVF) += i40evf.o
+
+i40evf-objs :=	i40evf_main.o i40evf_ethtool.o i40evf_virtchnl.o \
+		i40e_txrx.o i40e_common.o i40e_adminq.o
+
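With CONFIG_I40EVF enabled, kbuild links the objects listed above into a single i40evf.ko; from the top of a configured kernel tree the module can be rebuilt on its own with the usual invocation (illustrative):

	make M=drivers/net/ethernet/intel/i40evf modules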
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
new file mode 100644
index 000000000000..5470ce95936e
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
@@ -0,0 +1,927 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#include "i40e_status.h"
+#include "i40e_type.h"
+#include "i40e_register.h"
+#include "i40e_adminq.h"
+#include "i40e_prototype.h"
+
+/**
+ *  i40e_adminq_init_regs - Initialize AdminQ registers
+ *  @hw: pointer to the hardware structure
+ *
+ *  This assumes the alloc_asq and alloc_arq functions have already been called
+ **/
+static void i40e_adminq_init_regs(struct i40e_hw *hw)
+{
+	/* set head and tail registers in our local struct */
+	if (hw->mac.type == I40E_MAC_VF) {
+		hw->aq.asq.tail = I40E_VF_ATQT1;
+		hw->aq.asq.head = I40E_VF_ATQH1;
+		hw->aq.asq.len  = I40E_VF_ATQLEN1;
+		hw->aq.arq.tail = I40E_VF_ARQT1;
+		hw->aq.arq.head = I40E_VF_ARQH1;
+		hw->aq.arq.len  = I40E_VF_ARQLEN1;
+	} else {
+		hw->aq.asq.tail = I40E_PF_ATQT;
+		hw->aq.asq.head = I40E_PF_ATQH;
+		hw->aq.asq.len  = I40E_PF_ATQLEN;
+		hw->aq.arq.tail = I40E_PF_ARQT;
+		hw->aq.arq.head = I40E_PF_ARQH;
+		hw->aq.arq.len  = I40E_PF_ARQLEN;
+	}
+}
+
+/**
+ *  i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings
+ *  @hw: pointer to the hardware structure
+ **/
+static i40e_status i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
+{
+	i40e_status ret_code;
+
+	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
+					 i40e_mem_atq_ring,
+					 (hw->aq.num_asq_entries *
+					 sizeof(struct i40e_aq_desc)),
+					 I40E_ADMINQ_DESC_ALIGNMENT);
+	if (ret_code)
+		return ret_code;
+
+	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
+					  (hw->aq.num_asq_entries *
+					  sizeof(struct i40e_asq_cmd_details)));
+	if (ret_code) {
+		i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
+		return ret_code;
+	}
+
+	return ret_code;
+}
+
+/**
+ *  i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
+ *  @hw: pointer to the hardware structure
+ **/
+static i40e_status i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
+{
+	i40e_status ret_code;
+
+	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
+					 i40e_mem_arq_ring,
+					 (hw->aq.num_arq_entries *
+					 sizeof(struct i40e_aq_desc)),
+					 I40E_ADMINQ_DESC_ALIGNMENT);
+
+	return ret_code;
+}
+
+/**
+ *  i40e_free_adminq_asq - Free Admin Queue send rings
+ *  @hw: pointer to the hardware structure
+ *
+ *  This assumes the posted send buffers have already been cleaned
+ *  and de-allocated
+ **/
+static void i40e_free_adminq_asq(struct i40e_hw *hw)
+{
+	i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
+}
+
+/**
+ *  i40e_free_adminq_arq - Free Admin Queue receive rings
+ *  @hw: pointer to the hardware structure
+ *
+ *  This assumes the posted receive buffers have already been cleaned
+ *  and de-allocated
+ **/
+static void i40e_free_adminq_arq(struct i40e_hw *hw)
+{
+	i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
+}
+
+/**
+ *  i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
+ *  @hw: pointer to the hardware structure
+ **/
+static i40e_status i40e_alloc_arq_bufs(struct i40e_hw *hw)
+{
+	i40e_status ret_code;
+	struct i40e_aq_desc *desc;
+	struct i40e_dma_mem *bi;
+	int i;
+
+	/* We'll be allocating the buffer info memory first, then we can
+	 * allocate the mapped buffers for the event processing
+	 */
+
+	/* buffer_info structures do not need alignment */
+	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
+		(hw->aq.num_arq_entries * sizeof(struct i40e_dma_mem)));
+	if (ret_code)
+		goto alloc_arq_bufs;
+	hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va;
+
+	/* allocate the mapped buffers */
+	for (i = 0; i < hw->aq.num_arq_entries; i++) {
+		bi = &hw->aq.arq.r.arq_bi[i];
+		ret_code = i40e_allocate_dma_mem(hw, bi,
+						 i40e_mem_arq_buf,
+						 hw->aq.arq_buf_size,
+						 I40E_ADMINQ_DESC_ALIGNMENT);
+		if (ret_code)
+			goto unwind_alloc_arq_bufs;
+
+		/* now configure the descriptors for use */
+		desc = I40E_ADMINQ_DESC(hw->aq.arq, i);
+
+		desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF);
+		if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
+			desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB);
+		desc->opcode = 0;
+		/* This is in accordance with the Admin queue design; there is no
+		 * register for buffer size configuration.
+		 */
+		desc->datalen = cpu_to_le16((u16)bi->size);
+		desc->retval = 0;
+		desc->cookie_high = 0;
+		desc->cookie_low = 0;
+		desc->params.external.addr_high =
+			cpu_to_le32(upper_32_bits(bi->pa));
+		desc->params.external.addr_low =
+			cpu_to_le32(lower_32_bits(bi->pa));
+		desc->params.external.param0 = 0;
+		desc->params.external.param1 = 0;
+	}
+
+alloc_arq_bufs:
+	return ret_code;
+
+unwind_alloc_arq_bufs:
+	/* don't try to free the one that failed... */
+	i--;
+	for (; i >= 0; i--)
+		i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
+	i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
+
+	return ret_code;
+}
+
+/**
+ *  i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue
+ *  @hw: pointer to the hardware structure
+ **/
+static i40e_status i40e_alloc_asq_bufs(struct i40e_hw *hw)
+{
+	i40e_status ret_code;
+	struct i40e_dma_mem *bi;
+	int i;
+
+	/* No mapped memory needed yet, just the buffer info structures */
+	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
+		(hw->aq.num_asq_entries * sizeof(struct i40e_dma_mem)));
+	if (ret_code)
+		goto alloc_asq_bufs;
+	hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)hw->aq.asq.dma_head.va;
+
+	/* allocate the mapped buffers */
+	for (i = 0; i < hw->aq.num_asq_entries; i++) {
+		bi = &hw->aq.asq.r.asq_bi[i];
+		ret_code = i40e_allocate_dma_mem(hw, bi,
+						 i40e_mem_asq_buf,
+						 hw->aq.asq_buf_size,
+						 I40E_ADMINQ_DESC_ALIGNMENT);
+		if (ret_code)
+			goto unwind_alloc_asq_bufs;
+	}
+alloc_asq_bufs:
+	return ret_code;
+
+unwind_alloc_asq_bufs:
+	/* don't try to free the one that failed... */
+	i--;
+	for (; i >= 0; i--)
+		i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
+	i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
+
+	return ret_code;
+}
+
+/**
+ *  i40e_free_arq_bufs - Free receive queue buffer info elements
+ *  @hw: pointer to the hardware structure
+ **/
+static void i40e_free_arq_bufs(struct i40e_hw *hw)
+{
+	int i;
+
+	/* free descriptors */
+	for (i = 0; i < hw->aq.num_arq_entries; i++)
+		i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
+
+	/* free the descriptor memory */
+	i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
+
+	/* free the dma header */
+	i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
+}
+
+/**
+ *  i40e_free_asq_bufs - Free send queue buffer info elements
+ *  @hw: pointer to the hardware structure
+ **/
+static void i40e_free_asq_bufs(struct i40e_hw *hw)
+{
+	int i;
+
+	/* only unmap if the address is non-NULL */
+	for (i = 0; i < hw->aq.num_asq_entries; i++)
+		if (hw->aq.asq.r.asq_bi[i].pa)
+			i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
+
+	/* free the buffer info list */
+	i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf);
+
+	/* free the descriptor memory */
+	i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
+
+	/* free the dma header */
+	i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
+}
+
+/**
+ *  i40e_config_asq_regs - configure ASQ registers
+ *  @hw: pointer to the hardware structure
+ *
+ *  Configure base address and length registers for the transmit queue
+ **/
+static void i40e_config_asq_regs(struct i40e_hw *hw)
+{
+	if (hw->mac.type == I40E_MAC_VF) {
+		/* configure the transmit queue */
+		wr32(hw, I40E_VF_ATQBAH1,
+		    upper_32_bits(hw->aq.asq.desc_buf.pa));
+		wr32(hw, I40E_VF_ATQBAL1,
+		    lower_32_bits(hw->aq.asq.desc_buf.pa));
+		wr32(hw, I40E_VF_ATQLEN1, (hw->aq.num_asq_entries |
+					  I40E_VF_ATQLEN1_ATQENABLE_MASK));
+	} else {
+		/* configure the transmit queue */
+		wr32(hw, I40E_PF_ATQBAH,
+		    upper_32_bits(hw->aq.asq.desc_buf.pa));
+		wr32(hw, I40E_PF_ATQBAL,
+		    lower_32_bits(hw->aq.asq.desc_buf.pa));
+		wr32(hw, I40E_PF_ATQLEN, (hw->aq.num_asq_entries |
+					  I40E_PF_ATQLEN_ATQENABLE_MASK));
+	}
+}
+
+/**
+ *  i40e_config_arq_regs - ARQ register configuration
+ *  @hw: pointer to the hardware structure
+ *
+ * Configure base address and length registers for the receive (event) queue
+ **/
+static void i40e_config_arq_regs(struct i40e_hw *hw)
+{
+	if (hw->mac.type == I40E_MAC_VF) {
+		/* configure the receive queue */
+		wr32(hw, I40E_VF_ARQBAH1,
+		    upper_32_bits(hw->aq.arq.desc_buf.pa));
+		wr32(hw, I40E_VF_ARQBAL1,
+		    lower_32_bits(hw->aq.arq.desc_buf.pa));
+		wr32(hw, I40E_VF_ARQLEN1, (hw->aq.num_arq_entries |
+					  I40E_VF_ARQLEN1_ARQENABLE_MASK));
+	} else {
+		/* configure the receive queue */
+		wr32(hw, I40E_PF_ARQBAH,
+		    upper_32_bits(hw->aq.arq.desc_buf.pa));
+		wr32(hw, I40E_PF_ARQBAL,
+		    lower_32_bits(hw->aq.arq.desc_buf.pa));
+		wr32(hw, I40E_PF_ARQLEN, (hw->aq.num_arq_entries |
+					  I40E_PF_ARQLEN_ARQENABLE_MASK));
+	}
+
+	/* Update tail in the HW to post pre-allocated buffers */
+	wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);
+}
+
+/**
+ *  i40e_init_asq - main initialization routine for ASQ
+ *  @hw: pointer to the hardware structure
+ *
+ *  This is the main initialization routine for the Admin Send Queue
+ *  Prior to calling this function, drivers *MUST* set the following fields
+ *  in the hw->aq structure:
+ *     - hw->aq.num_asq_entries
+ *     - hw->aq.asq_buf_size
+ *
+ *  Do *NOT* hold the lock when calling this as the memory allocation routines
+ *  called are not going to be atomic context safe
+ **/
+static i40e_status i40e_init_asq(struct i40e_hw *hw)
+{
+	i40e_status ret_code = 0;
+
+	if (hw->aq.asq.count > 0) {
+		/* queue already initialized */
+		ret_code = I40E_ERR_NOT_READY;
+		goto init_adminq_exit;
+	}
+
+	/* verify input for valid configuration */
+	if ((hw->aq.num_asq_entries == 0) ||
+	    (hw->aq.asq_buf_size == 0)) {
+		ret_code = I40E_ERR_CONFIG;
+		goto init_adminq_exit;
+	}
+
+	hw->aq.asq.next_to_use = 0;
+	hw->aq.asq.next_to_clean = 0;
+	hw->aq.asq.count = hw->aq.num_asq_entries;
+
+	/* allocate the ring memory */
+	ret_code = i40e_alloc_adminq_asq_ring(hw);
+	if (ret_code)
+		goto init_adminq_exit;
+
+	/* allocate buffers in the rings */
+	ret_code = i40e_alloc_asq_bufs(hw);
+	if (ret_code)
+		goto init_adminq_free_rings;
+
+	/* initialize base registers */
+	i40e_config_asq_regs(hw);
+
+	/* success! */
+	goto init_adminq_exit;
+
+init_adminq_free_rings:
+	i40e_free_adminq_asq(hw);
+
+init_adminq_exit:
+	return ret_code;
+}
+
+/**
+ *  i40e_init_arq - initialize ARQ
+ *  @hw: pointer to the hardware structure
+ *
+ *  The main initialization routine for the Admin Receive (Event) Queue.
+ *  Prior to calling this function, drivers *MUST* set the following fields
+ *  in the hw->aq structure:
+ *     - hw->aq.num_arq_entries
+ *     - hw->aq.arq_buf_size
+ *
+ *  Do *NOT* hold the lock when calling this as the memory allocation routines
+ *  called are not going to be atomic context safe
+ **/
+static i40e_status i40e_init_arq(struct i40e_hw *hw)
+{
+	i40e_status ret_code = 0;
+
+	if (hw->aq.arq.count > 0) {
+		/* queue already initialized */
+		ret_code = I40E_ERR_NOT_READY;
+		goto init_adminq_exit;
+	}
+
+	/* verify input for valid configuration */
+	if ((hw->aq.num_arq_entries == 0) ||
+	    (hw->aq.arq_buf_size == 0)) {
+		ret_code = I40E_ERR_CONFIG;
+		goto init_adminq_exit;
+	}
+
+	hw->aq.arq.next_to_use = 0;
+	hw->aq.arq.next_to_clean = 0;
+	hw->aq.arq.count = hw->aq.num_arq_entries;
+
+	/* allocate the ring memory */
+	ret_code = i40e_alloc_adminq_arq_ring(hw);
+	if (ret_code)
+		goto init_adminq_exit;
+
+	/* allocate buffers in the rings */
+	ret_code = i40e_alloc_arq_bufs(hw);
+	if (ret_code)
+		goto init_adminq_free_rings;
+
+	/* initialize base registers */
+	i40e_config_arq_regs(hw);
+
+	/* success! */
+	goto init_adminq_exit;
+
+init_adminq_free_rings:
+	i40e_free_adminq_arq(hw);
+
+init_adminq_exit:
+	return ret_code;
+}
+
+/**
+ *  i40e_shutdown_asq - shutdown the ASQ
+ *  @hw: pointer to the hardware structure
+ *
+ *  The main shutdown routine for the Admin Send Queue
+ **/
+static i40e_status i40e_shutdown_asq(struct i40e_hw *hw)
+{
+	i40e_status ret_code = 0;
+
+	if (hw->aq.asq.count == 0)
+		return I40E_ERR_NOT_READY;
+
+	/* Stop firmware AdminQ processing */
+	wr32(hw, hw->aq.asq.head, 0);
+	wr32(hw, hw->aq.asq.tail, 0);
+	wr32(hw, hw->aq.asq.len, 0);
+
+	/* make sure lock is available */
+	mutex_lock(&hw->aq.asq_mutex);
+
+	hw->aq.asq.count = 0; /* to indicate uninitialized queue */
+
+	/* free ring buffers */
+	i40e_free_asq_bufs(hw);
+
+	mutex_unlock(&hw->aq.asq_mutex);
+
+	return ret_code;
+}
+
+/**
+ *  i40e_shutdown_arq - shutdown ARQ
+ *  @hw: pointer to the hardware structure
+ *
+ *  The main shutdown routine for the Admin Receive Queue
+ **/
+static i40e_status i40e_shutdown_arq(struct i40e_hw *hw)
+{
+	i40e_status ret_code = 0;
+
+	if (hw->aq.arq.count == 0)
+		return I40E_ERR_NOT_READY;
+
+	/* Stop firmware AdminQ processing */
+	wr32(hw, hw->aq.arq.head, 0);
+	wr32(hw, hw->aq.arq.tail, 0);
+	wr32(hw, hw->aq.arq.len, 0);
+
+	/* make sure lock is available */
+	mutex_lock(&hw->aq.arq_mutex);
+
+	hw->aq.arq.count = 0; /* to indicate uninitialized queue */
+
+	/* free ring buffers */
+	i40e_free_arq_bufs(hw);
+
+	mutex_unlock(&hw->aq.arq_mutex);
+
+	return ret_code;
+}
+
+/**
+ *  i40evf_init_adminq - main initialization routine for Admin Queue
+ *  @hw: pointer to the hardware structure
+ *
+ *  Prior to calling this function, drivers *MUST* set the following fields
+ *  in the hw->aq structure:
+ *     - hw->aq.num_asq_entries
+ *     - hw->aq.num_arq_entries
+ *     - hw->aq.arq_buf_size
+ *     - hw->aq.asq_buf_size
+ **/
+i40e_status i40evf_init_adminq(struct i40e_hw *hw)
+{
+	i40e_status ret_code;
+
+	/* verify input for valid configuration */
+	if ((hw->aq.num_arq_entries == 0) ||
+	    (hw->aq.num_asq_entries == 0) ||
+	    (hw->aq.arq_buf_size == 0) ||
+	    (hw->aq.asq_buf_size == 0)) {
+		ret_code = I40E_ERR_CONFIG;
+		goto init_adminq_exit;
+	}
+
+	/* initialize locks */
+	mutex_init(&hw->aq.asq_mutex);
+	mutex_init(&hw->aq.arq_mutex);
+
+	/* Set up register offsets */
+	i40e_adminq_init_regs(hw);
+
+	/* allocate the ASQ */
+	ret_code = i40e_init_asq(hw);
+	if (ret_code)
+		goto init_adminq_destroy_locks;
+
+	/* allocate the ARQ */
+	ret_code = i40e_init_arq(hw);
+	if (ret_code)
+		goto init_adminq_free_asq;
+
+	/* success! */
+	goto init_adminq_exit;
+
+init_adminq_free_asq:
+	i40e_shutdown_asq(hw);
+init_adminq_destroy_locks:
+	mutex_destroy(&hw->aq.asq_mutex);
+	mutex_destroy(&hw->aq.arq_mutex);
+
+init_adminq_exit:
+	return ret_code;
+}
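+
+/* Illustrative usage sketch (not part of this file): before calling
+ * i40evf_init_adminq() a driver is expected to fill in the queue depths and
+ * buffer sizes, e.g.
+ *
+ *	hw->aq.num_asq_entries = 32;
+ *	hw->aq.num_arq_entries = 32;
+ *	hw->aq.asq_buf_size = 512;
+ *	hw->aq.arq_buf_size = 512;
+ *	if (i40evf_init_adminq(hw))
+ *		goto err_adminq;
+ *
+ * The depths and sizes above are arbitrary example values, not requirements.
+ */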
+
+/**
+ *  i40evf_shutdown_adminq - shutdown routine for the Admin Queue
+ *  @hw: pointer to the hardware structure
+ **/
+i40e_status i40evf_shutdown_adminq(struct i40e_hw *hw)
+{
+	i40e_status ret_code = 0;
+
+	if (i40evf_check_asq_alive(hw))
+		i40evf_aq_queue_shutdown(hw, true);
+
+	i40e_shutdown_asq(hw);
+	i40e_shutdown_arq(hw);
+
+	/* destroy the locks */
+	mutex_destroy(&hw->aq.asq_mutex);
+	mutex_destroy(&hw->aq.arq_mutex);
+
+	return ret_code;
+}
+
+/**
+ *  i40e_clean_asq - cleans Admin send queue
+ *  @hw: pointer to the hardware structure
+ *
+ *  returns the number of free desc
+ **/
+static u16 i40e_clean_asq(struct i40e_hw *hw)
+{
+	struct i40e_adminq_ring *asq = &(hw->aq.asq);
+	struct i40e_asq_cmd_details *details;
+	u16 ntc = asq->next_to_clean;
+	struct i40e_aq_desc desc_cb;
+	struct i40e_aq_desc *desc;
+
+	desc = I40E_ADMINQ_DESC(*asq, ntc);
+	details = I40E_ADMINQ_DETAILS(*asq, ntc);
+	while (rd32(hw, hw->aq.asq.head) != ntc) {
+		if (details->callback) {
+			I40E_ADMINQ_CALLBACK cb_func =
+					(I40E_ADMINQ_CALLBACK)details->callback;
+			desc_cb = *desc;
+			cb_func(hw, &desc_cb);
+		}
+		memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
+		memset((void *)details, 0,
+		       sizeof(struct i40e_asq_cmd_details));
+		ntc++;
+		if (ntc == asq->count)
+			ntc = 0;
+		desc = I40E_ADMINQ_DESC(*asq, ntc);
+		details = I40E_ADMINQ_DETAILS(*asq, ntc);
+	}
+
+	asq->next_to_clean = ntc;
+
+	return I40E_DESC_UNUSED(asq);
+}
+
+/**
+ *  i40evf_asq_done - check if FW has processed the Admin Send Queue
+ *  @hw: pointer to the hw struct
+ *
+ *  Returns true if the firmware has processed all descriptors on the
+ *  admin send queue. Returns false if there are still requests pending.
+ **/
+bool i40evf_asq_done(struct i40e_hw *hw)
+{
+	/* AQ designers suggest use of head for better
+	 * timing reliability than DD bit
+	 */
+	return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
+}
+
+/**
+ *  i40evf_asq_send_command - send command to Admin Queue
+ *  @hw: pointer to the hw struct
+ *  @desc: prefilled descriptor describing the command (non DMA mem)
+ *  @buff: buffer to use for indirect commands
+ *  @buff_size: size of buffer for indirect commands
+ *  @cmd_details: pointer to command details structure
+ *
+ *  This is the main send command driver routine for the Admin Queue send
+ *  queue.  It runs the queue, cleans the queue, etc
+ **/
+i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
+				struct i40e_aq_desc *desc,
+				void *buff, /* can be NULL */
+				u16  buff_size,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	i40e_status status = 0;
+	struct i40e_dma_mem *dma_buff = NULL;
+	struct i40e_asq_cmd_details *details;
+	struct i40e_aq_desc *desc_on_ring;
+	bool cmd_completed = false;
+	u16  retval = 0;
+
+	if (hw->aq.asq.count == 0) {
+		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+			   "AQTX: Admin queue not initialized.\n");
+		status = I40E_ERR_QUEUE_EMPTY;
+		goto asq_send_command_exit;
+	}
+
+	details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
+	if (cmd_details) {
+		*details = *cmd_details;
+
+		/* If the cmd_details are defined copy the cookie.  The
+		 * cpu_to_le32 is not needed here because the data is ignored
+		 * by the FW, only used by the driver
+		 */
+		if (details->cookie) {
+			desc->cookie_high =
+				cpu_to_le32(upper_32_bits(details->cookie));
+			desc->cookie_low =
+				cpu_to_le32(lower_32_bits(details->cookie));
+		}
+	} else {
+		memset(details, 0, sizeof(struct i40e_asq_cmd_details));
+	}
+
+	/* clear requested flags and then set additional flags if defined */
+	desc->flags &= ~cpu_to_le16(details->flags_dis);
+	desc->flags |= cpu_to_le16(details->flags_ena);
+
+	mutex_lock(&hw->aq.asq_mutex);
+
+	if (buff_size > hw->aq.asq_buf_size) {
+		i40e_debug(hw,
+			   I40E_DEBUG_AQ_MESSAGE,
+			   "AQTX: Invalid buffer size: %d.\n",
+			   buff_size);
+		status = I40E_ERR_INVALID_SIZE;
+		goto asq_send_command_error;
+	}
+
+	if (details->postpone && !details->async) {
+		i40e_debug(hw,
+			   I40E_DEBUG_AQ_MESSAGE,
+			   "AQTX: Async flag not set along with postpone flag\n");
+		status = I40E_ERR_PARAM;
+		goto asq_send_command_error;
+	}
+
+	/* call clean and check queue available function to reclaim the
+	 * descriptors that were processed by FW, the function returns the
+	 * number of desc available
+	 */
+	/* the clean function called here could be called in a separate thread
+	 * in case of asynchronous completions
+	 */
+	if (i40e_clean_asq(hw) == 0) {
+		i40e_debug(hw,
+			   I40E_DEBUG_AQ_MESSAGE,
+			   "AQTX: Error queue is full.\n");
+		status = I40E_ERR_ADMIN_QUEUE_FULL;
+		goto asq_send_command_error;
+	}
+
+	/* initialize the temp desc pointer with the right desc */
+	desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);
+
+	/* if the desc is available copy the temp desc to the right place */
+	*desc_on_ring = *desc;
+
+	/* if buff is not NULL assume indirect command */
+	if (buff != NULL) {
+		dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
+		/* copy the user buff into the respective DMA buff */
+		memcpy(dma_buff->va, buff, buff_size);
+		desc_on_ring->datalen = cpu_to_le16(buff_size);
+
+		/* Update the address values in the desc with the pa value
+		 * for respective buffer
+		 */
+		desc_on_ring->params.external.addr_high =
+				cpu_to_le32(upper_32_bits(dma_buff->pa));
+		desc_on_ring->params.external.addr_low =
+				cpu_to_le32(lower_32_bits(dma_buff->pa));
+	}
+
+	/* bump the tail */
+	i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring, buff);
+	(hw->aq.asq.next_to_use)++;
+	if (hw->aq.asq.next_to_use == hw->aq.asq.count)
+		hw->aq.asq.next_to_use = 0;
+	if (!details->postpone)
+		wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);
+
+	/* if cmd_details are not defined or async flag is not set,
+	 * we need to wait for desc write back
+	 */
+	if (!details->async && !details->postpone) {
+		u32 total_delay = 0;
+		u32 delay_len = 10;
+
+		do {
+			/* AQ designers suggest use of head for better
+			 * timing reliability than DD bit
+			 */
+			if (i40evf_asq_done(hw))
+				break;
+			/* ugh! delay while spin_lock */
+			udelay(delay_len);
+			total_delay += delay_len;
+		} while (total_delay < I40E_ASQ_CMD_TIMEOUT);
+	}
+
+	/* if ready, copy the desc back to temp */
+	if (i40evf_asq_done(hw)) {
+		*desc = *desc_on_ring;
+		if (buff != NULL)
+			memcpy(buff, dma_buff->va, buff_size);
+		retval = le16_to_cpu(desc->retval);
+		if (retval != 0) {
+			i40e_debug(hw,
+				   I40E_DEBUG_AQ_MESSAGE,
+				   "AQTX: Command completed with error 0x%X.\n",
+				   retval);
+			/* strip off FW internal code */
+			retval &= 0xff;
+		}
+		cmd_completed = true;
+		if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
+			status = 0;
+		else
+			status = I40E_ERR_ADMIN_QUEUE_ERROR;
+		hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
+	}
+
+	/* update the error if time out occurred */
+	if ((!cmd_completed) &&
+	    (!details->async && !details->postpone)) {
+		i40e_debug(hw,
+			   I40E_DEBUG_AQ_MESSAGE,
+			   "AQTX: Writeback timeout.\n");
+		status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
+	}
+
+asq_send_command_error:
+	mutex_unlock(&hw->aq.asq_mutex);
+asq_send_command_exit:
+	return status;
+}
+
+/**
+ *  i40evf_fill_default_direct_cmd_desc - AQ descriptor helper function
+ *  @desc:     pointer to the temp descriptor (non DMA mem)
+ *  @opcode:   the opcode can be used to decide which flags to turn off or on
+ *
+ *  Fill the desc with default values
+ **/
+void i40evf_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
+				       u16 opcode)
+{
+	/* zero out the desc */
+	memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
+	desc->opcode = cpu_to_le16(opcode);
+	desc->flags = cpu_to_le16(I40E_AQ_FLAG_SI);
+}
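+
+/* Illustrative sketch (not part of this file): a typical caller builds a
+ * descriptor with the helper above and lets i40evf_asq_send_command() attach
+ * the indirect buffer; msg and msglen stand in for a caller-owned buffer and
+ * its length:
+ *
+ *	struct i40e_aq_desc desc;
+ *	i40e_status status;
+ *
+ *	i40evf_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_pf);
+ *	if (msglen)
+ *		desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD);
+ *	status = i40evf_asq_send_command(hw, &desc, msg, msglen, NULL);
+ */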
+
+/**
+ *  i40evf_clean_arq_element
+ *  @hw: pointer to the hw struct
+ *  @e: event info from the receive descriptor, includes any buffers
+ *  @pending: number of events that could be left to process
+ *
+ *  This function cleans one Admin Receive Queue element and returns
+ *  the contents through e.  It can also return how many events are
+ *  left to process through 'pending'
+ **/
+i40e_status i40evf_clean_arq_element(struct i40e_hw *hw,
+					     struct i40e_arq_event_info *e,
+					     u16 *pending)
+{
+	i40e_status ret_code = 0;
+	u16 ntc = hw->aq.arq.next_to_clean;
+	struct i40e_aq_desc *desc;
+	struct i40e_dma_mem *bi;
+	u16 desc_idx;
+	u16 datalen;
+	u16 flags;
+	u16 ntu;
+
+	/* take the lock before we start messing with the ring */
+	mutex_lock(&hw->aq.arq_mutex);
+
+	/* set next_to_use to head */
+	ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
+	if (ntu == ntc) {
+		/* nothing to do - shouldn't need to update ring's values */
+		i40e_debug(hw,
+			   I40E_DEBUG_AQ_MESSAGE,
+			   "AQRX: Queue is empty.\n");
+		ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
+		goto clean_arq_element_out;
+	}
+
+	/* now clean the next descriptor */
+	desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
+	desc_idx = ntc;
+	i40evf_debug_aq(hw,
+		      I40E_DEBUG_AQ_COMMAND,
+		      (void *)desc,
+		      hw->aq.arq.r.arq_bi[desc_idx].va);
+
+	flags = le16_to_cpu(desc->flags);
+	if (flags & I40E_AQ_FLAG_ERR) {
+		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
+		hw->aq.arq_last_status =
+			(enum i40e_admin_queue_err)le16_to_cpu(desc->retval);
+		i40e_debug(hw,
+			   I40E_DEBUG_AQ_MESSAGE,
+			   "AQRX: Event received with error 0x%X.\n",
+			   hw->aq.arq_last_status);
+	} else {
+		e->desc = *desc;
+		datalen = le16_to_cpu(desc->datalen);
+		e->msg_size = min(datalen, e->msg_size);
+		if (e->msg_buf != NULL && (e->msg_size != 0))
+			memcpy(e->msg_buf, hw->aq.arq.r.arq_bi[desc_idx].va,
+			       e->msg_size);
+	}
+
+	/* Restore the original datalen and buffer address in the desc,
+	 * FW updates datalen to indicate the event message
+	 * size
+	 */
+	bi = &hw->aq.arq.r.arq_bi[ntc];
+	memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
+
+	desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF);
+	if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
+		desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB);
+	desc->datalen = cpu_to_le16((u16)bi->size);
+	desc->params.external.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
+	desc->params.external.addr_low = cpu_to_le32(lower_32_bits(bi->pa));
+
+	/* set tail = the last cleaned desc index. */
+	wr32(hw, hw->aq.arq.tail, ntc);
+	/* ntc is updated to tail + 1 */
+	ntc++;
+	if (ntc == hw->aq.num_arq_entries)
+		ntc = 0;
+	hw->aq.arq.next_to_clean = ntc;
+	hw->aq.arq.next_to_use = ntu;
+
+clean_arq_element_out:
+	/* Set pending if needed, unlock and return */
+	if (pending != NULL)
+		*pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
+	mutex_unlock(&hw->aq.arq_mutex);
+
+	return ret_code;
+}
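+
+/* Illustrative sketch (not part of this file): ARQ events are usually drained
+ * in a loop with a caller-owned message buffer; the buffer length shown is an
+ * arbitrary example:
+ *
+ *	struct i40e_arq_event_info event;
+ *	u16 pending;
+ *
+ *	event.msg_buf = kzalloc(I40E_AQ_LARGE_BUF, GFP_KERNEL);
+ *	do {
+ *		event.msg_size = I40E_AQ_LARGE_BUF;
+ *		if (i40evf_clean_arq_element(hw, &event, &pending))
+ *			break;
+ *		...process event.desc and event.msg_buf here...
+ *	} while (pending);
+ *	kfree(event.msg_buf);
+ */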
+
+void i40evf_resume_aq(struct i40e_hw *hw)
+{
+	/* Registers are reset after PF reset */
+	hw->aq.asq.next_to_use = 0;
+	hw->aq.asq.next_to_clean = 0;
+
+	i40e_config_asq_regs(hw);
+
+	hw->aq.arq.next_to_use = 0;
+	hw->aq.arq.next_to_clean = 0;
+
+	i40e_config_arq_regs(hw);
+}
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
new file mode 100644
index 000000000000..8f72c31d95cc
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
@@ -0,0 +1,106 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_ADMINQ_H_
+#define _I40E_ADMINQ_H_
+
+#include "i40e_osdep.h"
+#include "i40e_adminq_cmd.h"
+
+#define I40E_ADMINQ_DESC(R, i)   \
+	(&(((struct i40e_aq_desc *)((R).desc_buf.va))[i]))
+
+#define I40E_ADMINQ_DESC_ALIGNMENT 4096
+
+struct i40e_adminq_ring {
+	struct i40e_virt_mem dma_head;	/* space for dma structures */
+	struct i40e_dma_mem desc_buf;	/* descriptor ring memory */
+	struct i40e_virt_mem cmd_buf;	/* command buffer memory */
+
+	union {
+		struct i40e_dma_mem *asq_bi;
+		struct i40e_dma_mem *arq_bi;
+	} r;
+
+	u16 count;		/* Number of descriptors */
+	u16 rx_buf_len;		/* Admin Receive Queue buffer length */
+
+	/* used for interrupt processing */
+	u16 next_to_use;
+	u16 next_to_clean;
+
+	/* used for queue tracking */
+	u32 head;
+	u32 tail;
+	u32 len;
+};
+
+/* ASQ transaction details */
+struct i40e_asq_cmd_details {
+	void *callback; /* cast from type I40E_ADMINQ_CALLBACK */
+	u64 cookie;
+	u16 flags_ena;
+	u16 flags_dis;
+	bool async;
+	bool postpone;
+};
+
+#define I40E_ADMINQ_DETAILS(R, i)   \
+	(&(((struct i40e_asq_cmd_details *)((R).cmd_buf.va))[i]))
+
+/* ARQ event information */
+struct i40e_arq_event_info {
+	struct i40e_aq_desc desc;
+	u16 msg_size;
+	u8 *msg_buf;
+};
+
+/* Admin Queue information */
+struct i40e_adminq_info {
+	struct i40e_adminq_ring arq;    /* receive queue */
+	struct i40e_adminq_ring asq;    /* send queue */
+	u16 num_arq_entries;            /* receive queue depth */
+	u16 num_asq_entries;            /* send queue depth */
+	u16 arq_buf_size;               /* receive queue buffer size */
+	u16 asq_buf_size;               /* send queue buffer size */
+	u16 fw_maj_ver;                 /* firmware major version */
+	u16 fw_min_ver;                 /* firmware minor version */
+	u16 api_maj_ver;                /* api major version */
+	u16 api_min_ver;                /* api minor version */
+
+	struct mutex asq_mutex; /* Send queue lock */
+	struct mutex arq_mutex; /* Receive queue lock */
+
+	/* last status values on send and receive queues */
+	enum i40e_admin_queue_err asq_last_status;
+	enum i40e_admin_queue_err arq_last_status;
+};
+
+/* general information */
+#define I40E_AQ_LARGE_BUF	512
+#define I40E_ASQ_CMD_TIMEOUT	100000  /* usecs */
+
+void i40evf_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
+				       u16 opcode);
+
+#endif /* _I40E_ADMINQ_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
new file mode 100644
index 000000000000..f7cea1bca38d
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
@@ -0,0 +1,2153 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_ADMINQ_CMD_H_
+#define _I40E_ADMINQ_CMD_H_
+
+/* This header file defines the i40e Admin Queue commands and is shared between
+ * i40e Firmware and Software.
+ *
+ * This file needs to comply with the Linux Kernel coding style.
+ */
+
+#define I40E_FW_API_VERSION_MAJOR  0x0001
+#define I40E_FW_API_VERSION_MINOR  0x0001
+#define I40E_FW_API_VERSION_A0_MINOR  0x0000
+
+struct i40e_aq_desc {
+	__le16 flags;
+	__le16 opcode;
+	__le16 datalen;
+	__le16 retval;
+	__le32 cookie_high;
+	__le32 cookie_low;
+	union {
+		struct {
+			__le32 param0;
+			__le32 param1;
+			__le32 param2;
+			__le32 param3;
+		} internal;
+		struct {
+			__le32 param0;
+			__le32 param1;
+			__le32 addr_high;
+			__le32 addr_low;
+		} external;
+		u8 raw[16];
+	} params;
+};
+
+/* Flags sub-structure
+ * |0  |1  |2  |3  |4  |5  |6  |7  |8  |9  |10 |11 |12 |13 |14 |15 |
+ * |DD |CMP|ERR|VFE| * *  RESERVED * * |LB |RD |VFC|BUF|SI |EI |FE |
+ */
+
+/* command flags and offsets */
+#define I40E_AQ_FLAG_DD_SHIFT  0
+#define I40E_AQ_FLAG_CMP_SHIFT 1
+#define I40E_AQ_FLAG_ERR_SHIFT 2
+#define I40E_AQ_FLAG_VFE_SHIFT 3
+#define I40E_AQ_FLAG_LB_SHIFT  9
+#define I40E_AQ_FLAG_RD_SHIFT  10
+#define I40E_AQ_FLAG_VFC_SHIFT 11
+#define I40E_AQ_FLAG_BUF_SHIFT 12
+#define I40E_AQ_FLAG_SI_SHIFT  13
+#define I40E_AQ_FLAG_EI_SHIFT  14
+#define I40E_AQ_FLAG_FE_SHIFT  15
+
+#define I40E_AQ_FLAG_DD  (1 << I40E_AQ_FLAG_DD_SHIFT)  /* 0x1    */
+#define I40E_AQ_FLAG_CMP (1 << I40E_AQ_FLAG_CMP_SHIFT) /* 0x2    */
+#define I40E_AQ_FLAG_ERR (1 << I40E_AQ_FLAG_ERR_SHIFT) /* 0x4    */
+#define I40E_AQ_FLAG_VFE (1 << I40E_AQ_FLAG_VFE_SHIFT) /* 0x8    */
+#define I40E_AQ_FLAG_LB  (1 << I40E_AQ_FLAG_LB_SHIFT)  /* 0x200  */
+#define I40E_AQ_FLAG_RD  (1 << I40E_AQ_FLAG_RD_SHIFT)  /* 0x400  */
+#define I40E_AQ_FLAG_VFC (1 << I40E_AQ_FLAG_VFC_SHIFT) /* 0x800  */
+#define I40E_AQ_FLAG_BUF (1 << I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */
+#define I40E_AQ_FLAG_SI  (1 << I40E_AQ_FLAG_SI_SHIFT)  /* 0x2000 */
+#define I40E_AQ_FLAG_EI  (1 << I40E_AQ_FLAG_EI_SHIFT)  /* 0x4000 */
+#define I40E_AQ_FLAG_FE  (1 << I40E_AQ_FLAG_FE_SHIFT)  /* 0x8000 */
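+
+/* Illustrative example of how the flags above combine: an indirect command
+ * whose buffer is read by the firmware and is larger than I40E_AQ_LARGE_BUF
+ * would typically carry
+ *
+ *	desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD |
+ *				  I40E_AQ_FLAG_LB);
+ *
+ * while bits such as DD, CMP and ERR are written back by the firmware on
+ * completion rather than set by the driver.
+ */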
+
+/* error codes */
+enum i40e_admin_queue_err {
+	I40E_AQ_RC_OK       = 0,    /* success */
+	I40E_AQ_RC_EPERM    = 1,    /* Operation not permitted */
+	I40E_AQ_RC_ENOENT   = 2,    /* No such element */
+	I40E_AQ_RC_ESRCH    = 3,    /* Bad opcode */
+	I40E_AQ_RC_EINTR    = 4,    /* operation interrupted */
+	I40E_AQ_RC_EIO      = 5,    /* I/O error */
+	I40E_AQ_RC_ENXIO    = 6,    /* No such resource */
+	I40E_AQ_RC_E2BIG    = 7,    /* Arg too long */
+	I40E_AQ_RC_EAGAIN   = 8,    /* Try again */
+	I40E_AQ_RC_ENOMEM   = 9,    /* Out of memory */
+	I40E_AQ_RC_EACCES   = 10,   /* Permission denied */
+	I40E_AQ_RC_EFAULT   = 11,   /* Bad address */
+	I40E_AQ_RC_EBUSY    = 12,   /* Device or resource busy */
+	I40E_AQ_RC_EEXIST   = 13,   /* object already exists */
+	I40E_AQ_RC_EINVAL   = 14,   /* Invalid argument */
+	I40E_AQ_RC_ENOTTY   = 15,   /* Not a typewriter */
+	I40E_AQ_RC_ENOSPC   = 16,   /* No space left or alloc failure */
+	I40E_AQ_RC_ENOSYS   = 17,   /* Function not implemented */
+	I40E_AQ_RC_ERANGE   = 18,   /* Parameter out of range */
+	I40E_AQ_RC_EFLUSHED = 19,   /* Cmd flushed because of prev cmd error */
+	I40E_AQ_RC_BAD_ADDR = 20,   /* Descriptor contains a bad pointer */
+	I40E_AQ_RC_EMODE    = 21,   /* Op not allowed in current dev mode */
+	I40E_AQ_RC_EFBIG    = 22,   /* File too large */
+};
+
+/* Admin Queue command opcodes */
+enum i40e_admin_queue_opc {
+	/* aq commands */
+	i40e_aqc_opc_get_version      = 0x0001,
+	i40e_aqc_opc_driver_version   = 0x0002,
+	i40e_aqc_opc_queue_shutdown   = 0x0003,
+
+	/* resource ownership */
+	i40e_aqc_opc_request_resource = 0x0008,
+	i40e_aqc_opc_release_resource = 0x0009,
+
+	i40e_aqc_opc_list_func_capabilities = 0x000A,
+	i40e_aqc_opc_list_dev_capabilities  = 0x000B,
+
+	i40e_aqc_opc_set_cppm_configuration = 0x0103,
+	i40e_aqc_opc_set_arp_proxy_entry    = 0x0104,
+	i40e_aqc_opc_set_ns_proxy_entry     = 0x0105,
+
+	/* LAA */
+	i40e_aqc_opc_mng_laa                = 0x0106,   /* AQ obsolete */
+	i40e_aqc_opc_mac_address_read       = 0x0107,
+	i40e_aqc_opc_mac_address_write      = 0x0108,
+
+	/* PXE */
+	i40e_aqc_opc_clear_pxe_mode         = 0x0110,
+
+	/* internal switch commands */
+	i40e_aqc_opc_get_switch_config         = 0x0200,
+	i40e_aqc_opc_add_statistics            = 0x0201,
+	i40e_aqc_opc_remove_statistics         = 0x0202,
+	i40e_aqc_opc_set_port_parameters       = 0x0203,
+	i40e_aqc_opc_get_switch_resource_alloc = 0x0204,
+
+	i40e_aqc_opc_add_vsi                = 0x0210,
+	i40e_aqc_opc_update_vsi_parameters  = 0x0211,
+	i40e_aqc_opc_get_vsi_parameters     = 0x0212,
+
+	i40e_aqc_opc_add_pv                = 0x0220,
+	i40e_aqc_opc_update_pv_parameters  = 0x0221,
+	i40e_aqc_opc_get_pv_parameters     = 0x0222,
+
+	i40e_aqc_opc_add_veb               = 0x0230,
+	i40e_aqc_opc_update_veb_parameters = 0x0231,
+	i40e_aqc_opc_get_veb_parameters    = 0x0232,
+
+	i40e_aqc_opc_delete_element  = 0x0243,
+
+	i40e_aqc_opc_add_macvlan                  = 0x0250,
+	i40e_aqc_opc_remove_macvlan               = 0x0251,
+	i40e_aqc_opc_add_vlan                     = 0x0252,
+	i40e_aqc_opc_remove_vlan                  = 0x0253,
+	i40e_aqc_opc_set_vsi_promiscuous_modes    = 0x0254,
+	i40e_aqc_opc_add_tag                      = 0x0255,
+	i40e_aqc_opc_remove_tag                   = 0x0256,
+	i40e_aqc_opc_add_multicast_etag           = 0x0257,
+	i40e_aqc_opc_remove_multicast_etag        = 0x0258,
+	i40e_aqc_opc_update_tag                   = 0x0259,
+	i40e_aqc_opc_add_control_packet_filter    = 0x025A,
+	i40e_aqc_opc_remove_control_packet_filter = 0x025B,
+	i40e_aqc_opc_add_cloud_filters            = 0x025C,
+	i40e_aqc_opc_remove_cloud_filters         = 0x025D,
+
+	i40e_aqc_opc_add_mirror_rule    = 0x0260,
+	i40e_aqc_opc_delete_mirror_rule = 0x0261,
+
+	i40e_aqc_opc_set_storm_control_config = 0x0280,
+	i40e_aqc_opc_get_storm_control_config = 0x0281,
+
+	/* DCB commands */
+	i40e_aqc_opc_dcb_ignore_pfc = 0x0301,
+	i40e_aqc_opc_dcb_updated    = 0x0302,
+
+	/* TX scheduler */
+	i40e_aqc_opc_configure_vsi_bw_limit            = 0x0400,
+	i40e_aqc_opc_configure_vsi_ets_sla_bw_limit    = 0x0406,
+	i40e_aqc_opc_configure_vsi_tc_bw               = 0x0407,
+	i40e_aqc_opc_query_vsi_bw_config               = 0x0408,
+	i40e_aqc_opc_query_vsi_ets_sla_config          = 0x040A,
+	i40e_aqc_opc_configure_switching_comp_bw_limit = 0x0410,
+
+	i40e_aqc_opc_enable_switching_comp_ets             = 0x0413,
+	i40e_aqc_opc_modify_switching_comp_ets             = 0x0414,
+	i40e_aqc_opc_disable_switching_comp_ets            = 0x0415,
+	i40e_aqc_opc_configure_switching_comp_ets_bw_limit = 0x0416,
+	i40e_aqc_opc_configure_switching_comp_bw_config    = 0x0417,
+	i40e_aqc_opc_query_switching_comp_ets_config       = 0x0418,
+	i40e_aqc_opc_query_port_ets_config                 = 0x0419,
+	i40e_aqc_opc_query_switching_comp_bw_config        = 0x041A,
+	i40e_aqc_opc_suspend_port_tx                       = 0x041B,
+	i40e_aqc_opc_resume_port_tx                        = 0x041C,
+
+	/* hmc */
+	i40e_aqc_opc_query_hmc_resource_profile = 0x0500,
+	i40e_aqc_opc_set_hmc_resource_profile   = 0x0501,
+
+	/* phy commands */
+	i40e_aqc_opc_get_phy_abilities   = 0x0600,
+	i40e_aqc_opc_set_phy_config      = 0x0601,
+	i40e_aqc_opc_set_mac_config      = 0x0603,
+	i40e_aqc_opc_set_link_restart_an = 0x0605,
+	i40e_aqc_opc_get_link_status     = 0x0607,
+	i40e_aqc_opc_set_phy_int_mask    = 0x0613,
+	i40e_aqc_opc_get_local_advt_reg  = 0x0614,
+	i40e_aqc_opc_set_local_advt_reg  = 0x0615,
+	i40e_aqc_opc_get_partner_advt    = 0x0616,
+	i40e_aqc_opc_set_lb_modes        = 0x0618,
+	i40e_aqc_opc_get_phy_wol_caps    = 0x0621,
+	i40e_aqc_opc_set_phy_reset       = 0x0622,
+	i40e_aqc_opc_upload_ext_phy_fm   = 0x0625,
+
+	/* NVM commands */
+	i40e_aqc_opc_nvm_read   = 0x0701,
+	i40e_aqc_opc_nvm_erase  = 0x0702,
+	i40e_aqc_opc_nvm_update = 0x0703,
+
+	/* virtualization commands */
+	i40e_aqc_opc_send_msg_to_pf   = 0x0801,
+	i40e_aqc_opc_send_msg_to_vf   = 0x0802,
+	i40e_aqc_opc_send_msg_to_peer = 0x0803,
+
+	/* alternate structure */
+	i40e_aqc_opc_alternate_write          = 0x0900,
+	i40e_aqc_opc_alternate_write_indirect = 0x0901,
+	i40e_aqc_opc_alternate_read           = 0x0902,
+	i40e_aqc_opc_alternate_read_indirect  = 0x0903,
+	i40e_aqc_opc_alternate_write_done     = 0x0904,
+	i40e_aqc_opc_alternate_set_mode       = 0x0905,
+	i40e_aqc_opc_alternate_clear_port     = 0x0906,
+
+	/* LLDP commands */
+	i40e_aqc_opc_lldp_get_mib    = 0x0A00,
+	i40e_aqc_opc_lldp_update_mib = 0x0A01,
+	i40e_aqc_opc_lldp_add_tlv    = 0x0A02,
+	i40e_aqc_opc_lldp_update_tlv = 0x0A03,
+	i40e_aqc_opc_lldp_delete_tlv = 0x0A04,
+	i40e_aqc_opc_lldp_stop       = 0x0A05,
+	i40e_aqc_opc_lldp_start      = 0x0A06,
+
+	/* Tunnel commands */
+	i40e_aqc_opc_add_udp_tunnel       = 0x0B00,
+	i40e_aqc_opc_del_udp_tunnel       = 0x0B01,
+	i40e_aqc_opc_tunnel_key_structure = 0x0B10,
+
+	/* Async Events */
+	i40e_aqc_opc_event_lan_overflow = 0x1001,
+
+	/* OEM commands */
+	i40e_aqc_opc_oem_parameter_change     = 0xFE00,
+	i40e_aqc_opc_oem_device_status_change = 0xFE01,
+
+	/* debug commands */
+	i40e_aqc_opc_debug_get_deviceid     = 0xFF00,
+	i40e_aqc_opc_debug_set_mode         = 0xFF01,
+	i40e_aqc_opc_debug_read_reg         = 0xFF03,
+	i40e_aqc_opc_debug_write_reg        = 0xFF04,
+	i40e_aqc_opc_debug_read_reg_sg      = 0xFF05,
+	i40e_aqc_opc_debug_write_reg_sg     = 0xFF06,
+	i40e_aqc_opc_debug_modify_reg       = 0xFF07,
+	i40e_aqc_opc_debug_dump_internals   = 0xFF08,
+	i40e_aqc_opc_debug_modify_internals = 0xFF09,
+};
+
+/* command structures and indirect data structures */
+
+/* Structure naming conventions:
+ * - no suffix for direct command descriptor structures
+ * - _data for indirect sent data
+ * - _resp for indirect return data (data which is both will use _data)
+ * - _completion for direct return data
+ * - _element_ for repeated elements (may also be _data or _resp)
+ *
+ * Command structures are expected to overlay the params.raw member of the basic
+ * descriptor, and as such cannot exceed 16 bytes in length.
+ */
+
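+/* Illustrative example of the overlay convention described above: a command
+ * structure is laid over the 16-byte params area of the descriptor rather
+ * than sent separately, e.g.
+ *
+ *	struct i40e_aqc_queue_shutdown *cmd =
+ *		(struct i40e_aqc_queue_shutdown *)&desc.params.raw;
+ *
+ *	cmd->driver_unloading = cpu_to_le32(I40E_AQ_DRIVER_UNLOADING);
+ *
+ * which is why every such structure must be exactly 16 bytes long.
+ */
+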
+/* This macro is used to generate a compilation error if a structure
+ * is not exactly the correct length. It gives a divide by zero error if the
+ * structure is not of the correct size, otherwise it creates an enum that is
+ * never used.
+ */
+#define I40E_CHECK_STRUCT_LEN(n, X) enum i40e_static_assert_enum_##X \
+	{ i40e_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 1 : 0) }
+
+/* This macro is used extensively to ensure that command structures are 16
+ * bytes in length as they have to map to the raw array of that size.
+ */
+#define I40E_CHECK_CMD_LENGTH(X) I40E_CHECK_STRUCT_LEN(16, X)
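+
+/* For illustration, I40E_CHECK_CMD_LENGTH(i40e_aqc_get_version) expands to
+ * roughly
+ *
+ *	enum i40e_static_assert_enum_i40e_aqc_get_version {
+ *		i40e_static_assert_i40e_aqc_get_version =
+ *			16 / ((sizeof(struct i40e_aqc_get_version) == 16) ? 1 : 0)
+ *	};
+ *
+ * so a structure of the wrong size becomes a divide-by-zero in a constant
+ * expression and the build fails.
+ */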
+
+/* internal (0x00XX) commands */
+
+/* Get version (direct 0x0001) */
+struct i40e_aqc_get_version {
+	__le32 rom_ver;
+	__le32 fw_build;
+	__le16 fw_major;
+	__le16 fw_minor;
+	__le16 api_major;
+	__le16 api_minor;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_version);
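+
+/* Illustrative sketch (not part of this file): get_version is a direct
+ * command, so the firmware writes the result into the descriptor itself and
+ * the caller reads it back through the overlay, e.g.
+ *
+ *	struct i40e_aq_desc desc;
+ *	struct i40e_aqc_get_version *resp =
+ *		(struct i40e_aqc_get_version *)&desc.params.raw;
+ *
+ *	i40evf_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_version);
+ *	if (!i40evf_asq_send_command(hw, &desc, NULL, 0, NULL)) {
+ *		hw->aq.fw_maj_ver = le16_to_cpu(resp->fw_major);
+ *		hw->aq.fw_min_ver = le16_to_cpu(resp->fw_minor);
+ *	}
+ */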
+
+/* Send driver version (indirect 0x0002) */
+struct i40e_aqc_driver_version {
+	u8     driver_major_ver;
+	u8     driver_minor_ver;
+	u8     driver_build_ver;
+	u8     driver_subbuild_ver;
+	u8     reserved[4];
+	__le32 address_high;
+	__le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_driver_version);
+
+/* Queue Shutdown (direct 0x0003) */
+struct i40e_aqc_queue_shutdown {
+	__le32     driver_unloading;
+#define I40E_AQ_DRIVER_UNLOADING    0x1
+	u8     reserved[12];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_queue_shutdown);
+
+/* Request resource ownership (direct 0x0008)
+ * Release resource ownership (direct 0x0009)
+ */
+#define I40E_AQ_RESOURCE_NVM               1
+#define I40E_AQ_RESOURCE_SDP               2
+#define I40E_AQ_RESOURCE_ACCESS_READ       1
+#define I40E_AQ_RESOURCE_ACCESS_WRITE      2
+#define I40E_AQ_RESOURCE_NVM_READ_TIMEOUT  3000
+#define I40E_AQ_RESOURCE_NVM_WRITE_TIMEOUT 180000
+
+struct i40e_aqc_request_resource {
+	__le16 resource_id;
+	__le16 access_type;
+	__le32 timeout;
+	__le32 resource_number;
+	u8     reserved[4];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_request_resource);
+
+/* Get function capabilities (indirect 0x000A)
+ * Get device capabilities (indirect 0x000B)
+ */
+struct i40e_aqc_list_capabilites {
+	u8 command_flags;
+#define I40E_AQ_LIST_CAP_PF_INDEX_EN     1
+	u8 pf_index;
+	u8 reserved[2];
+	__le32 count;
+	__le32 addr_high;
+	__le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_list_capabilites);
+
+struct i40e_aqc_list_capabilities_element_resp {
+	__le16 id;
+	u8     major_rev;
+	u8     minor_rev;
+	__le32 number;
+	__le32 logical_id;
+	__le32 phys_id;
+	u8     reserved[16];
+};
+
+/* list of caps */
+
+#define I40E_AQ_CAP_ID_SWITCH_MODE      0x0001
+#define I40E_AQ_CAP_ID_MNG_MODE         0x0002
+#define I40E_AQ_CAP_ID_NPAR_ACTIVE      0x0003
+#define I40E_AQ_CAP_ID_OS2BMC_CAP       0x0004
+#define I40E_AQ_CAP_ID_FUNCTIONS_VALID  0x0005
+#define I40E_AQ_CAP_ID_ALTERNATE_RAM    0x0006
+#define I40E_AQ_CAP_ID_SRIOV            0x0012
+#define I40E_AQ_CAP_ID_VF               0x0013
+#define I40E_AQ_CAP_ID_VMDQ             0x0014
+#define I40E_AQ_CAP_ID_8021QBG          0x0015
+#define I40E_AQ_CAP_ID_8021QBR          0x0016
+#define I40E_AQ_CAP_ID_VSI              0x0017
+#define I40E_AQ_CAP_ID_DCB              0x0018
+#define I40E_AQ_CAP_ID_FCOE             0x0021
+#define I40E_AQ_CAP_ID_RSS              0x0040
+#define I40E_AQ_CAP_ID_RXQ              0x0041
+#define I40E_AQ_CAP_ID_TXQ              0x0042
+#define I40E_AQ_CAP_ID_MSIX             0x0043
+#define I40E_AQ_CAP_ID_VF_MSIX          0x0044
+#define I40E_AQ_CAP_ID_FLOW_DIRECTOR    0x0045
+#define I40E_AQ_CAP_ID_1588             0x0046
+#define I40E_AQ_CAP_ID_IWARP            0x0051
+#define I40E_AQ_CAP_ID_LED              0x0061
+#define I40E_AQ_CAP_ID_SDP              0x0062
+#define I40E_AQ_CAP_ID_MDIO             0x0063
+#define I40E_AQ_CAP_ID_FLEX10           0x00F1
+#define I40E_AQ_CAP_ID_CEM              0x00F2
+
+/* Set CPPM Configuration (direct 0x0103) */
+struct i40e_aqc_cppm_configuration {
+	__le16 command_flags;
+#define I40E_AQ_CPPM_EN_LTRC    0x0800
+#define I40E_AQ_CPPM_EN_DMCTH   0x1000
+#define I40E_AQ_CPPM_EN_DMCTLX  0x2000
+#define I40E_AQ_CPPM_EN_HPTC    0x4000
+#define I40E_AQ_CPPM_EN_DMARC   0x8000
+	__le16 ttlx;
+	__le32 dmacr;
+	__le16 dmcth;
+	u8     hptc;
+	u8     reserved;
+	__le32 pfltrc;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_cppm_configuration);
+
+/* Set ARP Proxy command / response (indirect 0x0104) */
+struct i40e_aqc_arp_proxy_data {
+	__le16 command_flags;
+#define I40E_AQ_ARP_INIT_IPV4           0x0008
+#define I40E_AQ_ARP_UNSUP_CTL           0x0010
+#define I40E_AQ_ARP_ENA                 0x0020
+#define I40E_AQ_ARP_ADD_IPV4            0x0040
+#define I40E_AQ_ARP_DEL_IPV4            0x0080
+	__le16 table_id;
+	__le32 pfpm_proxyfc;
+	__le32 ip_addr;
+	u8     mac_addr[6];
+};
+
+/* Set NS Proxy Table Entry Command (indirect 0x0105) */
+struct i40e_aqc_ns_proxy_data {
+	__le16 table_idx_mac_addr_0;
+	__le16 table_idx_mac_addr_1;
+	__le16 table_idx_ipv6_0;
+	__le16 table_idx_ipv6_1;
+	__le16 control;
+#define I40E_AQ_NS_PROXY_ADD_0             0x0100
+#define I40E_AQ_NS_PROXY_DEL_0             0x0200
+#define I40E_AQ_NS_PROXY_ADD_1             0x0400
+#define I40E_AQ_NS_PROXY_DEL_1             0x0800
+#define I40E_AQ_NS_PROXY_ADD_IPV6_0        0x1000
+#define I40E_AQ_NS_PROXY_DEL_IPV6_0        0x2000
+#define I40E_AQ_NS_PROXY_ADD_IPV6_1        0x4000
+#define I40E_AQ_NS_PROXY_DEL_IPV6_1        0x8000
+#define I40E_AQ_NS_PROXY_COMMAND_SEQ       0x0001
+#define I40E_AQ_NS_PROXY_INIT_IPV6_TBL     0x0002
+#define I40E_AQ_NS_PROXY_INIT_MAC_TBL      0x0004
+	u8     mac_addr_0[6];
+	u8     mac_addr_1[6];
+	u8     local_mac_addr[6];
+	u8     ipv6_addr_0[16]; /* Warning! spec specifies BE byte order */
+	u8     ipv6_addr_1[16];
+};
+
+/* Manage LAA Command (0x0106) - obsolete */
+struct i40e_aqc_mng_laa {
+	__le16	command_flags;
+#define I40E_AQ_LAA_FLAG_WR   0x8000
+	u8     reserved[2];
+	__le32 sal;
+	__le16 sah;
+	u8     reserved2[6];
+};
+
+/* Manage MAC Address Read Command (indirect 0x0107) */
+struct i40e_aqc_mac_address_read {
+	__le16	command_flags;
+#define I40E_AQC_LAN_ADDR_VALID   0x10
+#define I40E_AQC_SAN_ADDR_VALID   0x20
+#define I40E_AQC_PORT_ADDR_VALID  0x40
+#define I40E_AQC_WOL_ADDR_VALID   0x80
+#define I40E_AQC_ADDR_VALID_MASK  0xf0
+	u8     reserved[6];
+	__le32 addr_high;
+	__le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_read);
+
+struct i40e_aqc_mac_address_read_data {
+	u8 pf_lan_mac[6];
+	u8 pf_san_mac[6];
+	u8 port_mac[6];
+	u8 pf_wol_mac[6];
+};
+
+I40E_CHECK_STRUCT_LEN(24, i40e_aqc_mac_address_read_data);
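+
+/* Illustrative sketch (not part of this file): the read is an indirect
+ * command, so the addresses come back in a caller-supplied buffer while the
+ * validity flags come back in the descriptor, e.g. (mac is a hypothetical
+ * destination buffer):
+ *
+ *	struct i40e_aq_desc desc;
+ *	struct i40e_aqc_mac_address_read *cmd =
+ *		(struct i40e_aqc_mac_address_read *)&desc.params.raw;
+ *	struct i40e_aqc_mac_address_read_data addrs;
+ *
+ *	i40evf_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_mac_address_read);
+ *	desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF);
+ *	if (!i40evf_asq_send_command(hw, &desc, &addrs, sizeof(addrs), NULL) &&
+ *	    (cmd->command_flags & cpu_to_le16(I40E_AQC_LAN_ADDR_VALID)))
+ *		memcpy(mac, addrs.pf_lan_mac, 6);
+ */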
+
+/* Manage MAC Address Write Command (0x0108) */
+struct i40e_aqc_mac_address_write {
+	__le16 command_flags;
+#define I40E_AQC_WRITE_TYPE_LAA_ONLY    0x0000
+#define I40E_AQC_WRITE_TYPE_LAA_WOL     0x4000
+#define I40E_AQC_WRITE_TYPE_PORT        0x8000
+#define I40E_AQC_WRITE_TYPE_MASK        0xc000
+	__le16 mac_sah;
+	__le32 mac_sal;
+	u8     reserved[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_write);
+
+/* PXE commands (0x011x) */
+
+/* Clear PXE Command and response  (direct 0x0110) */
+struct i40e_aqc_clear_pxe {
+	u8	rx_cnt;
+	u8	reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_clear_pxe);
+
+/* Switch configuration commands (0x02xx) */
+
+/* Used by many indirect commands that only pass an SEID and a buffer in the
+ * command
+ */
+struct i40e_aqc_switch_seid {
+	__le16 seid;
+	u8     reserved[6];
+	__le32 addr_high;
+	__le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_switch_seid);
+
+/* Get Switch Configuration command (indirect 0x0200)
+ * uses i40e_aqc_switch_seid for the descriptor
+ */
+struct i40e_aqc_get_switch_config_header_resp {
+	__le16 num_reported;
+	__le16 num_total;
+	u8     reserved[12];
+};
+
+struct i40e_aqc_switch_config_element_resp {
+	u8     element_type;
+#define I40E_AQ_SW_ELEM_TYPE_MAC        1
+#define I40E_AQ_SW_ELEM_TYPE_PF         2
+#define I40E_AQ_SW_ELEM_TYPE_VF         3
+#define I40E_AQ_SW_ELEM_TYPE_EMP        4
+#define I40E_AQ_SW_ELEM_TYPE_BMC        5
+#define I40E_AQ_SW_ELEM_TYPE_PV         16
+#define I40E_AQ_SW_ELEM_TYPE_VEB        17
+#define I40E_AQ_SW_ELEM_TYPE_PA         18
+#define I40E_AQ_SW_ELEM_TYPE_VSI        19
+	u8     revision;
+#define I40E_AQ_SW_ELEM_REV_1           1
+	__le16 seid;
+	__le16 uplink_seid;
+	__le16 downlink_seid;
+	u8     reserved[3];
+	u8     connection_type;
+#define I40E_AQ_CONN_TYPE_REGULAR       0x1
+#define I40E_AQ_CONN_TYPE_DEFAULT       0x2
+#define I40E_AQ_CONN_TYPE_CASCADED      0x3
+	__le16 scheduler_id;
+	__le16 element_info;
+};
+
+/* Get Switch Configuration (indirect 0x0200)
+ *    an array of elements is returned in the response buffer
+ *    the first in the array is the header, remainder are elements
+ */
+struct i40e_aqc_get_switch_config_resp {
+	struct i40e_aqc_get_switch_config_header_resp header;
+	struct i40e_aqc_switch_config_element_resp    element[1];
+};
+
+/* Add Statistics (direct 0x0201)
+ * Remove Statistics (direct 0x0202)
+ */
+struct i40e_aqc_add_remove_statistics {
+	__le16 seid;
+	__le16 vlan;
+	__le16 stat_index;
+	u8     reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_statistics);
+
+/* Set Port Parameters command (direct 0x0203) */
+struct i40e_aqc_set_port_parameters {
+	__le16 command_flags;
+#define I40E_AQ_SET_P_PARAMS_SAVE_BAD_PACKETS   1
+#define I40E_AQ_SET_P_PARAMS_PAD_SHORT_PACKETS  2 /* must set! */
+#define I40E_AQ_SET_P_PARAMS_DOUBLE_VLAN_ENA    4
+	__le16 bad_frame_vsi;
+	__le16 default_seid;        /* reserved for command */
+	u8     reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_port_parameters);
+
+/* Get Switch Resource Allocation (indirect 0x0204) */
+struct i40e_aqc_get_switch_resource_alloc {
+	u8     num_entries;         /* reserved for command */
+	u8     reserved[7];
+	__le32 addr_high;
+	__le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_switch_resource_alloc);
+
+/* expect an array of these structs in the response buffer */
+struct i40e_aqc_switch_resource_alloc_element_resp {
+	u8     resource_type;
+#define I40E_AQ_RESOURCE_TYPE_VEB                 0x0
+#define I40E_AQ_RESOURCE_TYPE_VSI                 0x1
+#define I40E_AQ_RESOURCE_TYPE_MACADDR             0x2
+#define I40E_AQ_RESOURCE_TYPE_STAG                0x3
+#define I40E_AQ_RESOURCE_TYPE_ETAG                0x4
+#define I40E_AQ_RESOURCE_TYPE_MULTICAST_HASH      0x5
+#define I40E_AQ_RESOURCE_TYPE_UNICAST_HASH        0x6
+#define I40E_AQ_RESOURCE_TYPE_VLAN                0x7
+#define I40E_AQ_RESOURCE_TYPE_VSI_LIST_ENTRY      0x8
+#define I40E_AQ_RESOURCE_TYPE_ETAG_LIST_ENTRY     0x9
+#define I40E_AQ_RESOURCE_TYPE_VLAN_STAT_POOL      0xA
+#define I40E_AQ_RESOURCE_TYPE_MIRROR_RULE         0xB
+#define I40E_AQ_RESOURCE_TYPE_QUEUE_SETS          0xC
+#define I40E_AQ_RESOURCE_TYPE_VLAN_FILTERS        0xD
+#define I40E_AQ_RESOURCE_TYPE_INNER_MAC_FILTERS   0xF
+#define I40E_AQ_RESOURCE_TYPE_IP_FILTERS          0x10
+#define I40E_AQ_RESOURCE_TYPE_GRE_VN_KEYS         0x11
+#define I40E_AQ_RESOURCE_TYPE_VN2_KEYS            0x12
+#define I40E_AQ_RESOURCE_TYPE_TUNNEL_PORTS        0x13
+	u8     reserved1;
+	__le16 guaranteed;
+	__le16 total;
+	__le16 used;
+	__le16 total_unalloced;
+	u8     reserved2[6];
+};
+
+/* Add VSI (indirect 0x0210)
+ *    this indirect command uses struct i40e_aqc_vsi_properties_data
+ *    as the indirect buffer (128 bytes)
+ *
+ * Update VSI (indirect 0x0211)
+ *     uses the same data structure as Add VSI
+ *
+ * Get VSI (indirect 0x0212)
+ *     uses the same completion and data structure as Add VSI
+ */
+struct i40e_aqc_add_get_update_vsi {
+	__le16 uplink_seid;
+	u8     connection_type;
+#define I40E_AQ_VSI_CONN_TYPE_NORMAL            0x1
+#define I40E_AQ_VSI_CONN_TYPE_DEFAULT           0x2
+#define I40E_AQ_VSI_CONN_TYPE_CASCADED          0x3
+	u8     reserved1;
+	u8     vf_id;
+	u8     reserved2;
+	__le16 vsi_flags;
+#define I40E_AQ_VSI_TYPE_SHIFT          0x0
+#define I40E_AQ_VSI_TYPE_MASK           (0x3 << I40E_AQ_VSI_TYPE_SHIFT)
+#define I40E_AQ_VSI_TYPE_VF             0x0
+#define I40E_AQ_VSI_TYPE_VMDQ2          0x1
+#define I40E_AQ_VSI_TYPE_PF             0x2
+#define I40E_AQ_VSI_TYPE_EMP_MNG        0x3
+#define I40E_AQ_VSI_FLAG_CASCADED_PV    0x4
+#define I40E_AQ_VSI_FLAG_CLOUD_VSI      0x8
+	__le32 addr_high;
+	__le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_get_update_vsi);
+
+struct i40e_aqc_add_get_update_vsi_completion {
+	__le16 seid;
+	__le16 vsi_number;
+	__le16 vsi_used;
+	__le16 vsi_free;
+	__le32 addr_high;
+	__le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_get_update_vsi_completion);
+
+struct i40e_aqc_vsi_properties_data {
+	/* first 96 bytes are written by SW */
+	__le16 valid_sections;
+#define I40E_AQ_VSI_PROP_SWITCH_VALID       0x0001
+#define I40E_AQ_VSI_PROP_SECURITY_VALID     0x0002
+#define I40E_AQ_VSI_PROP_VLAN_VALID         0x0004
+#define I40E_AQ_VSI_PROP_CAS_PV_VALID       0x0008
+#define I40E_AQ_VSI_PROP_INGRESS_UP_VALID   0x0010
+#define I40E_AQ_VSI_PROP_EGRESS_UP_VALID    0x0020
+#define I40E_AQ_VSI_PROP_QUEUE_MAP_VALID    0x0040
+#define I40E_AQ_VSI_PROP_QUEUE_OPT_VALID    0x0080
+#define I40E_AQ_VSI_PROP_OUTER_UP_VALID     0x0100
+#define I40E_AQ_VSI_PROP_SCHED_VALID        0x0200
+	/* switch section */
+	__le16 switch_id; /* 12bit id combined with flags below */
+#define I40E_AQ_VSI_SW_ID_SHIFT             0x0000
+#define I40E_AQ_VSI_SW_ID_MASK              (0xFFF << I40E_AQ_VSI_SW_ID_SHIFT)
+#define I40E_AQ_VSI_SW_ID_FLAG_NOT_STAG     0x1000
+#define I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB     0x2000
+#define I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB     0x4000
+	u8     sw_reserved[2];
+	/* security section */
+	u8     sec_flags;
+#define I40E_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD    0x01
+#define I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK    0x02
+#define I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK     0x04
+	u8     sec_reserved;
+	/* VLAN section */
+	__le16 pvid; /* VLANS include priority bits */
+	__le16 fcoe_pvid;
+	u8     port_vlan_flags;
+#define I40E_AQ_VSI_PVLAN_MODE_SHIFT        0x00
+#define I40E_AQ_VSI_PVLAN_MODE_MASK         (0x03 << \
+						I40E_AQ_VSI_PVLAN_MODE_SHIFT)
+#define I40E_AQ_VSI_PVLAN_MODE_TAGGED       0x01
+#define I40E_AQ_VSI_PVLAN_MODE_UNTAGGED     0x02
+#define I40E_AQ_VSI_PVLAN_MODE_ALL          0x03
+#define I40E_AQ_VSI_PVLAN_INSERT_PVID       0x04
+#define I40E_AQ_VSI_PVLAN_EMOD_SHIFT        0x03
+#define I40E_AQ_VSI_PVLAN_EMOD_MASK         (0x3 << \
+					I40E_AQ_VSI_PVLAN_EMOD_SHIFT)
+#define I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH     0x0
+#define I40E_AQ_VSI_PVLAN_EMOD_STR_UP       0x08
+#define I40E_AQ_VSI_PVLAN_EMOD_STR          0x10
+#define I40E_AQ_VSI_PVLAN_EMOD_NOTHING      0x18
+	u8     pvlan_reserved[3];
+	/* ingress egress up sections */
+	__le32 ingress_table; /* bitmap, 3 bits per up */
+#define I40E_AQ_VSI_UP_TABLE_UP0_SHIFT      0
+#define I40E_AQ_VSI_UP_TABLE_UP0_MASK       (0x7 << \
+					I40E_AQ_VSI_UP_TABLE_UP0_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP1_SHIFT      3
+#define I40E_AQ_VSI_UP_TABLE_UP1_MASK       (0x7 << \
+					I40E_AQ_VSI_UP_TABLE_UP1_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP2_SHIFT      6
+#define I40E_AQ_VSI_UP_TABLE_UP2_MASK       (0x7 << \
+					I40E_AQ_VSI_UP_TABLE_UP2_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP3_SHIFT      9
+#define I40E_AQ_VSI_UP_TABLE_UP3_MASK       (0x7 << \
+					I40E_AQ_VSI_UP_TABLE_UP3_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP4_SHIFT      12
+#define I40E_AQ_VSI_UP_TABLE_UP4_MASK       (0x7 << \
+					I40E_AQ_VSI_UP_TABLE_UP4_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP5_SHIFT      15
+#define I40E_AQ_VSI_UP_TABLE_UP5_MASK       (0x7 << \
+					I40E_AQ_VSI_UP_TABLE_UP5_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP6_SHIFT      18
+#define I40E_AQ_VSI_UP_TABLE_UP6_MASK       (0x7 << \
+					I40E_AQ_VSI_UP_TABLE_UP6_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP7_SHIFT      21
+#define I40E_AQ_VSI_UP_TABLE_UP7_MASK       (0x7 << \
+					I40E_AQ_VSI_UP_TABLE_UP7_SHIFT)
+	__le32 egress_table;   /* same defines as for ingress table */
+	/* cascaded PV section */
+	__le16 cas_pv_tag;
+	u8     cas_pv_flags;
+#define I40E_AQ_VSI_CAS_PV_TAGX_SHIFT      0x00
+#define I40E_AQ_VSI_CAS_PV_TAGX_MASK       (0x03 << \
+						I40E_AQ_VSI_CAS_PV_TAGX_SHIFT)
+#define I40E_AQ_VSI_CAS_PV_TAGX_LEAVE      0x00
+#define I40E_AQ_VSI_CAS_PV_TAGX_REMOVE     0x01
+#define I40E_AQ_VSI_CAS_PV_TAGX_COPY       0x02
+#define I40E_AQ_VSI_CAS_PV_INSERT_TAG      0x10
+#define I40E_AQ_VSI_CAS_PV_ETAG_PRUNE      0x20
+#define I40E_AQ_VSI_CAS_PV_ACCEPT_HOST_TAG 0x40
+	u8     cas_pv_reserved;
+	/* queue mapping section */
+	__le16 mapping_flags;
+#define I40E_AQ_VSI_QUE_MAP_CONTIG          0x0
+#define I40E_AQ_VSI_QUE_MAP_NONCONTIG       0x1
+	__le16 queue_mapping[16];
+#define I40E_AQ_VSI_QUEUE_SHIFT             0x0
+#define I40E_AQ_VSI_QUEUE_MASK              (0x7FF << I40E_AQ_VSI_QUEUE_SHIFT)
+	__le16 tc_mapping[8];
+#define I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT     0
+#define I40E_AQ_VSI_TC_QUE_OFFSET_MASK      (0x1FF << \
+						I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT)
+#define I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT     9
+#define I40E_AQ_VSI_TC_QUE_NUMBER_MASK      (0x7 << \
+						I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
+	/* queueing option section */
+	u8     queueing_opt_flags;
+#define I40E_AQ_VSI_QUE_OPT_TCP_ENA         0x10
+#define I40E_AQ_VSI_QUE_OPT_FCOE_ENA        0x20
+	u8     queueing_opt_reserved[3];
+	/* scheduler section */
+	u8     up_enable_bits;
+	u8     sched_reserved;
+	/* outer up section */
+	__le32 outer_up_table; /* same structure and defines as ingress table */
+	u8     cmd_reserved[8];
+	/* last 32 bytes are written by FW */
+	__le16 qs_handle[8];
+#define I40E_AQ_VSI_QS_HANDLE_INVALID	0xFFFF
+	__le16 stat_counter_idx;
+	__le16 sched_id;
+	u8     resp_reserved[12];
+};
+
+I40E_CHECK_STRUCT_LEN(128, i40e_aqc_vsi_properties_data);
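+
+/* Illustrative example of the 3-bits-per-UP packing used by ingress_table,
+ * egress_table and outer_up_table above: writing the 3-bit value 2 into the
+ * slot for user priority 5, leaving the other slots at 0, would look like
+ *
+ *	info.ingress_table = cpu_to_le32(2 << I40E_AQ_VSI_UP_TABLE_UP5_SHIFT);
+ *
+ * where info stands in for a caller-owned struct i40e_aqc_vsi_properties_data
+ * and the value is only an example.
+ */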
+
+/* Add Port Virtualizer (direct 0x0220)
+ * also used for update PV (direct 0x0221) but only flags are used
+ * (IS_CTRL_PORT only works on add PV)
+ */
+struct i40e_aqc_add_update_pv {
+	__le16 command_flags;
+#define I40E_AQC_PV_FLAG_PV_TYPE                0x1
+#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_STAG_EN    0x2
+#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_ETAG_EN    0x4
+#define I40E_AQC_PV_FLAG_IS_CTRL_PORT           0x8
+	__le16 uplink_seid;
+	__le16 connected_seid;
+	u8     reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv);
+
+struct i40e_aqc_add_update_pv_completion {
+	/* reserved for update; for add also encodes error if rc == ENOSPC */
+	__le16 pv_seid;
+#define I40E_AQC_PV_ERR_FLAG_NO_PV               0x1
+#define I40E_AQC_PV_ERR_FLAG_NO_SCHED            0x2
+#define I40E_AQC_PV_ERR_FLAG_NO_COUNTER          0x4
+#define I40E_AQC_PV_ERR_FLAG_NO_ENTRY            0x8
+	u8     reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv_completion);
+
+/* Get PV Params (direct 0x0222)
+ * uses i40e_aqc_switch_seid for the descriptor
+ */
+
+struct i40e_aqc_get_pv_params_completion {
+	__le16 seid;
+	__le16 default_stag;
+	__le16 pv_flags; /* same flags as add_pv */
+#define I40E_AQC_GET_PV_PV_TYPE            0x1
+#define I40E_AQC_GET_PV_FRWD_UNKNOWN_STAG  0x2
+#define I40E_AQC_GET_PV_FRWD_UNKNOWN_ETAG  0x4
+	u8     reserved[8];
+	__le16 default_port_seid;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_pv_params_completion);
+
+/* Add VEB (direct 0x0230) */
+struct i40e_aqc_add_veb {
+	__le16 uplink_seid;
+	__le16 downlink_seid;
+	__le16 veb_flags;
+#define I40E_AQC_ADD_VEB_FLOATING           0x1
+#define I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT    1
+#define I40E_AQC_ADD_VEB_PORT_TYPE_MASK     (0x3 << \
+					I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT)
+#define I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT  0x2
+#define I40E_AQC_ADD_VEB_PORT_TYPE_DATA     0x4
+#define I40E_AQC_ADD_VEB_ENABLE_L2_FILTER   0x8
+	u8     enable_tcs;
+	u8     reserved[9];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb);
+
+struct i40e_aqc_add_veb_completion {
+	u8     reserved[6];
+	__le16 switch_seid;
+	/* also encodes error if rc == ENOSPC; codes are the same as add_pv */
+	__le16 veb_seid;
+#define I40E_AQC_VEB_ERR_FLAG_NO_VEB              0x1
+#define I40E_AQC_VEB_ERR_FLAG_NO_SCHED            0x2
+#define I40E_AQC_VEB_ERR_FLAG_NO_COUNTER          0x4
+#define I40E_AQC_VEB_ERR_FLAG_NO_ENTRY            0x8
+	__le16 statistic_index;
+	__le16 vebs_used;
+	__le16 vebs_free;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb_completion);
+
+/* Get VEB Parameters (direct 0x0232)
+ * uses i40e_aqc_switch_seid for the descriptor
+ */
+struct i40e_aqc_get_veb_parameters_completion {
+	__le16 seid;
+	__le16 switch_id;
+	__le16 veb_flags; /* only the first/last flags from 0x0230 are valid */
+	__le16 statistic_index;
+	__le16 vebs_used;
+	__le16 vebs_free;
+	u8     reserved[4];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_veb_parameters_completion);
+
+/* Delete Element (direct 0x0243)
+ * uses the generic i40e_aqc_switch_seid
+ */
+
+/* Add MAC-VLAN (indirect 0x0250) */
+
+/* used for the command for most vlan commands */
+struct i40e_aqc_macvlan {
+	__le16 num_addresses;
+	__le16 seid[3];
+#define I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT  0
+#define I40E_AQC_MACVLAN_CMD_SEID_NUM_MASK   (0x3FF << \
+					I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT)
+#define I40E_AQC_MACVLAN_CMD_SEID_VALID      0x8000
+	__le32 addr_high;
+	__le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_macvlan);
+
+/* indirect data for command and response */
+struct i40e_aqc_add_macvlan_element_data {
+	u8     mac_addr[6];
+	__le16 vlan_tag;
+	__le16 flags;
+#define I40E_AQC_MACVLAN_ADD_PERFECT_MATCH     0x0001
+#define I40E_AQC_MACVLAN_ADD_HASH_MATCH        0x0002
+#define I40E_AQC_MACVLAN_ADD_IGNORE_VLAN       0x0004
+#define I40E_AQC_MACVLAN_ADD_TO_QUEUE          0x0008
+	__le16 queue_number;
+#define I40E_AQC_MACVLAN_CMD_QUEUE_SHIFT  0
+#define I40E_AQC_MACVLAN_CMD_QUEUE_MASK   (0x7FF << \
+					I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT)
+	/* response section */
+	u8     match_method;
+#define I40E_AQC_MM_PERFECT_MATCH             0x01
+#define I40E_AQC_MM_HASH_MATCH                0x02
+#define I40E_AQC_MM_ERR_NO_RES                0xFF
+	u8     reserved1[3];
+};
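+
+/* Illustrative sketch (not part of this file): the descriptor carries
+ * i40e_aqc_macvlan while the indirect buffer is an array of the elements
+ * above, one per address; mac is a hypothetical caller-owned MAC address:
+ *
+ *	struct i40e_aqc_add_macvlan_element_data elem;
+ *
+ *	memset(&elem, 0, sizeof(elem));
+ *	memcpy(elem.mac_addr, mac, 6);
+ *	elem.flags = cpu_to_le16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH |
+ *				 I40E_AQC_MACVLAN_ADD_IGNORE_VLAN);
+ *
+ * The filled array is then passed as the indirect buffer for the
+ * i40e_aqc_opc_add_macvlan command.
+ */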
+
+struct i40e_aqc_add_remove_macvlan_completion {
+	__le16 perfect_mac_used;
+	__le16 perfect_mac_free;
+	__le16 unicast_hash_free;
+	__le16 multicast_hash_free;
+	__le32 addr_high;
+	__le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_macvlan_completion);
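+
+/* Illustrative sketch only: filling one add-MAC/VLAN element for a
+ * perfect-match filter; when no VLAN is given the VLAN is ignored.
+ * The helper name example_fill_macvlan() is hypothetical.
+ */
+static inline void example_fill_macvlan(
+			struct i40e_aqc_add_macvlan_element_data *e,
+			const u8 *mac, u16 vlan)
+{
+	u16 flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
+
+	memcpy(e->mac_addr, mac, 6);
+	e->vlan_tag = cpu_to_le16(vlan);
+	if (!vlan)
+		flags |= I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
+	e->flags = cpu_to_le16(flags);
+}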
+
+/* Remove MAC-VLAN (indirect 0x0251)
+ * uses i40e_aqc_macvlan for the descriptor
+ * data points to an array of num_addresses of elements
+ */
+
+struct i40e_aqc_remove_macvlan_element_data {
+	u8     mac_addr[6];
+	__le16 vlan_tag;
+	u8     flags;
+#define I40E_AQC_MACVLAN_DEL_PERFECT_MATCH      0x01
+#define I40E_AQC_MACVLAN_DEL_HASH_MATCH         0x02
+#define I40E_AQC_MACVLAN_DEL_IGNORE_VLAN        0x08
+#define I40E_AQC_MACVLAN_DEL_ALL_VSIS           0x10
+	u8     reserved[3];
+	/* reply section */
+	u8     error_code;
+#define I40E_AQC_REMOVE_MACVLAN_SUCCESS         0x0
+#define I40E_AQC_REMOVE_MACVLAN_FAIL            0xFF
+	u8     reply_reserved[3];
+};
+
+/* Add VLAN (indirect 0x0252)
+ * Remove VLAN (indirect 0x0253)
+ * use the generic i40e_aqc_macvlan for the command
+ */
+struct i40e_aqc_add_remove_vlan_element_data {
+	__le16 vlan_tag;
+	u8     vlan_flags;
+/* flags for add VLAN */
+#define I40E_AQC_ADD_VLAN_LOCAL             0x1
+#define I40E_AQC_ADD_PVLAN_TYPE_SHIFT       1
+#define I40E_AQC_ADD_PVLAN_TYPE_MASK        (0x3 << \
+						I40E_AQC_ADD_PVLAN_TYPE_SHIFT)
+#define I40E_AQC_ADD_PVLAN_TYPE_REGULAR     0x0
+#define I40E_AQC_ADD_PVLAN_TYPE_PRIMARY     0x2
+#define I40E_AQC_ADD_PVLAN_TYPE_SECONDARY   0x4
+#define I40E_AQC_VLAN_PTYPE_SHIFT           3
+#define I40E_AQC_VLAN_PTYPE_MASK            (0x3 << I40E_AQC_VLAN_PTYPE_SHIFT)
+#define I40E_AQC_VLAN_PTYPE_REGULAR_VSI     0x0
+#define I40E_AQC_VLAN_PTYPE_PROMISC_VSI     0x8
+#define I40E_AQC_VLAN_PTYPE_COMMUNITY_VSI   0x10
+#define I40E_AQC_VLAN_PTYPE_ISOLATED_VSI    0x18
+/* flags for remove VLAN */
+#define I40E_AQC_REMOVE_VLAN_ALL            0x1
+	u8     reserved;
+	u8     result;
+/* flags for add VLAN */
+#define I40E_AQC_ADD_VLAN_SUCCESS       0x0
+#define I40E_AQC_ADD_VLAN_FAIL_REQUEST  0xFE
+#define I40E_AQC_ADD_VLAN_FAIL_RESOURCE 0xFF
+/* flags for remove VLAN */
+#define I40E_AQC_REMOVE_VLAN_SUCCESS    0x0
+#define I40E_AQC_REMOVE_VLAN_FAIL       0xFF
+	u8     reserved1[3];
+};
+
+struct i40e_aqc_add_remove_vlan_completion {
+	u8     reserved[4];
+	__le16 vlans_used;
+	__le16 vlans_free;
+	__le32 addr_high;
+	__le32 addr_low;
+};
+
+/* Set VSI Promiscuous Modes (direct 0x0254) */
+struct i40e_aqc_set_vsi_promiscuous_modes {
+	__le16 promiscuous_flags;
+	__le16 valid_flags;
+/* flags used for both fields above */
+#define I40E_AQC_SET_VSI_PROMISC_UNICAST     0x01
+#define I40E_AQC_SET_VSI_PROMISC_MULTICAST   0x02
+#define I40E_AQC_SET_VSI_PROMISC_BROADCAST   0x04
+#define I40E_AQC_SET_VSI_DEFAULT             0x08
+#define I40E_AQC_SET_VSI_PROMISC_VLAN        0x10
+	__le16 seid;
+#define I40E_AQC_VSI_PROM_CMD_SEID_MASK      0x3FF
+	u8     reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_vsi_promiscuous_modes);
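+
+/* Illustrative sketch only: valid_flags selects which promiscuous_flags
+ * bits firmware applies, so both fields are written here.  The helper
+ * name example_set_uc_promisc() is hypothetical.
+ */
+static inline void example_set_uc_promisc(
+			struct i40e_aqc_set_vsi_promiscuous_modes *cmd,
+			u16 seid, bool enable)
+{
+	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
+	cmd->promiscuous_flags =
+		cpu_to_le16(enable ? I40E_AQC_SET_VSI_PROMISC_UNICAST : 0);
+	cmd->seid = cpu_to_le16(seid & I40E_AQC_VSI_PROM_CMD_SEID_MASK);
+}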
+
+/* Add S/E-tag command (direct 0x0255)
+ * Uses generic i40e_aqc_add_remove_tag_completion for completion
+ */
+struct i40e_aqc_add_tag {
+	__le16 flags;
+#define I40E_AQC_ADD_TAG_FLAG_TO_QUEUE     0x0001
+	__le16 seid;
+#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT  0
+#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_MASK   (0x3FF << \
+					I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT)
+	__le16 tag;
+	__le16 queue_number;
+	u8     reserved[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_tag);
+
+struct i40e_aqc_add_remove_tag_completion {
+	u8     reserved[12];
+	__le16 tags_used;
+	__le16 tags_free;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_tag_completion);
+
+/* Remove S/E-tag command (direct 0x0256)
+ * Uses generic i40e_aqc_add_remove_tag_completion for completion
+ */
+struct i40e_aqc_remove_tag {
+	__le16 seid;
+#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT  0
+#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_MASK   (0x3FF << \
+					I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT)
+	__le16 tag;
+	u8     reserved[12];
+};
+
+/* Add multicast E-Tag (direct 0x0257)
+ * Delete multicast E-Tag (direct 0x0258) uses only the pv_seid and etag
+ * fields and no external data
+ */
+struct i40e_aqc_add_remove_mcast_etag {
+	__le16 pv_seid;
+	__le16 etag;
+	u8     num_unicast_etags;
+	u8     reserved[3];
+	__le32 addr_high;          /* address of array of 2-byte s-tags */
+	__le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag);
+
+struct i40e_aqc_add_remove_mcast_etag_completion {
+	u8     reserved[4];
+	__le16 mcast_etags_used;
+	__le16 mcast_etags_free;
+	__le32 addr_high;
+	__le32 addr_low;
+
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag_completion);
+
+/* Update S/E-Tag (direct 0x0259) */
+struct i40e_aqc_update_tag {
+	__le16 seid;
+#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT  0
+#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_MASK   (0x3FF << \
+					I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT)
+	__le16 old_tag;
+	__le16 new_tag;
+	u8     reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag);
+
+struct i40e_aqc_update_tag_completion {
+	u8     reserved[12];
+	__le16 tags_used;
+	__le16 tags_free;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag_completion);
+
+/* Add Control Packet filter (direct 0x025A)
+ * Remove Control Packet filter (direct 0x025B)
+ * uses the i40e_aqc_add_remove_control_packet_filter structure below
+ * and its completion structure
+ */
+struct i40e_aqc_add_remove_control_packet_filter {
+	u8     mac[6];
+	__le16 etype;
+	__le16 flags;
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC    0x0001
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP          0x0002
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE      0x0004
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX            0x0008
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_RX            0x0000
+	__le16 seid;
+#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT  0
+#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_MASK   (0x3FF << \
+				I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT)
+	__le16 queue;
+	u8     reserved[2];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter);
+
+struct i40e_aqc_add_remove_control_packet_filter_completion {
+	__le16 mac_etype_used;
+	__le16 etype_used;
+	__le16 mac_etype_free;
+	__le16 etype_free;
+	u8     reserved[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter_completion);
+
+/* Add Cloud filters (indirect 0x025C)
+ * Remove Cloud filters (indirect 0x025D)
+ * uses the i40e_aqc_add_remove_cloud_filters,
+ * and the generic indirect completion structure
+ */
+struct i40e_aqc_add_remove_cloud_filters {
+	u8     num_filters;
+	u8     reserved;
+	__le16 seid;
+#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT  0
+#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_MASK   (0x3FF << \
+					I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT)
+	u8     reserved2[4];
+	__le32 addr_high;
+	__le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_cloud_filters);
+
+struct i40e_aqc_add_remove_cloud_filters_element_data {
+	u8     outer_mac[6];
+	u8     inner_mac[6];
+	__le16 inner_vlan;
+	union {
+		struct {
+			u8 reserved[12];
+			u8 data[4];
+		} v4;
+		struct {
+			u8 data[16];
+		} v6;
+	} ipaddr;
+	__le16 flags;
+#define I40E_AQC_ADD_CLOUD_FILTER_SHIFT                 0
+#define I40E_AQC_ADD_CLOUD_FILTER_MASK                  (0x3F << \
+					I40E_AQC_ADD_CLOUD_FILTER_SHIFT)
+#define I40E_AQC_ADD_CLOUD_FILTER_OIP_GRE               0x0002
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_GRE        0x0004
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_VNL        0x0007
+/* 0x0000 reserved */
+#define I40E_AQC_ADD_CLOUD_FILTER_OIP                   0x0001
+/* 0x0002 reserved */
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN            0x0003
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID     0x0004
+/* 0x0005 reserved */
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID           0x0006
+/* 0x0007 reserved */
+/* 0x0008 reserved */
+#define I40E_AQC_ADD_CLOUD_FILTER_OMAC                  0x0009
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC                  0x000A
+#define I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC      0x000B
+#define I40E_AQC_ADD_CLOUD_FILTER_IIP                   0x000C
+
+#define I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE               0x0080
+#define I40E_AQC_ADD_CLOUD_VNK_SHIFT                    6
+#define I40E_AQC_ADD_CLOUD_VNK_MASK                     0x00C0
+#define I40E_AQC_ADD_CLOUD_FLAGS_IPV4                   0
+#define I40E_AQC_ADD_CLOUD_FLAGS_IPV6                   0x0100
+
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT               9
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK                0x1E00
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_XVLAN               0
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC          1
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NGE                 2
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_IP                  3
+
+	__le32 tenant_id;
+	u8     reserved[4];
+	__le16 queue_number;
+#define I40E_AQC_ADD_CLOUD_QUEUE_SHIFT                  0
+#define I40E_AQC_ADD_CLOUD_QUEUE_MASK                   (0x3F << \
+					I40E_AQC_ADD_CLOUD_QUEUE_SHIFT)
+	u8     reserved2[14];
+	/* response section */
+	u8     allocation_result;
+#define I40E_AQC_ADD_CLOUD_FILTER_SUCCESS         0x0
+#define I40E_AQC_ADD_CLOUD_FILTER_FAIL            0xFF
+	u8     response_reserved[7];
+};
+
+struct i40e_aqc_remove_cloud_filters_completion {
+	__le16 perfect_ovlan_used;
+	__le16 perfect_ovlan_free;
+	__le16 vlan_used;
+	__le16 vlan_free;
+	__le32 addr_high;
+	__le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_cloud_filters_completion);
+
+/* Add Mirror Rule (indirect or direct 0x0260)
+ * Delete Mirror Rule (indirect or direct 0x0261)
+ * note: some rule types (4,5) do not use an external buffer.
+ *       take care to set the flags correctly.
+ */
+struct i40e_aqc_add_delete_mirror_rule {
+	__le16 seid;
+	__le16 rule_type;
+#define I40E_AQC_MIRROR_RULE_TYPE_SHIFT            0
+#define I40E_AQC_MIRROR_RULE_TYPE_MASK             (0x7 << \
+						I40E_AQC_MIRROR_RULE_TYPE_SHIFT)
+#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS    1
+#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS     2
+#define I40E_AQC_MIRROR_RULE_TYPE_VLAN             3
+#define I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS      4
+#define I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS       5
+	__le16 num_entries;
+	__le16 destination;  /* VSI for add, rule id for delete */
+	__le32 addr_high;    /* address of array of 2-byte VSI or VLAN ids */
+	__le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule);
+
+struct i40e_aqc_add_delete_mirror_rule_completion {
+	u8     reserved[2];
+	__le16 rule_id;  /* only used on add */
+	__le16 mirror_rules_used;
+	__le16 mirror_rules_free;
+	__le32 addr_high;
+	__le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion);
+
+/* Set Storm Control Configuration (direct 0x0280)
+ * Get Storm Control Configuration (direct 0x0281)
+ *    the command and response use the same descriptor structure
+ */
+struct i40e_aqc_set_get_storm_control_config {
+	__le32 broadcast_threshold;
+	__le32 multicast_threshold;
+	__le32 control_flags;
+#define I40E_AQC_STORM_CONTROL_MDIPW            0x01
+#define I40E_AQC_STORM_CONTROL_MDICW            0x02
+#define I40E_AQC_STORM_CONTROL_BDIPW            0x04
+#define I40E_AQC_STORM_CONTROL_BDICW            0x08
+#define I40E_AQC_STORM_CONTROL_BIDU             0x10
+#define I40E_AQC_STORM_CONTROL_INTERVAL_SHIFT   8
+#define I40E_AQC_STORM_CONTROL_INTERVAL_MASK    (0x3FF << \
+					I40E_AQC_STORM_CONTROL_INTERVAL_SHIFT)
+	u8     reserved[4];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_get_storm_control_config);
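+
+/* Illustrative sketch only: the measurement interval occupies bits 8..17
+ * of control_flags and is recovered with the shift/mask pair above.
+ * The helper name example_storm_interval() is hypothetical.
+ */
+static inline u32 example_storm_interval(
+		const struct i40e_aqc_set_get_storm_control_config *cfg)
+{
+	return (le32_to_cpu(cfg->control_flags) &
+		I40E_AQC_STORM_CONTROL_INTERVAL_MASK) >>
+		I40E_AQC_STORM_CONTROL_INTERVAL_SHIFT;
+}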
+
+/* DCB 0x03xx */
+
+/* PFC Ignore (direct 0x0301)
+ *    the command and response use the same descriptor structure
+ */
+struct i40e_aqc_pfc_ignore {
+	u8     tc_bitmap;
+	u8     command_flags; /* unused on response */
+#define I40E_AQC_PFC_IGNORE_SET    0x80
+#define I40E_AQC_PFC_IGNORE_CLEAR  0x0
+	u8     reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_pfc_ignore);
+
+/* DCB Update (direct 0x0302) uses the i40e_aq_desc structure
+ * with no parameters
+ */
+
+/* TX scheduler 0x04xx */
+
+/* Almost all the indirect commands use
+ * this generic struct to pass the SEID in param0
+ */
+struct i40e_aqc_tx_sched_ind {
+	__le16 vsi_seid;
+	u8     reserved[6];
+	__le32 addr_high;
+	__le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_tx_sched_ind);
+
+/* Several commands respond with a set of queue set handles */
+struct i40e_aqc_qs_handles_resp {
+	__le16 qs_handles[8];
+};
+
+/* Configure VSI BW limits (direct 0x0400) */
+struct i40e_aqc_configure_vsi_bw_limit {
+	__le16 vsi_seid;
+	u8     reserved[2];
+	__le16 credit;
+	u8     reserved1[2];
+	u8     max_credit; /* 0-3, limit = 2^max */
+	u8     reserved2[7];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_vsi_bw_limit);
+
+/* Configure VSI Bandwidth Limit per Traffic Type (indirect 0x0406)
+ *    responds with i40e_aqc_qs_handles_resp
+ */
+struct i40e_aqc_configure_vsi_ets_sla_bw_data {
+	u8     tc_valid_bits;
+	u8     reserved[15];
+	__le16 tc_bw_credits[8]; /* FW writes back QS handles here */
+
+	/* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
+	__le16 tc_bw_max[2];
+	u8     reserved1[28];
+};
+
+/* Configure VSI Bandwidth Allocation per Traffic Type (indirect 0x0407)
+ *    responds with i40e_aqc_qs_handles_resp
+ */
+struct i40e_aqc_configure_vsi_tc_bw_data {
+	u8     tc_valid_bits;
+	u8     reserved[3];
+	u8     tc_bw_credits[8];
+	u8     reserved1[4];
+	__le16 qs_handles[8];
+};
+
+/* Query vsi bw configuration (indirect 0x0408) */
+struct i40e_aqc_query_vsi_bw_config_resp {
+	u8     tc_valid_bits;
+	u8     tc_suspended_bits;
+	u8     reserved[14];
+	__le16 qs_handles[8];
+	u8     reserved1[4];
+	__le16 port_bw_limit;
+	u8     reserved2[2];
+	u8     max_bw; /* 0-3, limit = 2^max */
+	u8     reserved3[23];
+};
+
+/* Query VSI Bandwidth Allocation per Traffic Type (indirect 0x040A) */
+struct i40e_aqc_query_vsi_ets_sla_config_resp {
+	u8     tc_valid_bits;
+	u8     reserved[3];
+	u8     share_credits[8];
+	__le16 credits[8];
+
+	/* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
+	__le16 tc_bw_max[2];
+};
+
+/* Configure Switching Component Bandwidth Limit (direct 0x0410) */
+struct i40e_aqc_configure_switching_comp_bw_limit {
+	__le16 seid;
+	u8     reserved[2];
+	__le16 credit;
+	u8     reserved1[2];
+	u8     max_bw; /* 0-3, limit = 2^max */
+	u8     reserved2[7];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_switching_comp_bw_limit);
+
+/* Enable  Physical Port ETS (indirect 0x0413)
+ * Modify  Physical Port ETS (indirect 0x0414)
+ * Disable Physical Port ETS (indirect 0x0415)
+ */
+struct i40e_aqc_configure_switching_comp_ets_data {
+	u8     reserved[4];
+	u8     tc_valid_bits;
+	u8     reserved1;
+	u8     tc_strict_priority_flags;
+	u8     reserved2[17];
+	u8     tc_bw_share_credits[8];
+	u8     reserved3[96];
+};
+
+/* Configure Switching Component Bandwidth Limits per Tc (indirect 0x0416) */
+struct i40e_aqc_configure_switching_comp_ets_bw_limit_data {
+	u8     tc_valid_bits;
+	u8     reserved[15];
+	__le16 tc_bw_credit[8];
+
+	/* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
+	__le16 tc_bw_max[2];
+	u8     reserved1[28];
+};
+
+/* Configure Switching Component Bandwidth Allocation per Tc
+ * (indirect 0x0417)
+ */
+struct i40e_aqc_configure_switching_comp_bw_config_data {
+	u8     tc_valid_bits;
+	u8     reserved[2];
+	u8     absolute_credits; /* bool */
+	u8     tc_bw_share_credits[8];
+	u8     reserved1[20];
+};
+
+/* Query Switching Component Configuration (indirect 0x0418) */
+struct i40e_aqc_query_switching_comp_ets_config_resp {
+	u8     tc_valid_bits;
+	u8     reserved[35];
+	__le16 port_bw_limit;
+	u8     reserved1[2];
+	u8     tc_bw_max; /* 0-3, limit = 2^max */
+	u8     reserved2[23];
+};
+
+/* Query Physical Port ETS Configuration (indirect 0x0419) */
+struct i40e_aqc_query_port_ets_config_resp {
+	u8     reserved[4];
+	u8     tc_valid_bits;
+	u8     reserved1;
+	u8     tc_strict_priority_bits;
+	u8     reserved2;
+	u8     tc_bw_share_credits[8];
+	__le16 tc_bw_limits[8];
+
+	/* 4 bits per tc 0-7, 4th bit reserved, limit = 2^max */
+	__le16 tc_bw_max[2];
+	u8     reserved3[32];
+};
+
+/* Query Switching Component Bandwidth Allocation per Traffic Type
+ * (indirect 0x041A)
+ */
+struct i40e_aqc_query_switching_comp_bw_config_resp {
+	u8     tc_valid_bits;
+	u8     reserved[2];
+	u8     absolute_credits_enable; /* bool */
+	u8     tc_bw_share_credits[8];
+	__le16 tc_bw_limits[8];
+
+	/* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
+	__le16 tc_bw_max[2];
+};
+
+/* Suspend/resume port TX traffic
+ * (direct 0x041B and 0x041C) uses the generic SEID struct
+ */
+
+/* Get and set the active HMC resource profile and status.
+ * (direct 0x0500) and (direct 0x0501)
+ */
+struct i40e_aq_get_set_hmc_resource_profile {
+	u8     pm_profile;
+	u8     pe_vf_enabled;
+	u8     reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aq_get_set_hmc_resource_profile);
+
+enum i40e_aq_hmc_profile {
+	/* I40E_HMC_PROFILE_NO_CHANGE    = 0, reserved */
+	I40E_HMC_PROFILE_DEFAULT     = 1,
+	I40E_HMC_PROFILE_FAVOR_VF    = 2,
+	I40E_HMC_PROFILE_EQUAL       = 3,
+};
+
+#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_PM_MASK       0xF
+#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_COUNT_MASK    0x3F
+
+/* Get PHY Abilities (indirect 0x0600) uses the generic indirect struct */
+
+/* set in param0 for get phy abilities to report qualified modules */
+#define I40E_AQ_PHY_REPORT_QUALIFIED_MODULES  0x0001
+#define I40E_AQ_PHY_REPORT_INITIAL_VALUES     0x0002
+
+enum i40e_aq_phy_type {
+	I40E_PHY_TYPE_SGMII			= 0x0,
+	I40E_PHY_TYPE_1000BASE_KX		= 0x1,
+	I40E_PHY_TYPE_10GBASE_KX4		= 0x2,
+	I40E_PHY_TYPE_10GBASE_KR		= 0x3,
+	I40E_PHY_TYPE_40GBASE_KR4		= 0x4,
+	I40E_PHY_TYPE_XAUI			= 0x5,
+	I40E_PHY_TYPE_XFI			= 0x6,
+	I40E_PHY_TYPE_SFI			= 0x7,
+	I40E_PHY_TYPE_XLAUI			= 0x8,
+	I40E_PHY_TYPE_XLPPI			= 0x9,
+	I40E_PHY_TYPE_40GBASE_CR4_CU		= 0xA,
+	I40E_PHY_TYPE_10GBASE_CR1_CU		= 0xB,
+	I40E_PHY_TYPE_100BASE_TX		= 0x11,
+	I40E_PHY_TYPE_1000BASE_T		= 0x12,
+	I40E_PHY_TYPE_10GBASE_T			= 0x13,
+	I40E_PHY_TYPE_10GBASE_SR		= 0x14,
+	I40E_PHY_TYPE_10GBASE_LR		= 0x15,
+	I40E_PHY_TYPE_10GBASE_SFPP_CU		= 0x16,
+	I40E_PHY_TYPE_10GBASE_CR1		= 0x17,
+	I40E_PHY_TYPE_40GBASE_CR4		= 0x18,
+	I40E_PHY_TYPE_40GBASE_SR4		= 0x19,
+	I40E_PHY_TYPE_40GBASE_LR4		= 0x1A,
+	I40E_PHY_TYPE_20GBASE_KR2		= 0x1B,
+	I40E_PHY_TYPE_MAX
+};
+
+#define I40E_LINK_SPEED_100MB_SHIFT	0x1
+#define I40E_LINK_SPEED_1000MB_SHIFT	0x2
+#define I40E_LINK_SPEED_10GB_SHIFT	0x3
+#define I40E_LINK_SPEED_40GB_SHIFT	0x4
+#define I40E_LINK_SPEED_20GB_SHIFT	0x5
+
+enum i40e_aq_link_speed {
+	I40E_LINK_SPEED_UNKNOWN	= 0,
+	I40E_LINK_SPEED_100MB	= (1 << I40E_LINK_SPEED_100MB_SHIFT),
+	I40E_LINK_SPEED_1GB	= (1 << I40E_LINK_SPEED_1000MB_SHIFT),
+	I40E_LINK_SPEED_10GB	= (1 << I40E_LINK_SPEED_10GB_SHIFT),
+	I40E_LINK_SPEED_40GB	= (1 << I40E_LINK_SPEED_40GB_SHIFT),
+	I40E_LINK_SPEED_20GB	= (1 << I40E_LINK_SPEED_20GB_SHIFT)
+};
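+
+/* Illustrative sketch only: mapping the one-hot link speed values above to
+ * a plain Mb/s count.  example_speed_to_mbps() is a hypothetical helper,
+ * not part of the admin queue API.
+ */
+static inline u32 example_speed_to_mbps(enum i40e_aq_link_speed speed)
+{
+	switch (speed) {
+	case I40E_LINK_SPEED_100MB:
+		return 100;
+	case I40E_LINK_SPEED_1GB:
+		return 1000;
+	case I40E_LINK_SPEED_10GB:
+		return 10000;
+	case I40E_LINK_SPEED_20GB:
+		return 20000;
+	case I40E_LINK_SPEED_40GB:
+		return 40000;
+	default:
+		return 0;
+	}
+}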
+
+struct i40e_aqc_module_desc {
+	u8 oui[3];
+	u8 reserved1;
+	u8 part_number[16];
+	u8 revision[4];
+	u8 reserved2[8];
+};
+
+struct i40e_aq_get_phy_abilities_resp {
+	__le32 phy_type;       /* bitmap using the above enum for offsets */
+	u8     link_speed;     /* bitmap using the above enum bit patterns */
+	u8     abilities;
+#define I40E_AQ_PHY_FLAG_PAUSE_TX         0x01
+#define I40E_AQ_PHY_FLAG_PAUSE_RX         0x02
+#define I40E_AQ_PHY_FLAG_LOW_POWER        0x04
+#define I40E_AQ_PHY_FLAG_AN_SHIFT         3
+#define I40E_AQ_PHY_FLAG_AN_MASK          (0x3 << I40E_AQ_PHY_FLAG_AN_SHIFT)
+#define I40E_AQ_PHY_FLAG_AN_OFF           0x00 /* link forced on */
+#define I40E_AQ_PHY_FLAG_AN_OFF_LINK_DOWN 0x01
+#define I40E_AQ_PHY_FLAG_AN_ON            0x02
+#define I40E_AQ_PHY_FLAG_MODULE_QUAL      0x20
+	__le16 eee_capability;
+#define I40E_AQ_EEE_100BASE_TX       0x0002
+#define I40E_AQ_EEE_1000BASE_T       0x0004
+#define I40E_AQ_EEE_10GBASE_T        0x0008
+#define I40E_AQ_EEE_1000BASE_KX      0x0010
+#define I40E_AQ_EEE_10GBASE_KX4      0x0020
+#define I40E_AQ_EEE_10GBASE_KR       0x0040
+	__le32 eeer_val;
+	u8     d3_lpan;
+#define I40E_AQ_SET_PHY_D3_LPAN_ENA  0x01
+	u8     reserved[3];
+	u8     phy_id[4];
+	u8     module_type[3];
+	u8     qualified_module_count;
+#define I40E_AQ_PHY_MAX_QMS          16
+	struct i40e_aqc_module_desc  qualified_module[I40E_AQ_PHY_MAX_QMS];
+};
+
+/* Set PHY Config (direct 0x0601) */
+struct i40e_aq_set_phy_config { /* same bits as above in all */
+	__le32 phy_type;
+	u8     link_speed;
+	u8     abilities;
+/* bits 0-2 use the values from get_phy_abilities_resp */
+#define I40E_AQ_PHY_ENABLE_LINK		0x08
+#define I40E_AQ_PHY_ENABLE_AN		0x10
+#define I40E_AQ_PHY_ENABLE_ATOMIC_LINK	0x20
+	__le16 eee_capability;
+	__le32 eeer;
+	u8     low_power_ctrl;
+	u8     reserved[3];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aq_set_phy_config);
+
+/* Set MAC Config command data structure (direct 0x0603) */
+struct i40e_aq_set_mac_config {
+	__le16 max_frame_size;
+	u8     params;
+#define I40E_AQ_SET_MAC_CONFIG_CRC_EN           0x04
+#define I40E_AQ_SET_MAC_CONFIG_PACING_MASK      0x78
+#define I40E_AQ_SET_MAC_CONFIG_PACING_SHIFT     3
+#define I40E_AQ_SET_MAC_CONFIG_PACING_NONE      0x0
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1B_13TX   0xF
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_9TX   0x9
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_4TX   0x8
+#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_7TX   0x7
+#define I40E_AQ_SET_MAC_CONFIG_PACING_2DW_3TX   0x6
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_1TX   0x5
+#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_2TX   0x4
+#define I40E_AQ_SET_MAC_CONFIG_PACING_7DW_3TX   0x3
+#define I40E_AQ_SET_MAC_CONFIG_PACING_4DW_1TX   0x2
+#define I40E_AQ_SET_MAC_CONFIG_PACING_9DW_1TX   0x1
+	u8     tx_timer_priority; /* bitmap */
+	__le16 tx_timer_value;
+	__le16 fc_refresh_threshold;
+	u8     reserved[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aq_set_mac_config);
+
+/* Restart Auto-Negotiation (direct 0x605) */
+struct i40e_aqc_set_link_restart_an {
+	u8     command;
+#define I40E_AQ_PHY_RESTART_AN  0x02
+#define I40E_AQ_PHY_LINK_ENABLE 0x04
+	u8     reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_link_restart_an);
+
+/* Get Link Status cmd & response data structure (direct 0x0607) */
+struct i40e_aqc_get_link_status {
+	__le16 command_flags; /* only field set on command */
+#define I40E_AQ_LSE_MASK             0x3
+#define I40E_AQ_LSE_NOP              0x0
+#define I40E_AQ_LSE_DISABLE          0x2
+#define I40E_AQ_LSE_ENABLE           0x3
+/* only response uses this flag */
+#define I40E_AQ_LSE_IS_ENABLED       0x1
+	u8     phy_type;    /* i40e_aq_phy_type   */
+	u8     link_speed;  /* i40e_aq_link_speed */
+	u8     link_info;
+#define I40E_AQ_LINK_UP              0x01
+#define I40E_AQ_LINK_FAULT           0x02
+#define I40E_AQ_LINK_FAULT_TX        0x04
+#define I40E_AQ_LINK_FAULT_RX        0x08
+#define I40E_AQ_LINK_FAULT_REMOTE    0x10
+#define I40E_AQ_MEDIA_AVAILABLE      0x40
+#define I40E_AQ_SIGNAL_DETECT        0x80
+	u8     an_info;
+#define I40E_AQ_AN_COMPLETED         0x01
+#define I40E_AQ_LP_AN_ABILITY        0x02
+#define I40E_AQ_PD_FAULT             0x04
+#define I40E_AQ_FEC_EN               0x08
+#define I40E_AQ_PHY_LOW_POWER        0x10
+#define I40E_AQ_LINK_PAUSE_TX        0x20
+#define I40E_AQ_LINK_PAUSE_RX        0x40
+#define I40E_AQ_QUALIFIED_MODULE     0x80
+	u8     ext_info;
+#define I40E_AQ_LINK_PHY_TEMP_ALARM  0x01
+#define I40E_AQ_LINK_XCESSIVE_ERRORS 0x02
+#define I40E_AQ_LINK_TX_SHIFT        0x02
+#define I40E_AQ_LINK_TX_MASK         (0x03 << I40E_AQ_LINK_TX_SHIFT)
+#define I40E_AQ_LINK_TX_ACTIVE       0x00
+#define I40E_AQ_LINK_TX_DRAINED      0x01
+#define I40E_AQ_LINK_TX_FLUSHED      0x03
+	u8     loopback;         /* use defines from i40e_aqc_set_lb_mode */
+	__le16 max_frame_size;
+	u8     config;
+#define I40E_AQ_CONFIG_CRC_ENA       0x04
+#define I40E_AQ_CONFIG_PACING_MASK   0x78
+	u8     reserved[5];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_link_status);
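+
+/* Illustrative sketch only: decoding the link_info byte of the response.
+ * example_link_is_up() is a hypothetical helper name.
+ */
+static inline bool example_link_is_up(
+			const struct i40e_aqc_get_link_status *resp)
+{
+	return !!(resp->link_info & I40E_AQ_LINK_UP);
+}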
+
+/* Set event mask command (direct 0x613) */
+struct i40e_aqc_set_phy_int_mask {
+	u8     reserved[8];
+	__le16 event_mask;
+#define I40E_AQ_EVENT_LINK_UPDOWN       0x0002
+#define I40E_AQ_EVENT_MEDIA_NA          0x0004
+#define I40E_AQ_EVENT_LINK_FAULT        0x0008
+#define I40E_AQ_EVENT_PHY_TEMP_ALARM    0x0010
+#define I40E_AQ_EVENT_EXCESSIVE_ERRORS  0x0020
+#define I40E_AQ_EVENT_SIGNAL_DETECT     0x0040
+#define I40E_AQ_EVENT_AN_COMPLETED      0x0080
+#define I40E_AQ_EVENT_MODULE_QUAL_FAIL  0x0100
+#define I40E_AQ_EVENT_PORT_TX_SUSPENDED 0x0200
+	u8     reserved1[6];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_int_mask);
+
+/* Get Local AN advt register (direct 0x0614)
+ * Set Local AN advt register (direct 0x0615)
+ * Get Link Partner AN advt register (direct 0x0616)
+ */
+struct i40e_aqc_an_advt_reg {
+	__le32 local_an_reg0;
+	__le16 local_an_reg1;
+	u8     reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_an_advt_reg);
+
+/* Set Loopback mode (0x0618) */
+struct i40e_aqc_set_lb_mode {
+	__le16 lb_mode;
+#define I40E_AQ_LB_PHY_LOCAL   0x01
+#define I40E_AQ_LB_PHY_REMOTE  0x02
+#define I40E_AQ_LB_MAC_LOCAL   0x04
+	u8     reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_lb_mode);
+
+/* Set PHY Reset command (0x0622) */
+struct i40e_aqc_set_phy_reset {
+	u8     reset_flags;
+#define I40E_AQ_PHY_RESET_REQUEST  0x02
+	u8     reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_reset);
+
+enum i40e_aq_phy_reg_type {
+	I40E_AQC_PHY_REG_INTERNAL         = 0x1,
+	I40E_AQC_PHY_REG_EXERNAL_BASET    = 0x2,
+	I40E_AQC_PHY_REG_EXERNAL_MODULE   = 0x3
+};
+
+/* NVM Read command (indirect 0x0701)
+ * NVM Erase commands (direct 0x0702)
+ * NVM Update commands (indirect 0x0703)
+ */
+struct i40e_aqc_nvm_update {
+	u8     command_flags;
+#define I40E_AQ_NVM_LAST_CMD    0x01
+#define I40E_AQ_NVM_FLASH_ONLY  0x80
+	u8     module_pointer;
+	__le16 length;
+	__le32 offset;
+	__le32 addr_high;
+	__le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_update);
+
+/* Send to PF command (indirect 0x0801) id is only used by PF
+ * Send to VF command (indirect 0x0802) id is only used by PF
+ * Send to Peer PF command (indirect 0x0803)
+ */
+struct i40e_aqc_pf_vf_message {
+	__le32 id;
+	u8     reserved[4];
+	__le32 addr_high;
+	__le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_pf_vf_message);
+
+/* Alternate structure */
+
+/* Direct write (direct 0x0900)
+ * Direct read (direct 0x0902)
+ */
+struct i40e_aqc_alternate_write {
+	__le32 address0;
+	__le32 data0;
+	__le32 address1;
+	__le32 data1;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_write);
+
+/* Indirect write (indirect 0x0901)
+ * Indirect read (indirect 0x0903)
+ */
+
+struct i40e_aqc_alternate_ind_write {
+	__le32 address;
+	__le32 length;
+	__le32 addr_high;
+	__le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_ind_write);
+
+/* Done alternate write (direct 0x0904)
+ * uses i40e_aq_desc
+ */
+struct i40e_aqc_alternate_write_done {
+	__le16 cmd_flags;
+#define I40E_AQ_ALTERNATE_MODE_BIOS_MASK	1
+#define I40E_AQ_ALTERNATE_MODE_BIOS_LEGACY	0
+#define I40E_AQ_ALTERNATE_MODE_BIOS_UEFI	1
+#define I40E_AQ_ALTERNATE_RESET_NEEDED		2
+	u8     reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_write_done);
+
+/* Set OEM mode (direct 0x0905) */
+struct i40e_aqc_alternate_set_mode {
+	__le32 mode;
+#define I40E_AQ_ALTERNATE_MODE_NONE	0
+#define I40E_AQ_ALTERNATE_MODE_OEM	1
+	u8     reserved[12];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_set_mode);
+
+/* Clear port Alternate RAM (direct 0x0906) uses i40e_aq_desc */
+
+/* async events 0x10xx */
+
+/* Lan Queue Overflow Event (direct, 0x1001) */
+struct i40e_aqc_lan_overflow {
+	__le32 prtdcb_rupto;
+	__le32 otx_ctl;
+	u8     reserved[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lan_overflow);
+
+/* Get LLDP MIB (indirect 0x0A00) */
+struct i40e_aqc_lldp_get_mib {
+	u8     type;
+	u8     reserved1;
+#define I40E_AQ_LLDP_MIB_TYPE_MASK                      0x3
+#define I40E_AQ_LLDP_MIB_LOCAL                          0x0
+#define I40E_AQ_LLDP_MIB_REMOTE                         0x1
+#define I40E_AQ_LLDP_MIB_LOCAL_AND_REMOTE               0x2
+#define I40E_AQ_LLDP_BRIDGE_TYPE_MASK                   0xC
+#define I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT                  0x2
+#define I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE         0x0
+#define I40E_AQ_LLDP_BRIDGE_TYPE_NON_TPMR               0x1
+#define I40E_AQ_LLDP_TX_SHIFT              0x4
+#define I40E_AQ_LLDP_TX_MASK               (0x03 << I40E_AQ_LLDP_TX_SHIFT)
+/* TX pause flags use I40E_AQ_LINK_TX_* above */
+	__le16 local_len;
+	__le16 remote_len;
+	u8     reserved2[2];
+	__le32 addr_high;
+	__le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_get_mib);
+
+/* Configure LLDP MIB Change Event (direct 0x0A01)
+ * also used for the event (with type in the command field)
+ */
+struct i40e_aqc_lldp_update_mib {
+	u8     command;
+#define I40E_AQ_LLDP_MIB_UPDATE_ENABLE          0x0
+#define I40E_AQ_LLDP_MIB_UPDATE_DISABLE         0x1
+	u8     reserved[7];
+	__le32 addr_high;
+	__le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_mib);
+
+/* Add LLDP TLV (indirect 0x0A02)
+ * Delete LLDP TLV (indirect 0x0A04)
+ */
+struct i40e_aqc_lldp_add_tlv {
+	u8     type; /* only nearest bridge and non-TPMR from 0x0A00 */
+	u8     reserved1[1];
+	__le16 len;
+	u8     reserved2[4];
+	__le32 addr_high;
+	__le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_add_tlv);
+
+/* Update LLDP TLV (indirect 0x0A03) */
+struct i40e_aqc_lldp_update_tlv {
+	u8     type; /* only nearest bridge and non-TPMR from 0x0A00 */
+	u8     reserved;
+	__le16 old_len;
+	__le16 new_offset;
+	__le16 new_len;
+	__le32 addr_high;
+	__le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_tlv);
+
+/* Stop LLDP (direct 0x0A05) */
+struct i40e_aqc_lldp_stop {
+	u8     command;
+#define I40E_AQ_LLDP_AGENT_STOP                 0x0
+#define I40E_AQ_LLDP_AGENT_SHUTDOWN             0x1
+	u8     reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_stop);
+
+/* Start LLDP (direct 0x0A06) */
+
+struct i40e_aqc_lldp_start {
+	u8     command;
+#define I40E_AQ_LLDP_AGENT_START                0x1
+	u8     reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start);
+
+/* Apply MIB changes (0x0A07)
+ * uses the generic struct as it contains no data
+ */
+
+/* Add UDP Tunnel command and completion (direct 0x0B00) */
+struct i40e_aqc_add_udp_tunnel {
+	__le16 udp_port;
+	u8     header_len; /* in DWords, 1 to 15 */
+	u8     protocol_type;
+#define I40E_AQC_TUNNEL_TYPE_TEREDO	0x0
+#define I40E_AQC_TUNNEL_TYPE_VXLAN	0x2
+#define I40E_AQC_TUNNEL_TYPE_NGE	0x3
+	u8     variable_udp_length;
+#define I40E_AQC_TUNNEL_FIXED_UDP_LENGTH	0x0
+#define I40E_AQC_TUNNEL_VARIABLE_UDP_LENGTH	0x1
+	u8		udp_key_index;
+#define I40E_AQC_TUNNEL_KEY_INDEX_VXLAN			0x0
+#define I40E_AQC_TUNNEL_KEY_INDEX_NGE			0x1
+#define I40E_AQC_TUNNEL_KEY_INDEX_PROPRIETARY_UDP	0x2
+	u8		reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel);
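+
+/* Illustrative sketch only: filling the add-UDP-tunnel payload for a VXLAN
+ * port; header_len (1 to 15 DWords) is left to the caller.  The helper
+ * name example_fill_vxlan_tunnel() is hypothetical.
+ */
+static inline void example_fill_vxlan_tunnel(
+			struct i40e_aqc_add_udp_tunnel *cmd, u16 udp_port)
+{
+	cmd->udp_port = cpu_to_le16(udp_port);
+	cmd->protocol_type = I40E_AQC_TUNNEL_TYPE_VXLAN;
+	cmd->udp_key_index = I40E_AQC_TUNNEL_KEY_INDEX_VXLAN;
+	cmd->variable_udp_length = I40E_AQC_TUNNEL_FIXED_UDP_LENGTH;
+}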
+
+struct i40e_aqc_add_udp_tunnel_completion {
+	__le16 udp_port;
+	u8	filter_entry_index;
+	u8	multiple_pfs;
+#define I40E_AQC_SINGLE_PF				0x0
+#define I40E_AQC_MULTIPLE_PFS			0x1
+	u8	total_filters;
+	u8	reserved[11];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel_completion);
+
+/* remove UDP Tunnel command (0x0B01) */
+struct i40e_aqc_remove_udp_tunnel {
+	u8     reserved[2];
+	u8     index; /* 0 to 15 */
+	u8     pf_filters;
+	u8     total_filters;
+	u8     reserved2[11];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_udp_tunnel);
+
+struct i40e_aqc_del_udp_tunnel_completion {
+	__le16 udp_port;
+	u8     index; /* 0 to 15 */
+	u8     multiple_pfs;
+	u8     total_filters_used;
+	u8     reserved;
+	u8     tunnels_free;
+	u8     reserved1[9];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion);
+
+/* tunnel key structure 0x0B10 */
+
+struct i40e_aqc_tunnel_key_structure_A0 {
+	__le16     key1_off;
+	__le16     key1_len;
+	__le16     key2_off;
+	__le16     key2_len;
+	__le16     flags;
+#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDE 0x01
+/* response flags */
+#define I40E_AQC_TUNNEL_KEY_STRUCT_SUCCESS    0x01
+#define I40E_AQC_TUNNEL_KEY_STRUCT_MODIFIED   0x02
+#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDDEN 0x03
+	u8         reserved[6];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_tunnel_key_structure_A0);
+
+struct i40e_aqc_tunnel_key_structure {
+	u8	key1_off;
+	u8	key2_off;
+	u8	key1_len;  /* 0 to 15 */
+	u8	key2_len;  /* 0 to 15 */
+	u8	flags;
+#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDE 0x01
+/* response flags */
+#define I40E_AQC_TUNNEL_KEY_STRUCT_SUCCESS    0x01
+#define I40E_AQC_TUNNEL_KEY_STRUCT_MODIFIED   0x02
+#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDDEN 0x03
+	u8	network_key_index;
+#define I40E_AQC_NETWORK_KEY_INDEX_VXLAN		0x0
+#define I40E_AQC_NETWORK_KEY_INDEX_NGE			0x1
+#define I40E_AQC_NETWORK_KEY_INDEX_FLEX_MAC_IN_UDP	0x2
+#define I40E_AQC_NETWORK_KEY_INDEX_GRE			0x3
+	u8	reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_tunnel_key_structure);
+
+/* OEM mode commands (direct 0xFE0x) */
+struct i40e_aqc_oem_param_change {
+	__le32 param_type;
+#define I40E_AQ_OEM_PARAM_TYPE_PF_CTL   0
+#define I40E_AQ_OEM_PARAM_TYPE_BW_CTL   1
+#define I40E_AQ_OEM_PARAM_MAC           2
+	__le32 param_value1;
+	u8     param_value2[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_param_change);
+
+struct i40e_aqc_oem_state_change {
+	__le32 state;
+#define I40E_AQ_OEM_STATE_LINK_DOWN  0x0
+#define I40E_AQ_OEM_STATE_LINK_UP    0x1
+	u8     reserved[12];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_state_change);
+
+/* debug commands */
+
+/* get device id (0xFF00) uses the generic structure */
+
+/* set test mode (0xFF01, internal) */
+
+struct i40e_acq_set_test_mode {
+	u8     mode;
+#define I40E_AQ_TEST_PARTIAL    0
+#define I40E_AQ_TEST_FULL       1
+#define I40E_AQ_TEST_NVM        2
+	u8     reserved[3];
+	u8     command;
+#define I40E_AQ_TEST_OPEN        0
+#define I40E_AQ_TEST_CLOSE       1
+#define I40E_AQ_TEST_INC         2
+	u8     reserved2[3];
+	__le32 address_high;
+	__le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_acq_set_test_mode);
+
+/* Debug Read Register command (0xFF03)
+ * Debug Write Register command (0xFF04)
+ */
+struct i40e_aqc_debug_reg_read_write {
+	__le32 reserved;
+	__le32 address;
+	__le32 value_high;
+	__le32 value_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_reg_read_write);
+
+/* Scatter/gather Reg Read  (indirect 0xFF05)
+ * Scatter/gather Reg Write (indirect 0xFF06)
+ */
+
+/* i40e_aq_desc is used for the command */
+struct i40e_aqc_debug_reg_sg_element_data {
+	__le32 address;
+	__le32 value;
+};
+
+/* Debug Modify register (direct 0xFF07) */
+struct i40e_aqc_debug_modify_reg {
+	__le32 address;
+	__le32 value;
+	__le32 clear_mask;
+	__le32 set_mask;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_reg);
+
+/* dump internal data (0xFF08, indirect) */
+
+#define I40E_AQ_CLUSTER_ID_AUX		0
+#define I40E_AQ_CLUSTER_ID_SWITCH_FLU	1
+#define I40E_AQ_CLUSTER_ID_TXSCHED	2
+#define I40E_AQ_CLUSTER_ID_HMC		3
+#define I40E_AQ_CLUSTER_ID_MAC0		4
+#define I40E_AQ_CLUSTER_ID_MAC1		5
+#define I40E_AQ_CLUSTER_ID_MAC2		6
+#define I40E_AQ_CLUSTER_ID_MAC3		7
+#define I40E_AQ_CLUSTER_ID_DCB		8
+#define I40E_AQ_CLUSTER_ID_EMP_MEM	9
+#define I40E_AQ_CLUSTER_ID_PKT_BUF	10
+#define I40E_AQ_CLUSTER_ID_ALTRAM	11
+
+struct i40e_aqc_debug_dump_internals {
+	u8     cluster_id;
+	u8     table_id;
+	__le16 data_size;
+	__le32 idx;
+	__le32 address_high;
+	__le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_dump_internals);
+
+struct i40e_aqc_debug_modify_internals {
+	u8     cluster_id;
+	u8     cluster_specific_params[7];
+	__le32 address_high;
+	__le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_internals);
+
+#endif
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_alloc.h b/drivers/net/ethernet/intel/i40evf/i40e_alloc.h
new file mode 100644
index 000000000000..d8654fb9e525
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40e_alloc.h
@@ -0,0 +1,55 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_ALLOC_H_
+#define _I40E_ALLOC_H_
+
+struct i40e_hw;
+
+/* Memory allocation types */
+enum i40e_memory_type {
+	i40e_mem_arq_buf = 0,		/* ARQ indirect command buffer */
+	i40e_mem_asq_buf = 1,
+	i40e_mem_atq_buf = 2,		/* ATQ indirect command buffer */
+	i40e_mem_arq_ring = 3,		/* ARQ descriptor ring */
+	i40e_mem_atq_ring = 4,		/* ATQ descriptor ring */
+	i40e_mem_pd = 5,		/* Page Descriptor */
+	i40e_mem_bp = 6,		/* Backing Page - 4KB */
+	i40e_mem_bp_jumbo = 7,		/* Backing Page - > 4KB */
+	i40e_mem_reserved
+};
+
+/* prototype for functions used for dynamic memory allocation */
+i40e_status i40e_allocate_dma_mem(struct i40e_hw *hw,
+					    struct i40e_dma_mem *mem,
+					    enum i40e_memory_type type,
+					    u64 size, u32 alignment);
+i40e_status i40e_free_dma_mem(struct i40e_hw *hw,
+					struct i40e_dma_mem *mem);
+i40e_status i40e_allocate_virt_mem(struct i40e_hw *hw,
+					     struct i40e_virt_mem *mem,
+					     u32 size);
+i40e_status i40e_free_virt_mem(struct i40e_hw *hw,
+					 struct i40e_virt_mem *mem);
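+
+/* Illustrative sketch only: one plausible pairing of the helpers above.
+ * The wrapper name and the 4096-byte alignment are assumptions made for
+ * the example; callers pass whatever alignment the buffer consumer needs.
+ */
+static inline i40e_status example_alloc_arq_ring(struct i40e_hw *hw,
+						 struct i40e_dma_mem *mem,
+						 u64 size)
+{
+	return i40e_allocate_dma_mem(hw, mem, i40e_mem_arq_ring, size, 4096);
+}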
+
+#endif /* _I40E_ALLOC_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c
new file mode 100644
index 000000000000..7b13953b28c4
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c
@@ -0,0 +1,254 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#include "i40e_type.h"
+#include "i40e_adminq.h"
+#include "i40e_prototype.h"
+#include "i40e_virtchnl.h"
+
+/**
+ * i40e_set_mac_type - Sets MAC type
+ * @hw: pointer to the HW structure
+ *
+ * This function sets the mac type of the adapter based on the
+ * vendor ID and device ID stored in the hw structure.
+ **/
+i40e_status i40e_set_mac_type(struct i40e_hw *hw)
+{
+	i40e_status status = 0;
+
+	if (hw->vendor_id == PCI_VENDOR_ID_INTEL) {
+		switch (hw->device_id) {
+		case I40E_DEV_ID_SFP_XL710:
+		case I40E_DEV_ID_SFP_X710:
+		case I40E_DEV_ID_QEMU:
+		case I40E_DEV_ID_KX_A:
+		case I40E_DEV_ID_KX_B:
+		case I40E_DEV_ID_KX_C:
+		case I40E_DEV_ID_KX_D:
+		case I40E_DEV_ID_QSFP_A:
+		case I40E_DEV_ID_QSFP_B:
+		case I40E_DEV_ID_QSFP_C:
+			hw->mac.type = I40E_MAC_XL710;
+			break;
+		case I40E_DEV_ID_VF:
+		case I40E_DEV_ID_VF_HV:
+			hw->mac.type = I40E_MAC_VF;
+			break;
+		default:
+			hw->mac.type = I40E_MAC_GENERIC;
+			break;
+		}
+	} else {
+		status = I40E_ERR_DEVICE_NOT_SUPPORTED;
+	}
+
+	hw_dbg(hw, "i40e_set_mac_type found mac: %d, returns: %d\n",
+		  hw->mac.type, status);
+	return status;
+}
+
+/**
+ * i40evf_debug_aq
+ * @hw: pointer to the hw struct
+ * @mask: debug mask related to admin queue
+ * @desc: pointer to admin queue descriptor
+ * @buffer: pointer to command buffer
+ *
+ * Dumps debug log about adminq command with descriptor contents.
+ **/
+void i40evf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
+		   void *buffer)
+{
+	struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc;
+	u8 *aq_buffer = (u8 *)buffer;
+	u32 data[4];
+	u32 i = 0;
+
+	if ((!(mask & hw->debug_mask)) || (desc == NULL))
+		return;
+
+	i40e_debug(hw, mask,
+		   "AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
+		   aq_desc->opcode, aq_desc->flags, aq_desc->datalen,
+		   aq_desc->retval);
+	i40e_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n",
+		   aq_desc->cookie_high, aq_desc->cookie_low);
+	i40e_debug(hw, mask, "\tparam (0,1)  0x%08X 0x%08X\n",
+		   aq_desc->params.internal.param0,
+		   aq_desc->params.internal.param1);
+	i40e_debug(hw, mask, "\taddr (h,l)   0x%08X 0x%08X\n",
+		   aq_desc->params.external.addr_high,
+		   aq_desc->params.external.addr_low);
+
+	if ((buffer != NULL) && (aq_desc->datalen != 0)) {
+		memset(data, 0, sizeof(data));
+		i40e_debug(hw, mask, "AQ CMD Buffer:\n");
+		for (i = 0; i < le16_to_cpu(aq_desc->datalen); i++) {
+			data[((i % 16) / 4)] |=
+				((u32)aq_buffer[i]) << (8 * (i % 4));
+			if ((i % 16) == 15) {
+				i40e_debug(hw, mask,
+					   "\t0x%04X  %08X %08X %08X %08X\n",
+					   i - 15, data[0], data[1], data[2],
+					   data[3]);
+				memset(data, 0, sizeof(data));
+			}
+		}
+		if ((i % 16) != 0)
+			i40e_debug(hw, mask, "\t0x%04X  %08X %08X %08X %08X\n",
+				   i - (i % 16), data[0], data[1], data[2],
+				   data[3]);
+	}
+}
+
+/**
+ * i40evf_check_asq_alive
+ * @hw: pointer to the hw struct
+ *
+ * Returns true if Queue is enabled else false.
+ **/
+bool i40evf_check_asq_alive(struct i40e_hw *hw)
+{
+	return !!(rd32(hw, hw->aq.asq.len) & I40E_PF_ATQLEN_ATQENABLE_MASK);
+}
+
+/**
+ * i40evf_aq_queue_shutdown
+ * @hw: pointer to the hw struct
+ * @unloading: is the driver unloading itself
+ *
+ * Tell the Firmware that we're shutting down the AdminQ and whether
+ * or not the driver is unloading as well.
+ **/
+i40e_status i40evf_aq_queue_shutdown(struct i40e_hw *hw,
+					     bool unloading)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_queue_shutdown *cmd =
+		(struct i40e_aqc_queue_shutdown *)&desc.params.raw;
+	i40e_status status;
+
+	i40evf_fill_default_direct_cmd_desc(&desc,
+					  i40e_aqc_opc_queue_shutdown);
+
+	if (unloading)
+		cmd->driver_unloading = cpu_to_le32(I40E_AQ_DRIVER_UNLOADING);
+	status = i40evf_asq_send_command(hw, &desc, NULL, 0, NULL);
+
+	return status;
+}
+
+
+/**
+ * i40e_aq_send_msg_to_pf
+ * @hw: pointer to the hardware structure
+ * @v_opcode: opcodes for VF-PF communication
+ * @v_retval: return error code
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ * @cmd_details: pointer to command details
+ *
+ * Send message to PF driver using admin queue. By default, this message
+ * is sent asynchronously, i.e. i40evf_asq_send_command() does not wait for
+ * completion before returning.
+ **/
+i40e_status i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
+				enum i40e_virtchnl_ops v_opcode,
+				i40e_status v_retval,
+				u8 *msg, u16 msglen,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_asq_cmd_details details;
+	i40e_status status;
+
+	i40evf_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_pf);
+	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_SI);
+	desc.cookie_high = cpu_to_le32(v_opcode);
+	desc.cookie_low = cpu_to_le32(v_retval);
+	if (msglen) {
+		desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF
+						| I40E_AQ_FLAG_RD));
+		if (msglen > I40E_AQ_LARGE_BUF)
+			desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+		desc.datalen = cpu_to_le16(msglen);
+	}
+	if (!cmd_details) {
+		/* details is declared at function scope so it stays valid below */
+		memset(&details, 0, sizeof(details));
+		details.async = true;
+		cmd_details = &details;
+	}
+	status = i40evf_asq_send_command(hw, (struct i40e_aq_desc *)&desc, msg,
+				       msglen, cmd_details);
+	return status;
+}
+
+/**
+ * i40e_vf_parse_hw_config
+ * @hw: pointer to the hardware structure
+ * @msg: pointer to the virtual channel VF resource structure
+ *
+ * Given a VF resource message from the PF, populate the hw struct
+ * with appropriate information.
+ **/
+void i40e_vf_parse_hw_config(struct i40e_hw *hw,
+			     struct i40e_virtchnl_vf_resource *msg)
+{
+	struct i40e_virtchnl_vsi_resource *vsi_res;
+	int i;
+
+	vsi_res = &msg->vsi_res[0];
+
+	hw->dev_caps.num_vsis = msg->num_vsis;
+	hw->dev_caps.num_rx_qp = msg->num_queue_pairs;
+	hw->dev_caps.num_tx_qp = msg->num_queue_pairs;
+	hw->dev_caps.num_msix_vectors_vf = msg->max_vectors;
+	hw->dev_caps.dcb = msg->vf_offload_flags &
+			   I40E_VIRTCHNL_VF_OFFLOAD_L2;
+	hw->dev_caps.fcoe = (msg->vf_offload_flags &
+			     I40E_VIRTCHNL_VF_OFFLOAD_FCOE) ? 1 : 0;
+	for (i = 0; i < msg->num_vsis; i++) {
+		if (vsi_res->vsi_type == I40E_VSI_SRIOV) {
+			memcpy(hw->mac.perm_addr, vsi_res->default_mac_addr,
+			       ETH_ALEN);
+			memcpy(hw->mac.addr, vsi_res->default_mac_addr,
+			       ETH_ALEN);
+		}
+		vsi_res++;
+	}
+}
+
+/**
+ * i40e_vf_reset
+ * @hw: pointer to the hardware structure
+ *
+ * Send a VF_RESET message to the PF. Does not wait for response from PF
+ * as none will be forthcoming. Immediately after calling this function,
+ * the admin queue should be shut down and (optionally) reinitialized.
+ **/
+i40e_status i40e_vf_reset(struct i40e_hw *hw)
+{
+	return i40e_aq_send_msg_to_pf(hw, I40E_VIRTCHNL_OP_RESET_VF,
+				      0, NULL, 0, NULL);
+}
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_hmc.h b/drivers/net/ethernet/intel/i40evf/i40e_hmc.h
new file mode 100644
index 000000000000..cb97b3eed440
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40e_hmc.h
@@ -0,0 +1,238 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_HMC_H_
+#define _I40E_HMC_H_
+
+#define I40E_HMC_MAX_BP_COUNT 512
+
+/* forward-declare the HW struct for the compiler */
+struct i40e_hw;
+
+#define I40E_HMC_INFO_SIGNATURE		0x484D5347 /* HMSG */
+#define I40E_HMC_PD_CNT_IN_SD		512
+#define I40E_HMC_DIRECT_BP_SIZE		0x200000 /* 2M */
+#define I40E_HMC_PAGED_BP_SIZE		4096
+#define I40E_HMC_PD_BP_BUF_ALIGNMENT	4096
+#define I40E_FIRST_VF_FPM_ID		16
+
+struct i40e_hmc_obj_info {
+	u64 base;	/* base addr in FPM */
+	u32 max_cnt;	/* max count available for this hmc func */
+	u32 cnt;	/* count of objects driver actually wants to create */
+	u64 size;	/* size in bytes of one object */
+};
+
+enum i40e_sd_entry_type {
+	I40E_SD_TYPE_INVALID = 0,
+	I40E_SD_TYPE_PAGED   = 1,
+	I40E_SD_TYPE_DIRECT  = 2
+};
+
+struct i40e_hmc_bp {
+	enum i40e_sd_entry_type entry_type;
+	struct i40e_dma_mem addr; /* populate to be used by hw */
+	u32 sd_pd_index;
+	u32 ref_cnt;
+};
+
+struct i40e_hmc_pd_entry {
+	struct i40e_hmc_bp bp;
+	u32 sd_index;
+	bool valid;
+};
+
+struct i40e_hmc_pd_table {
+	struct i40e_dma_mem pd_page_addr; /* populate to be used by hw */
+	struct i40e_hmc_pd_entry  *pd_entry; /* [512] for sw bookkeeping */
+	struct i40e_virt_mem pd_entry_virt_mem; /* virt mem for pd_entry */
+
+	u32 ref_cnt;
+	u32 sd_index;
+};
+
+struct i40e_hmc_sd_entry {
+	enum i40e_sd_entry_type entry_type;
+	bool valid;
+
+	union {
+		struct i40e_hmc_pd_table pd_table;
+		struct i40e_hmc_bp bp;
+	} u;
+};
+
+struct i40e_hmc_sd_table {
+	struct i40e_virt_mem addr; /* used to track sd_entry allocations */
+	u32 sd_cnt;
+	u32 ref_cnt;
+	struct i40e_hmc_sd_entry *sd_entry; /* (sd_cnt*512) entries max */
+};
+
+struct i40e_hmc_info {
+	u32 signature;
+	/* equals the PCI func number for PF; dynamically allocated for VFs */
+	u8 hmc_fn_id;
+	u16 first_sd_index; /* index of the first available SD */
+
+	/* hmc objects */
+	struct i40e_hmc_obj_info *hmc_obj;
+	struct i40e_virt_mem hmc_obj_virt_mem;
+	struct i40e_hmc_sd_table sd_table;
+};
+
+#define I40E_INC_SD_REFCNT(sd_table)	((sd_table)->ref_cnt++)
+#define I40E_INC_PD_REFCNT(pd_table)	((pd_table)->ref_cnt++)
+#define I40E_INC_BP_REFCNT(bp)		((bp)->ref_cnt++)
+
+#define I40E_DEC_SD_REFCNT(sd_table)	((sd_table)->ref_cnt--)
+#define I40E_DEC_PD_REFCNT(pd_table)	((pd_table)->ref_cnt--)
+#define I40E_DEC_BP_REFCNT(bp)		((bp)->ref_cnt--)
+
+/**
+ * I40E_SET_PF_SD_ENTRY - marks the sd entry as valid in the hardware
+ * @hw: pointer to our hw struct
+ * @pa: pointer to physical address
+ * @sd_index: segment descriptor index
+ * @type: if sd entry is direct or paged
+ **/
+#define I40E_SET_PF_SD_ENTRY(hw, pa, sd_index, type)			\
+{									\
+	u32 val1, val2, val3;						\
+	val1 = (u32)(upper_32_bits(pa));				\
+	val2 = (u32)(pa) | (I40E_HMC_MAX_BP_COUNT <<			\
+		 I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) |		\
+		((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) <<		\
+		I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT) |			\
+		(1 << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT);		\
+	val3 = (sd_index) | (1 << I40E_PFHMC_SDCMD_PMSDWR_SHIFT);	\
+	wr32((hw), I40E_PFHMC_SDDATAHIGH, val1);			\
+	wr32((hw), I40E_PFHMC_SDDATALOW, val2);				\
+	wr32((hw), I40E_PFHMC_SDCMD, val3);				\
+}
+
+/**
+ * I40E_CLEAR_PF_SD_ENTRY - marks the sd entry as invalid in the hardware
+ * @hw: pointer to our hw struct
+ * @sd_index: segment descriptor index
+ * @type: if sd entry is direct or paged
+ **/
+#define I40E_CLEAR_PF_SD_ENTRY(hw, sd_index, type)			\
+{									\
+	u32 val2, val3;							\
+	val2 = (I40E_HMC_MAX_BP_COUNT <<				\
+		I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) |		\
+		((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) <<		\
+		I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT);			\
+	val3 = (sd_index) | (1 << I40E_PFHMC_SDCMD_PMSDWR_SHIFT);	\
+	wr32((hw), I40E_PFHMC_SDDATAHIGH, 0);				\
+	wr32((hw), I40E_PFHMC_SDDATALOW, val2);				\
+	wr32((hw), I40E_PFHMC_SDCMD, val3);				\
+}
+
+/**
+ * I40E_INVALIDATE_PF_HMC_PD - Invalidates the pd cache in the hardware
+ * @hw: pointer to our hw struct
+ * @sd_idx: segment descriptor index
+ * @pd_idx: page descriptor index
+ **/
+#define I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, pd_idx)			\
+	wr32((hw), I40E_PFHMC_PDINV,					\
+	    (((sd_idx) << I40E_PFHMC_PDINV_PMSDIDX_SHIFT) |		\
+	     ((pd_idx) << I40E_PFHMC_PDINV_PMPDIDX_SHIFT)))
+
+#define I40E_INVALIDATE_VF_HMC_PD(hw, sd_idx, pd_idx, hmc_fn_id)	   \
+	wr32((hw), I40E_GLHMC_VFPDINV((hmc_fn_id) - I40E_FIRST_VF_FPM_ID), \
+	     (((sd_idx) << I40E_PFHMC_PDINV_PMSDIDX_SHIFT) |		   \
+	      ((pd_idx) << I40E_PFHMC_PDINV_PMPDIDX_SHIFT)))
+
+/**
+ * I40E_FIND_SD_INDEX_LIMIT - finds segment descriptor index limit
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @type: type of HMC resources we're searching
+ * @index: starting index for the object
+ * @cnt: number of objects we're trying to create
+ * @sd_idx: pointer to return index of the segment descriptor in question
+ * @sd_limit: pointer to return the maximum number of segment descriptors
+ *
+ * This function calculates the segment descriptor index and index limit
+ * for the resource defined by i40e_hmc_rsrc_type.
+ **/
+#define I40E_FIND_SD_INDEX_LIMIT(hmc_info, type, index, cnt, sd_idx, sd_limit)\
+{									\
+	u64 fpm_addr, fpm_limit;					\
+	fpm_addr = (hmc_info)->hmc_obj[(type)].base +			\
+		   (hmc_info)->hmc_obj[(type)].size * (index);		\
+	fpm_limit = fpm_addr + (hmc_info)->hmc_obj[(type)].size * (cnt);\
+	*(sd_idx) = (u32)(fpm_addr / I40E_HMC_DIRECT_BP_SIZE);		\
+	*(sd_limit) = (u32)((fpm_limit - 1) / I40E_HMC_DIRECT_BP_SIZE);	\
+	/* add one more to the limit to correct our range */		\
+	*(sd_limit) += 1;						\
+}
+
+/**
+ * I40E_FIND_PD_INDEX_LIMIT - finds page descriptor index limit
+ * @hmc_info: pointer to the HMC configuration information struct
+ * @type: HMC resource type we're examining
+ * @idx: starting index for the object
+ * @cnt: number of objects we're trying to create
+ * @pd_index: pointer to return page descriptor index
+ * @pd_limit: pointer to return page descriptor index limit
+ *
+ * Calculates the page descriptor index and index limit for the resource
+ * defined by i40e_hmc_rsrc_type.
+ **/
+#define I40E_FIND_PD_INDEX_LIMIT(hmc_info, type, idx, cnt, pd_index, pd_limit)\
+{									\
+	u64 fpm_adr, fpm_limit;						\
+	fpm_adr = (hmc_info)->hmc_obj[(type)].base +			\
+		  (hmc_info)->hmc_obj[(type)].size * (idx);		\
+	fpm_limit = fpm_adr + (hmc_info)->hmc_obj[(type)].size * (cnt);	\
+	*(pd_index) = (u32)(fpm_adr / I40E_HMC_PAGED_BP_SIZE);		\
+	*(pd_limit) = (u32)((fpm_limit - 1) / I40E_HMC_PAGED_BP_SIZE);	\
+	/* add one more to the limit to correct our range */		\
+	*(pd_limit) += 1;						\
+}
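+
+/* Illustrative sketch only: invoking the SD index-limit helper above.  The
+ * object count of 64 and the caller-supplied resource type are example
+ * values; example_sd_limits() is a hypothetical wrapper.
+ */
+static inline void example_sd_limits(struct i40e_hmc_info *hmc_info,
+				     u32 type, u32 *sd_idx, u32 *sd_limit)
+{
+	I40E_FIND_SD_INDEX_LIMIT(hmc_info, type, 0, 64, sd_idx, sd_limit);
+}
+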
+i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw,
+					      struct i40e_hmc_info *hmc_info,
+					      u32 sd_index,
+					      enum i40e_sd_entry_type type,
+					      u64 direct_mode_sz);
+
+i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw,
+					      struct i40e_hmc_info *hmc_info,
+					      u32 pd_index);
+i40e_status i40e_remove_pd_bp(struct i40e_hw *hw,
+					struct i40e_hmc_info *hmc_info,
+					u32 idx, bool is_pf);
+i40e_status i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info,
+					     u32 idx);
+i40e_status i40e_remove_sd_bp_new(struct i40e_hw *hw,
+					    struct i40e_hmc_info *hmc_info,
+					    u32 idx, bool is_pf);
+i40e_status i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info,
+					       u32 idx);
+i40e_status i40e_remove_pd_page_new(struct i40e_hw *hw,
+					      struct i40e_hmc_info *hmc_info,
+					      u32 idx, bool is_pf);
+
+#endif /* _I40E_HMC_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h b/drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h
new file mode 100644
index 000000000000..17e42ca26d0b
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h
@@ -0,0 +1,165 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_LAN_HMC_H_
+#define _I40E_LAN_HMC_H_
+
+/* forward-declare the HW struct for the compiler */
+struct i40e_hw;
+
+/* HMC element context information */
+
+/* Rx queue context data */
+struct i40e_hmc_obj_rxq {
+	u16 head;
+	u8  cpuid;
+	u64 base;
+	u16 qlen;
+#define I40E_RXQ_CTX_DBUFF_SHIFT 7
+	u8  dbuff;
+#define I40E_RXQ_CTX_HBUFF_SHIFT 6
+	u8  hbuff;
+	u8  dtype;
+	u8  dsize;
+	u8  crcstrip;
+	u8  fc_ena;
+	u8  l2tsel;
+	u8  hsplit_0;
+	u8  hsplit_1;
+	u8  showiv;
+	u16 rxmax;
+	u8  tphrdesc_ena;
+	u8  tphwdesc_ena;
+	u8  tphdata_ena;
+	u8  tphhead_ena;
+	u8  lrxqthresh;
+};
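+
+/* Illustrative sketch only: base is programmed in 128-byte units and dbuff
+ * in the granule implied by I40E_RXQ_CTX_DBUFF_SHIFT.  The helper name and
+ * the no-header-split choice are assumptions made for the example.
+ */
+static inline void example_fill_rxq_ctx(struct i40e_hmc_obj_rxq *rxq,
+					u64 ring_dma, u16 ring_len,
+					u32 rx_buf_len)
+{
+	rxq->base = ring_dma >> 7;	/* 128-byte units */
+	rxq->qlen = ring_len;
+	rxq->dbuff = rx_buf_len >> I40E_RXQ_CTX_DBUFF_SHIFT;
+	rxq->hbuff = 0;			/* no header split in this sketch */
+	rxq->hsplit_0 = 0;
+}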
+
+/* Tx queue context data */
+struct i40e_hmc_obj_txq {
+	u16 head;
+	u8  new_context;
+	u64 base;
+	u8  fc_ena;
+	u8  timesync_ena;
+	u8  fd_ena;
+	u8  alt_vlan_ena;
+	u16 thead_wb;
+	u16 cpuid;
+	u8  head_wb_ena;
+	u16 qlen;
+	u8  tphrdesc_ena;
+	u8  tphrpacket_ena;
+	u8  tphwdesc_ena;
+	u64 head_wb_addr;
+	u32 crc;
+	u16 rdylist;
+	u8  rdylist_act;
+};
+
+/* for hsplit_0 field of Rx HMC context */
+enum i40e_hmc_obj_rx_hsplit_0 {
+	I40E_HMC_OBJ_RX_HSPLIT_0_NO_SPLIT      = 0,
+	I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_L2      = 1,
+	I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_IP      = 2,
+	I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_TCP_UDP = 4,
+	I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_SCTP    = 8,
+};
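+
+/* the non-zero values are distinct bits, so multiple split types can
+ * presumably be OR'd together in the hsplit_0 field
+ */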
+
+/* fcoe_cntx and fcoe_filt are for debugging purposes only */
+struct i40e_hmc_obj_fcoe_cntx {
+	u32 rsv[32];
+};
+
+struct i40e_hmc_obj_fcoe_filt {
+	u32 rsv[8];
+};
+
+/* Context sizes for LAN objects */
+enum i40e_hmc_lan_object_size {
+	I40E_HMC_LAN_OBJ_SZ_8   = 0x3,
+	I40E_HMC_LAN_OBJ_SZ_16  = 0x4,
+	I40E_HMC_LAN_OBJ_SZ_32  = 0x5,
+	I40E_HMC_LAN_OBJ_SZ_64  = 0x6,
+	I40E_HMC_LAN_OBJ_SZ_128 = 0x7,
+	I40E_HMC_LAN_OBJ_SZ_256 = 0x8,
+	I40E_HMC_LAN_OBJ_SZ_512 = 0x9,
+};
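+
+/* each enum value above is log2 of the object size in bytes
+ * (0x3 = 8 B ... 0x9 = 512 B)
+ */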
+
+#define I40E_HMC_L2OBJ_BASE_ALIGNMENT 512
+#define I40E_HMC_OBJ_SIZE_TXQ         128
+#define I40E_HMC_OBJ_SIZE_RXQ         32
+#define I40E_HMC_OBJ_SIZE_FCOE_CNTX   128
+#define I40E_HMC_OBJ_SIZE_FCOE_FILT   64
+
+enum i40e_hmc_lan_rsrc_type {
+	I40E_HMC_LAN_FULL  = 0,
+	I40E_HMC_LAN_TX    = 1,
+	I40E_HMC_LAN_RX    = 2,
+	I40E_HMC_FCOE_CTX  = 3,
+	I40E_HMC_FCOE_FILT = 4,
+	I40E_HMC_LAN_MAX   = 5
+};
+
+enum i40e_hmc_model {
+	I40E_HMC_MODEL_DIRECT_PREFERRED = 0,
+	I40E_HMC_MODEL_DIRECT_ONLY      = 1,
+	I40E_HMC_MODEL_PAGED_ONLY       = 2,
+	I40E_HMC_MODEL_UNKNOWN,
+};
+
+struct i40e_hmc_lan_create_obj_info {
+	struct i40e_hmc_info *hmc_info;
+	u32 rsrc_type;
+	u32 start_idx;
+	u32 count;
+	enum i40e_sd_entry_type entry_type;
+	u64 direct_mode_sz;
+};
+
+struct i40e_hmc_lan_delete_obj_info {
+	struct i40e_hmc_info *hmc_info;
+	u32 rsrc_type;
+	u32 start_idx;
+	u32 count;
+};
+
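+/* Expected call order, as suggested by the prototypes below:
+ * i40e_init_lan_hmc() sizes the objects, i40e_configure_lan_hmc() programs
+ * the HMC according to the chosen model, the *_queue_context() helpers then
+ * populate or clear individual queue contexts, and i40e_shutdown_lan_hmc()
+ * releases everything on teardown.
+ */
+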
+i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
+					u32 rxq_num, u32 fcoe_cntx_num,
+					u32 fcoe_filt_num);
+i40e_status i40e_configure_lan_hmc(struct i40e_hw *hw,
+					     enum i40e_hmc_model model);
+i40e_status i40e_shutdown_lan_hmc(struct i40e_hw *hw);
+
+i40e_status i40e_clear_lan_tx_queue_context(struct i40e_hw *hw,
+						      u16 queue);
+i40e_status i40e_set_lan_tx_queue_context(struct i40e_hw *hw,
+						    u16 queue,
+						    struct i40e_hmc_obj_txq *s);
+i40e_status i40e_clear_lan_rx_queue_context(struct i40e_hw *hw,
+						      u16 queue);
+i40e_status i40e_set_lan_rx_queue_context(struct i40e_hw *hw,
+						    u16 queue,
+						    struct i40e_hmc_obj_rxq *s);
+
+#endif /* _I40E_LAN_HMC_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_osdep.h b/drivers/net/ethernet/intel/i40evf/i40e_osdep.h
new file mode 100644
index 000000000000..622f373b745d
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40e_osdep.h
@@ -0,0 +1,72 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_OSDEP_H_
+#define _I40E_OSDEP_H_
+
+#include <linux/types.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/tcp.h>
+#include <linux/pci.h>
+
+/* get readq/writeq support for 32 bit kernels, use the low-first version */
+#include <asm-generic/io-64-nonatomic-lo-hi.h>
+
+/* This file is the glue between the shared code and
+ * the actual OS primitives.
+ */
+
+#define hw_dbg(hw, S, A...)	do {} while (0)
+
+#define wr32(a, reg, value)	writel((value), ((a)->hw_addr + (reg)))
+#define rd32(a, reg)		readl((a)->hw_addr + (reg))
+
+#define wr64(a, reg, value)	writeq((value), ((a)->hw_addr + (reg)))
+#define rd64(a, reg)		readq((a)->hw_addr + (reg))
+#define i40e_flush(a)		readl((a)->hw_addr + I40E_VFGEN_RSTAT)
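+
+/* Usage sketch: given a struct i40e_hw whose hw_addr points at the mapped
+ * BAR, a register field is read by masking and shifting the raw value, e.g.
+ *
+ *	u32 val = rd32(hw, I40E_GLGEN_RSTAT);
+ *	u32 rst = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK) >>
+ *		  I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
+ *
+ * (register and field names come from i40e_register.h); i40e_flush() forces
+ * posted writes out by reading back a benign register.
+ */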
+
+/* memory allocation tracking */
+struct i40e_dma_mem {
+	void *va;
+	dma_addr_t pa;
+	u32 size;
+} __packed;
+
+#define i40e_allocate_dma_mem(h, m, unused, s, a) \
+	i40evf_allocate_dma_mem_d(h, m, s, a)
+#define i40e_free_dma_mem(h, m) i40evf_free_dma_mem_d(h, m)
+
+struct i40e_virt_mem {
+	void *va;
+	u32 size;
+} __packed;
+#define i40e_allocate_virt_mem(h, m, s) i40evf_allocate_virt_mem_d(h, m, s)
+#define i40e_free_virt_mem(h, m) i40evf_free_virt_mem_d(h, m)
+
+#define i40e_debug(h, m, s, ...)  i40evf_debug_d(h, m, s, ##__VA_ARGS__)
+extern void i40evf_debug_d(void *hw, u32 mask, char *fmt_str, ...)
+	__attribute__ ((format(gnu_printf, 3, 4)));
+
+typedef enum i40e_status_code i40e_status;
+#endif /* _I40E_OSDEP_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
new file mode 100644
index 000000000000..7841573a58c9
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
@@ -0,0 +1,84 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_PROTOTYPE_H_
+#define _I40E_PROTOTYPE_H_
+
+#include "i40e_type.h"
+#include "i40e_alloc.h"
+#include "i40e_virtchnl.h"
+
+/* Prototypes for shared code functions that are not in
+ * the standard function pointer structures.  They live
+ * here mostly because they are needed even before init
+ * has happened, and they assist in the early SW and FW
+ * setup.
+ */
+
+/* adminq functions */
+i40e_status i40evf_init_adminq(struct i40e_hw *hw);
+i40e_status i40evf_shutdown_adminq(struct i40e_hw *hw);
+void i40e_adminq_init_ring_data(struct i40e_hw *hw);
+i40e_status i40evf_clean_arq_element(struct i40e_hw *hw,
+					     struct i40e_arq_event_info *e,
+					     u16 *events_pending);
+i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
+				struct i40e_aq_desc *desc,
+				void *buff, /* can be NULL */
+				u16  buff_size,
+				struct i40e_asq_cmd_details *cmd_details);
+bool i40evf_asq_done(struct i40e_hw *hw);
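+
+/* Typical flow (a sketch, not a prescription): i40evf_init_adminq() sets up
+ * both rings; commands go out through i40evf_asq_send_command(), with
+ * i40evf_asq_done() available to poll for send-queue completion; replies and
+ * PF events come back via i40evf_clean_arq_element(); and
+ * i40evf_shutdown_adminq() tears the rings down on unload.
+ */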
+
+/* debug function for adminq */
+void i40evf_debug_aq(struct i40e_hw *hw,
+		   enum i40e_debug_mask mask,
+		   void *desc,
+		   void *buffer);
+
+void i40e_idle_aq(struct i40e_hw *hw);
+void i40evf_resume_aq(struct i40e_hw *hw);
+bool i40evf_check_asq_alive(struct i40e_hw *hw);
+i40e_status i40evf_aq_queue_shutdown(struct i40e_hw *hw,
+					     bool unloading);
+
+i40e_status i40e_set_mac_type(struct i40e_hw *hw);
+
+/* prototype for functions used for SW locks */
+
+/* i40e_common for VF drivers */
+void i40e_vf_parse_hw_config(struct i40e_hw *hw,
+			     struct i40e_virtchnl_vf_resource *msg);
+i40e_status i40e_vf_reset(struct i40e_hw *hw);
+i40e_status i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
+				enum i40e_virtchnl_ops v_opcode,
+				i40e_status v_retval,
+				u8 *msg, u16 msglen,
+				struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_set_filter_control(struct i40e_hw *hw,
+				struct i40e_filter_control_settings *settings);
+i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
+				u8 *mac_addr, u16 ethtype, u16 flags,
+				u16 vsi_seid, u16 queue, bool is_add,
+				struct i40e_control_filter_stats *stats,
+				struct i40e_asq_cmd_details *cmd_details);
+#endif /* _I40E_PROTOTYPE_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_register.h b/drivers/net/ethernet/intel/i40evf/i40e_register.h
new file mode 100644
index 000000000000..30af953cf106
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40e_register.h
@@ -0,0 +1,4667 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_REGISTER_H_
+#define _I40E_REGISTER_H_
+
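+/* Naming convention used throughout this file: each register gets an address
+ * #define, each field gets a _SHIFT and a pre-shifted _MASK, and register
+ * arrays take an index argument bounded by the matching _MAX_INDEX define.
+ * A field is extracted as (rd32(hw, REG) & REG_FIELD_MASK) >> REG_FIELD_SHIFT,
+ * using the rd32() accessor from i40e_osdep.h.
+ */
+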
+#define I40E_GL_GP_FUSE(_i) (0x0009400C + ((_i) * 4)) /* _i=0...28 */
+#define I40E_GL_GP_FUSE_MAX_INDEX 28
+#define I40E_GL_GP_FUSE_GL_GP_FUSE_SHIFT 0
+#define I40E_GL_GP_FUSE_GL_GP_FUSE_MASK (0xFFFFFFFF << I40E_GL_GP_FUSE_GL_GP_FUSE_SHIFT)
+#define I40E_GLPCI_PM_MUX_NPQ 0x0009C4F4
+#define I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_SHIFT 0
+#define I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_MASK (0x7 << I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_SHIFT)
+#define I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_SHIFT 16
+#define I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_MASK (0x1F << I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_SHIFT)
+#define I40E_GLPCI_PM_MUX_PFB 0x0009C4F0
+#define I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_SHIFT 0
+#define I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_MASK (0x1F << I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_SHIFT)
+#define I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_SHIFT 16
+#define I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_MASK (0x7 << I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_SHIFT)
+#define I40E_GLPCI_PQ_MAX_USED_SPC 0x0009C4EC
+#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_SHIFT 0
+#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_MASK (0xFF << I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_SHIFT)
+#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_SHIFT 8
+#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_MASK (0xFF << I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_SHIFT)
+#define I40E_GLPCI_SPARE_BITS_0 0x0009C4F8
+#define I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_SHIFT 0
+#define I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_MASK (0xFFFFFFFF << I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_SHIFT)
+#define I40E_GLPCI_SPARE_BITS_1 0x0009C4FC
+#define I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_SHIFT 0
+#define I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_MASK (0xFFFFFFFF << I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_SHIFT)
+#define I40E_PFPCI_PF_FLUSH_DONE 0x0009C800
+#define I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_SHIFT 0
+#define I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_MASK (0x1 << I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_SHIFT)
+#define I40E_PFPCI_VF_FLUSH_DONE 0x0009C600
+#define I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_SHIFT 0
+#define I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_MASK (0x1 << I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_SHIFT)
+#define I40E_PFPCI_VF_FLUSH_DONE1(_VF) (0x0009C600 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_PFPCI_VF_FLUSH_DONE1_MAX_INDEX 127
+#define I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_SHIFT 0
+#define I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_MASK (0x1 << I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_SHIFT)
+#define I40E_PFPCI_VM_FLUSH_DONE 0x0009C880
+#define I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_SHIFT 0
+#define I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_MASK (0x1 << I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_SHIFT)
+
+#define I40E_PF_ARQBAH 0x00080180
+#define I40E_PF_ARQBAH_ARQBAH_SHIFT 0
+#define I40E_PF_ARQBAH_ARQBAH_MASK (0xFFFFFFFF << I40E_PF_ARQBAH_ARQBAH_SHIFT)
+#define I40E_PF_ARQBAL 0x00080080
+#define I40E_PF_ARQBAL_ARQBAL_SHIFT 0
+#define I40E_PF_ARQBAL_ARQBAL_MASK (0xFFFFFFFF << I40E_PF_ARQBAL_ARQBAL_SHIFT)
+#define I40E_PF_ARQH 0x00080380
+#define I40E_PF_ARQH_ARQH_SHIFT 0
+#define I40E_PF_ARQH_ARQH_MASK (0x3FF << I40E_PF_ARQH_ARQH_SHIFT)
+#define I40E_PF_ARQLEN 0x00080280
+#define I40E_PF_ARQLEN_ARQLEN_SHIFT 0
+#define I40E_PF_ARQLEN_ARQLEN_MASK (0x3FF << I40E_PF_ARQLEN_ARQLEN_SHIFT)
+#define I40E_PF_ARQLEN_ARQVFE_SHIFT 28
+#define I40E_PF_ARQLEN_ARQVFE_MASK (0x1 << I40E_PF_ARQLEN_ARQVFE_SHIFT)
+#define I40E_PF_ARQLEN_ARQOVFL_SHIFT 29
+#define I40E_PF_ARQLEN_ARQOVFL_MASK (0x1 << I40E_PF_ARQLEN_ARQOVFL_SHIFT)
+#define I40E_PF_ARQLEN_ARQCRIT_SHIFT 30
+#define I40E_PF_ARQLEN_ARQCRIT_MASK (0x1 << I40E_PF_ARQLEN_ARQCRIT_SHIFT)
+#define I40E_PF_ARQLEN_ARQENABLE_SHIFT 31
+#define I40E_PF_ARQLEN_ARQENABLE_MASK (0x1 << I40E_PF_ARQLEN_ARQENABLE_SHIFT)
+#define I40E_PF_ARQT 0x00080480
+#define I40E_PF_ARQT_ARQT_SHIFT 0
+#define I40E_PF_ARQT_ARQT_MASK (0x3FF << I40E_PF_ARQT_ARQT_SHIFT)
+#define I40E_PF_ATQBAH 0x00080100
+#define I40E_PF_ATQBAH_ATQBAH_SHIFT 0
+#define I40E_PF_ATQBAH_ATQBAH_MASK (0xFFFFFFFF << I40E_PF_ATQBAH_ATQBAH_SHIFT)
+#define I40E_PF_ATQBAL 0x00080000
+#define I40E_PF_ATQBAL_ATQBAL_SHIFT 0
+#define I40E_PF_ATQBAL_ATQBAL_MASK (0xFFFFFFFF << I40E_PF_ATQBAL_ATQBAL_SHIFT)
+#define I40E_PF_ATQH 0x00080300
+#define I40E_PF_ATQH_ATQH_SHIFT 0
+#define I40E_PF_ATQH_ATQH_MASK (0x3FF << I40E_PF_ATQH_ATQH_SHIFT)
+#define I40E_PF_ATQLEN 0x00080200
+#define I40E_PF_ATQLEN_ATQLEN_SHIFT 0
+#define I40E_PF_ATQLEN_ATQLEN_MASK (0x3FF << I40E_PF_ATQLEN_ATQLEN_SHIFT)
+#define I40E_PF_ATQLEN_ATQVFE_SHIFT 28
+#define I40E_PF_ATQLEN_ATQVFE_MASK (0x1 << I40E_PF_ATQLEN_ATQVFE_SHIFT)
+#define I40E_PF_ATQLEN_ATQOVFL_SHIFT 29
+#define I40E_PF_ATQLEN_ATQOVFL_MASK (0x1 << I40E_PF_ATQLEN_ATQOVFL_SHIFT)
+#define I40E_PF_ATQLEN_ATQCRIT_SHIFT 30
+#define I40E_PF_ATQLEN_ATQCRIT_MASK (0x1 << I40E_PF_ATQLEN_ATQCRIT_SHIFT)
+#define I40E_PF_ATQLEN_ATQENABLE_SHIFT 31
+#define I40E_PF_ATQLEN_ATQENABLE_MASK (0x1 << I40E_PF_ATQLEN_ATQENABLE_SHIFT)
+#define I40E_PF_ATQT 0x00080400
+#define I40E_PF_ATQT_ATQT_SHIFT 0
+#define I40E_PF_ATQT_ATQT_MASK (0x3FF << I40E_PF_ATQT_ATQT_SHIFT)
+#define I40E_VF_ARQBAH(_VF) (0x00081400 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ARQBAH_MAX_INDEX 127
+#define I40E_VF_ARQBAH_ARQBAH_SHIFT 0
+#define I40E_VF_ARQBAH_ARQBAH_MASK (0xFFFFFFFF << I40E_VF_ARQBAH_ARQBAH_SHIFT)
+#define I40E_VF_ARQBAL(_VF) (0x00080C00 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ARQBAL_MAX_INDEX 127
+#define I40E_VF_ARQBAL_ARQBAL_SHIFT 0
+#define I40E_VF_ARQBAL_ARQBAL_MASK (0xFFFFFFFF << I40E_VF_ARQBAL_ARQBAL_SHIFT)
+#define I40E_VF_ARQH(_VF) (0x00082400 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ARQH_MAX_INDEX 127
+#define I40E_VF_ARQH_ARQH_SHIFT 0
+#define I40E_VF_ARQH_ARQH_MASK (0x3FF << I40E_VF_ARQH_ARQH_SHIFT)
+#define I40E_VF_ARQLEN(_VF) (0x00081C00 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ARQLEN_MAX_INDEX 127
+#define I40E_VF_ARQLEN_ARQLEN_SHIFT 0
+#define I40E_VF_ARQLEN_ARQLEN_MASK (0x3FF << I40E_VF_ARQLEN_ARQLEN_SHIFT)
+#define I40E_VF_ARQLEN_ARQVFE_SHIFT 28
+#define I40E_VF_ARQLEN_ARQVFE_MASK (0x1 << I40E_VF_ARQLEN_ARQVFE_SHIFT)
+#define I40E_VF_ARQLEN_ARQOVFL_SHIFT 29
+#define I40E_VF_ARQLEN_ARQOVFL_MASK (0x1 << I40E_VF_ARQLEN_ARQOVFL_SHIFT)
+#define I40E_VF_ARQLEN_ARQCRIT_SHIFT 30
+#define I40E_VF_ARQLEN_ARQCRIT_MASK (0x1 << I40E_VF_ARQLEN_ARQCRIT_SHIFT)
+#define I40E_VF_ARQLEN_ARQENABLE_SHIFT 31
+#define I40E_VF_ARQLEN_ARQENABLE_MASK (0x1 << I40E_VF_ARQLEN_ARQENABLE_SHIFT)
+#define I40E_VF_ARQT(_VF) (0x00082C00 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ARQT_MAX_INDEX 127
+#define I40E_VF_ARQT_ARQT_SHIFT 0
+#define I40E_VF_ARQT_ARQT_MASK (0x3FF << I40E_VF_ARQT_ARQT_SHIFT)
+#define I40E_VF_ATQBAH(_VF) (0x00081000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ATQBAH_MAX_INDEX 127
+#define I40E_VF_ATQBAH_ATQBAH_SHIFT 0
+#define I40E_VF_ATQBAH_ATQBAH_MASK (0xFFFFFFFF << I40E_VF_ATQBAH_ATQBAH_SHIFT)
+#define I40E_VF_ATQBAL(_VF) (0x00080800 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ATQBAL_MAX_INDEX 127
+#define I40E_VF_ATQBAL_ATQBAL_SHIFT 0
+#define I40E_VF_ATQBAL_ATQBAL_MASK (0xFFFFFFFF << I40E_VF_ATQBAL_ATQBAL_SHIFT)
+#define I40E_VF_ATQH(_VF) (0x00082000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ATQH_MAX_INDEX 127
+#define I40E_VF_ATQH_ATQH_SHIFT 0
+#define I40E_VF_ATQH_ATQH_MASK (0x3FF << I40E_VF_ATQH_ATQH_SHIFT)
+#define I40E_VF_ATQLEN(_VF) (0x00081800 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ATQLEN_MAX_INDEX 127
+#define I40E_VF_ATQLEN_ATQLEN_SHIFT 0
+#define I40E_VF_ATQLEN_ATQLEN_MASK (0x3FF << I40E_VF_ATQLEN_ATQLEN_SHIFT)
+#define I40E_VF_ATQLEN_ATQVFE_SHIFT 28
+#define I40E_VF_ATQLEN_ATQVFE_MASK (0x1 << I40E_VF_ATQLEN_ATQVFE_SHIFT)
+#define I40E_VF_ATQLEN_ATQOVFL_SHIFT 29
+#define I40E_VF_ATQLEN_ATQOVFL_MASK (0x1 << I40E_VF_ATQLEN_ATQOVFL_SHIFT)
+#define I40E_VF_ATQLEN_ATQCRIT_SHIFT 30
+#define I40E_VF_ATQLEN_ATQCRIT_MASK (0x1 << I40E_VF_ATQLEN_ATQCRIT_SHIFT)
+#define I40E_VF_ATQLEN_ATQENABLE_SHIFT 31
+#define I40E_VF_ATQLEN_ATQENABLE_MASK (0x1 << I40E_VF_ATQLEN_ATQENABLE_SHIFT)
+#define I40E_VF_ATQT(_VF) (0x00082800 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ATQT_MAX_INDEX 127
+#define I40E_VF_ATQT_ATQT_SHIFT 0
+#define I40E_VF_ATQT_ATQT_MASK (0x3FF << I40E_VF_ATQT_ATQT_SHIFT)
+#define I40E_PRT_L2TAGSEN 0x001C0B20
+#define I40E_PRT_L2TAGSEN_ENABLE_SHIFT 0
+#define I40E_PRT_L2TAGSEN_ENABLE_MASK (0xFF << I40E_PRT_L2TAGSEN_ENABLE_SHIFT)
+#define I40E_PFCM_LAN_ERRDATA 0x0010C080
+#define I40E_PFCM_LAN_ERRDATA_ERROR_CODE_SHIFT 0
+#define I40E_PFCM_LAN_ERRDATA_ERROR_CODE_MASK (0xF << I40E_PFCM_LAN_ERRDATA_ERROR_CODE_SHIFT)
+#define I40E_PFCM_LAN_ERRDATA_Q_TYPE_SHIFT 4
+#define I40E_PFCM_LAN_ERRDATA_Q_TYPE_MASK (0x7 << I40E_PFCM_LAN_ERRDATA_Q_TYPE_SHIFT)
+#define I40E_PFCM_LAN_ERRDATA_Q_NUM_SHIFT 8
+#define I40E_PFCM_LAN_ERRDATA_Q_NUM_MASK (0xFFF << I40E_PFCM_LAN_ERRDATA_Q_NUM_SHIFT)
+#define I40E_PFCM_LAN_ERRINFO 0x0010C000
+#define I40E_PFCM_LAN_ERRINFO_ERROR_VALID_SHIFT 0
+#define I40E_PFCM_LAN_ERRINFO_ERROR_VALID_MASK (0x1 << I40E_PFCM_LAN_ERRINFO_ERROR_VALID_SHIFT)
+#define I40E_PFCM_LAN_ERRINFO_ERROR_INST_SHIFT 4
+#define I40E_PFCM_LAN_ERRINFO_ERROR_INST_MASK (0x7 << I40E_PFCM_LAN_ERRINFO_ERROR_INST_SHIFT)
+#define I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_SHIFT 8
+#define I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_MASK (0xFF << I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_SHIFT)
+#define I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_SHIFT 16
+#define I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_MASK (0xFF << I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_SHIFT)
+#define I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_SHIFT 24
+#define I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_MASK (0xFF << I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_SHIFT)
+#define I40E_PFCM_LANCTXCTL(_pf) (0x0010C300 + ((_pf) * 4))/* _pf=0..15 */
+#define I40E_PFCM_LANCTXCTL_QUEUE_NUM_SHIFT 0
+#define I40E_PFCM_LANCTXCTL_QUEUE_NUM_MASK (0xFFF << I40E_PFCM_LANCTXCTL_QUEUE_NUM_SHIFT)
+#define I40E_PFCM_LANCTXCTL_SUB_LINE_SHIFT 12
+#define I40E_PFCM_LANCTXCTL_SUB_LINE_MASK (0x7 << I40E_PFCM_LANCTXCTL_SUB_LINE_SHIFT)
+#define I40E_PFCM_LANCTXCTL_QUEUE_TYPE_SHIFT 15
+#define I40E_PFCM_LANCTXCTL_QUEUE_TYPE_MASK (0x3 << I40E_PFCM_LANCTXCTL_QUEUE_TYPE_SHIFT)
+#define I40E_PFCM_LANCTXCTL_OP_CODE_SHIFT 17
+#define I40E_PFCM_LANCTXCTL_OP_CODE_MASK (0x3 << I40E_PFCM_LANCTXCTL_OP_CODE_SHIFT)
+#define I40E_PFCM_LANCTXDATA(_i, _pf) (0x0010C100 + ((_i) * 4) + ((_pf) * 16))/* _i=0...3 _pf=0..15 */
+#define I40E_PFCM_LANCTXDATA_MAX_INDEX 3
+#define I40E_PFCM_LANCTXDATA_DATA_SHIFT 0
+#define I40E_PFCM_LANCTXDATA_DATA_MASK (0xFFFFFFFF << I40E_PFCM_LANCTXDATA_DATA_SHIFT)
+#define I40E_PFCM_LANCTXSTAT(_pf) (0x0010C380 + ((_pf) * 4))/* _pf=0..15 */
+#define I40E_PFCM_LANCTXSTAT_CTX_DONE_SHIFT 0
+#define I40E_PFCM_LANCTXSTAT_CTX_DONE_MASK (0x1 << I40E_PFCM_LANCTXSTAT_CTX_DONE_SHIFT)
+#define I40E_PFCM_LANCTXSTAT_CTX_MISS_SHIFT 1
+#define I40E_PFCM_LANCTXSTAT_CTX_MISS_MASK (0x1 << I40E_PFCM_LANCTXSTAT_CTX_MISS_SHIFT)
+#define I40E_PFCM_PE_ERRDATA 0x00138D00
+#define I40E_PFCM_PE_ERRDATA_ERROR_CODE_SHIFT 0
+#define I40E_PFCM_PE_ERRDATA_ERROR_CODE_MASK (0xF << I40E_PFCM_PE_ERRDATA_ERROR_CODE_SHIFT)
+#define I40E_PFCM_PE_ERRDATA_Q_TYPE_SHIFT 4
+#define I40E_PFCM_PE_ERRDATA_Q_TYPE_MASK (0x7 << I40E_PFCM_PE_ERRDATA_Q_TYPE_SHIFT)
+#define I40E_PFCM_PE_ERRDATA_Q_NUM_SHIFT 8
+#define I40E_PFCM_PE_ERRDATA_Q_NUM_MASK (0x3FFFF << I40E_PFCM_PE_ERRDATA_Q_NUM_SHIFT)
+#define I40E_PFCM_PE_ERRINFO 0x00138C80
+#define I40E_PFCM_PE_ERRINFO_ERROR_VALID_SHIFT 0
+#define I40E_PFCM_PE_ERRINFO_ERROR_VALID_MASK (0x1 << I40E_PFCM_PE_ERRINFO_ERROR_VALID_SHIFT)
+#define I40E_PFCM_PE_ERRINFO_ERROR_INST_SHIFT 4
+#define I40E_PFCM_PE_ERRINFO_ERROR_INST_MASK (0x7 << I40E_PFCM_PE_ERRINFO_ERROR_INST_SHIFT)
+#define I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT 8
+#define I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_MASK (0xFF << I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT)
+#define I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT 16
+#define I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_MASK (0xFF << I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT)
+#define I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT 24
+#define I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_MASK (0xFF << I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT)
+#define I40E_VFCM_PE_ERRDATA1(_VF) (0x00138800 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFCM_PE_ERRDATA1_MAX_INDEX 127
+#define I40E_VFCM_PE_ERRDATA1_ERROR_CODE_SHIFT 0
+#define I40E_VFCM_PE_ERRDATA1_ERROR_CODE_MASK (0xF << I40E_VFCM_PE_ERRDATA1_ERROR_CODE_SHIFT)
+#define I40E_VFCM_PE_ERRDATA1_Q_TYPE_SHIFT 4
+#define I40E_VFCM_PE_ERRDATA1_Q_TYPE_MASK (0x7 << I40E_VFCM_PE_ERRDATA1_Q_TYPE_SHIFT)
+#define I40E_VFCM_PE_ERRDATA1_Q_NUM_SHIFT 8
+#define I40E_VFCM_PE_ERRDATA1_Q_NUM_MASK (0x3FFFF << I40E_VFCM_PE_ERRDATA1_Q_NUM_SHIFT)
+#define I40E_VFCM_PE_ERRINFO1(_VF) (0x00138400 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFCM_PE_ERRINFO1_MAX_INDEX 127
+#define I40E_VFCM_PE_ERRINFO1_ERROR_VALID_SHIFT 0
+#define I40E_VFCM_PE_ERRINFO1_ERROR_VALID_MASK (0x1 << I40E_VFCM_PE_ERRINFO1_ERROR_VALID_SHIFT)
+#define I40E_VFCM_PE_ERRINFO1_ERROR_INST_SHIFT 4
+#define I40E_VFCM_PE_ERRINFO1_ERROR_INST_MASK (0x7 << I40E_VFCM_PE_ERRINFO1_ERROR_INST_SHIFT)
+#define I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_SHIFT 8
+#define I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_MASK (0xFF << I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_SHIFT)
+#define I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_SHIFT 16
+#define I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_MASK (0xFF << I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_SHIFT)
+#define I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_SHIFT 24
+#define I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_MASK (0xFF << I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_SHIFT)
+#define I40E_GLDCB_GENC 0x00083044
+#define I40E_GLDCB_GENC_PCIRTT_SHIFT 0
+#define I40E_GLDCB_GENC_PCIRTT_MASK (0xFFFF << I40E_GLDCB_GENC_PCIRTT_SHIFT)
+#define I40E_GLDCB_RUPTI 0x00122618
+#define I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_SHIFT 0
+#define I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_MASK (0xFFFFFFFF << I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_SHIFT)
+#define I40E_PRTDCB_FCCFG 0x001E4640
+#define I40E_PRTDCB_FCCFG_TFCE_SHIFT 3
+#define I40E_PRTDCB_FCCFG_TFCE_MASK (0x3 << I40E_PRTDCB_FCCFG_TFCE_SHIFT)
+#define I40E_PRTDCB_FCRTV 0x001E4600
+#define I40E_PRTDCB_FCRTV_FC_REFRESH_TH_SHIFT 0
+#define I40E_PRTDCB_FCRTV_FC_REFRESH_TH_MASK (0xFFFF << I40E_PRTDCB_FCRTV_FC_REFRESH_TH_SHIFT)
+#define I40E_PRTDCB_FCTTVN(_i) (0x001E4580 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRTDCB_FCTTVN_MAX_INDEX 3
+#define I40E_PRTDCB_FCTTVN_TTV_2N_SHIFT 0
+#define I40E_PRTDCB_FCTTVN_TTV_2N_MASK (0xFFFF << I40E_PRTDCB_FCTTVN_TTV_2N_SHIFT)
+#define I40E_PRTDCB_FCTTVN_TTV_2N_P1_SHIFT 16
+#define I40E_PRTDCB_FCTTVN_TTV_2N_P1_MASK (0xFFFF << I40E_PRTDCB_FCTTVN_TTV_2N_P1_SHIFT)
+#define I40E_PRTDCB_GENC 0x00083000
+#define I40E_PRTDCB_GENC_RESERVED_1_SHIFT 0
+#define I40E_PRTDCB_GENC_RESERVED_1_MASK (0x3 << I40E_PRTDCB_GENC_RESERVED_1_SHIFT)
+#define I40E_PRTDCB_GENC_NUMTC_SHIFT 2
+#define I40E_PRTDCB_GENC_NUMTC_MASK (0xF << I40E_PRTDCB_GENC_NUMTC_SHIFT)
+#define I40E_PRTDCB_GENC_FCOEUP_SHIFT 6
+#define I40E_PRTDCB_GENC_FCOEUP_MASK (0x7 << I40E_PRTDCB_GENC_FCOEUP_SHIFT)
+#define I40E_PRTDCB_GENC_FCOEUP_VALID_SHIFT 9
+#define I40E_PRTDCB_GENC_FCOEUP_VALID_MASK (0x1 << I40E_PRTDCB_GENC_FCOEUP_VALID_SHIFT)
+#define I40E_PRTDCB_GENC_PFCLDA_SHIFT 16
+#define I40E_PRTDCB_GENC_PFCLDA_MASK (0xFFFF << I40E_PRTDCB_GENC_PFCLDA_SHIFT)
+#define I40E_PRTDCB_GENS 0x00083020
+#define I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT 0
+#define I40E_PRTDCB_GENS_DCBX_STATUS_MASK (0x7 << I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT)
+#define I40E_PRTDCB_MFLCN 0x001E2400
+#define I40E_PRTDCB_MFLCN_PMCF_SHIFT 0
+#define I40E_PRTDCB_MFLCN_PMCF_MASK (0x1 << I40E_PRTDCB_MFLCN_PMCF_SHIFT)
+#define I40E_PRTDCB_MFLCN_DPF_SHIFT 1
+#define I40E_PRTDCB_MFLCN_DPF_MASK (0x1 << I40E_PRTDCB_MFLCN_DPF_SHIFT)
+#define I40E_PRTDCB_MFLCN_RPFCM_SHIFT 2
+#define I40E_PRTDCB_MFLCN_RPFCM_MASK (0x1 << I40E_PRTDCB_MFLCN_RPFCM_SHIFT)
+#define I40E_PRTDCB_MFLCN_RFCE_SHIFT 3
+#define I40E_PRTDCB_MFLCN_RFCE_MASK (0x1 << I40E_PRTDCB_MFLCN_RFCE_SHIFT)
+#define I40E_PRTDCB_MFLCN_RPFCE_SHIFT 4
+#define I40E_PRTDCB_MFLCN_RPFCE_MASK (0xFF << I40E_PRTDCB_MFLCN_RPFCE_SHIFT)
+#define I40E_PRTDCB_RETSC 0x001223E0
+#define I40E_PRTDCB_RETSC_ETS_MODE_SHIFT 0
+#define I40E_PRTDCB_RETSC_ETS_MODE_MASK (0x1 << I40E_PRTDCB_RETSC_ETS_MODE_SHIFT)
+#define I40E_PRTDCB_RETSC_NON_ETS_MODE_SHIFT 1
+#define I40E_PRTDCB_RETSC_NON_ETS_MODE_MASK (0x1 << I40E_PRTDCB_RETSC_NON_ETS_MODE_SHIFT)
+#define I40E_PRTDCB_RETSC_ETS_MAX_EXP_SHIFT 2
+#define I40E_PRTDCB_RETSC_ETS_MAX_EXP_MASK (0xF << I40E_PRTDCB_RETSC_ETS_MAX_EXP_SHIFT)
+#define I40E_PRTDCB_RETSC_LLTC_SHIFT 8
+#define I40E_PRTDCB_RETSC_LLTC_MASK (0xFF << I40E_PRTDCB_RETSC_LLTC_SHIFT)
+#define I40E_PRTDCB_RETSTCC(_i) (0x00122180 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTDCB_RETSTCC_MAX_INDEX 7
+#define I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT 0
+#define I40E_PRTDCB_RETSTCC_BWSHARE_MASK (0x7F << I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT)
+#define I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT 30
+#define I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK (0x1 << I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT)
+#define I40E_PRTDCB_RETSTCC_ETSTC_SHIFT 31
+#define I40E_PRTDCB_RETSTCC_ETSTC_MASK (0x1 << I40E_PRTDCB_RETSTCC_ETSTC_SHIFT)
+#define I40E_PRTDCB_RPPMC 0x001223A0
+#define I40E_PRTDCB_RPPMC_LANRPPM_SHIFT 0
+#define I40E_PRTDCB_RPPMC_LANRPPM_MASK (0xFF << I40E_PRTDCB_RPPMC_LANRPPM_SHIFT)
+#define I40E_PRTDCB_RPPMC_RDMARPPM_SHIFT 8
+#define I40E_PRTDCB_RPPMC_RDMARPPM_MASK (0xFF << I40E_PRTDCB_RPPMC_RDMARPPM_SHIFT)
+#define I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_SHIFT 16
+#define I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_MASK (0xFF << I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_SHIFT)
+#define I40E_PRTDCB_RUP 0x001C0B00
+#define I40E_PRTDCB_RUP_NOVLANUP_SHIFT 0
+#define I40E_PRTDCB_RUP_NOVLANUP_MASK (0x7 << I40E_PRTDCB_RUP_NOVLANUP_SHIFT)
+#define I40E_PRTDCB_RUP2TC 0x001C09A0
+#define I40E_PRTDCB_RUP2TC_UP0TC_SHIFT 0
+#define I40E_PRTDCB_RUP2TC_UP0TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP0TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP1TC_SHIFT 3
+#define I40E_PRTDCB_RUP2TC_UP1TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP1TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP2TC_SHIFT 6
+#define I40E_PRTDCB_RUP2TC_UP2TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP2TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP3TC_SHIFT 9
+#define I40E_PRTDCB_RUP2TC_UP3TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP3TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP4TC_SHIFT 12
+#define I40E_PRTDCB_RUP2TC_UP4TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP4TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP5TC_SHIFT 15
+#define I40E_PRTDCB_RUP2TC_UP5TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP5TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP6TC_SHIFT 18
+#define I40E_PRTDCB_RUP2TC_UP6TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP6TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP7TC_SHIFT 21
+#define I40E_PRTDCB_RUP2TC_UP7TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP7TC_SHIFT)
+#define I40E_PRTDCB_TC2PFC 0x001C0980
+#define I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT 0
+#define I40E_PRTDCB_TC2PFC_TC2PFC_MASK (0xFF << I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT)
+#define I40E_PRTDCB_TCPMC 0x000A21A0
+#define I40E_PRTDCB_TCPMC_CPM_SHIFT 0
+#define I40E_PRTDCB_TCPMC_CPM_MASK (0x1FFF << I40E_PRTDCB_TCPMC_CPM_SHIFT)
+#define I40E_PRTDCB_TCPMC_LLTC_SHIFT 13
+#define I40E_PRTDCB_TCPMC_LLTC_MASK (0xFF << I40E_PRTDCB_TCPMC_LLTC_SHIFT)
+#define I40E_PRTDCB_TCPMC_TCPM_MODE_SHIFT 30
+#define I40E_PRTDCB_TCPMC_TCPM_MODE_MASK (0x1 << I40E_PRTDCB_TCPMC_TCPM_MODE_SHIFT)
+#define I40E_PRTDCB_TCWSTC(_i) (0x000A2040 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTDCB_TCWSTC_MAX_INDEX 7
+#define I40E_PRTDCB_TCWSTC_MSTC_SHIFT 0
+#define I40E_PRTDCB_TCWSTC_MSTC_MASK (0xFFFFF << I40E_PRTDCB_TCWSTC_MSTC_SHIFT)
+#define I40E_PRTDCB_TDPMC 0x000A0180
+#define I40E_PRTDCB_TDPMC_DPM_SHIFT 0
+#define I40E_PRTDCB_TDPMC_DPM_MASK (0xFF << I40E_PRTDCB_TDPMC_DPM_SHIFT)
+#define I40E_PRTDCB_TDPMC_TCPM_MODE_SHIFT 30
+#define I40E_PRTDCB_TDPMC_TCPM_MODE_MASK (0x1 << I40E_PRTDCB_TDPMC_TCPM_MODE_SHIFT)
+#define I40E_PRTDCB_TDPUC 0x00044100
+#define I40E_PRTDCB_TDPUC_MAX_TXFRAME_SHIFT 0
+#define I40E_PRTDCB_TDPUC_MAX_TXFRAME_MASK (0xFFFF << I40E_PRTDCB_TDPUC_MAX_TXFRAME_SHIFT)
+#define I40E_PRTDCB_TETSC_TCB 0x000AE060
+#define I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_SHIFT 0
+#define I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_MASK (0x1 << I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_SHIFT)
+#define I40E_PRTDCB_TETSC_TCB_LLTC_SHIFT 8
+#define I40E_PRTDCB_TETSC_TCB_LLTC_MASK (0xFF << I40E_PRTDCB_TETSC_TCB_LLTC_SHIFT)
+#define I40E_PRTDCB_TETSC_TPB 0x00098060
+#define I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_SHIFT 0
+#define I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_MASK (0x1 << I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_SHIFT)
+#define I40E_PRTDCB_TETSC_TPB_LLTC_SHIFT 8
+#define I40E_PRTDCB_TETSC_TPB_LLTC_MASK (0xFF << I40E_PRTDCB_TETSC_TPB_LLTC_SHIFT)
+#define I40E_PRTDCB_TFCS 0x001E4560
+#define I40E_PRTDCB_TFCS_TXOFF_SHIFT 0
+#define I40E_PRTDCB_TFCS_TXOFF_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF0_SHIFT 8
+#define I40E_PRTDCB_TFCS_TXOFF0_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF0_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF1_SHIFT 9
+#define I40E_PRTDCB_TFCS_TXOFF1_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF1_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF2_SHIFT 10
+#define I40E_PRTDCB_TFCS_TXOFF2_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF2_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF3_SHIFT 11
+#define I40E_PRTDCB_TFCS_TXOFF3_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF3_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF4_SHIFT 12
+#define I40E_PRTDCB_TFCS_TXOFF4_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF4_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF5_SHIFT 13
+#define I40E_PRTDCB_TFCS_TXOFF5_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF5_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF6_SHIFT 14
+#define I40E_PRTDCB_TFCS_TXOFF6_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF6_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF7_SHIFT 15
+#define I40E_PRTDCB_TFCS_TXOFF7_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF7_SHIFT)
+#define I40E_PRTDCB_TFWSTC(_i) (0x000A0040 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTDCB_TFWSTC_MAX_INDEX 7
+#define I40E_PRTDCB_TFWSTC_MSTC_SHIFT 0
+#define I40E_PRTDCB_TFWSTC_MSTC_MASK (0xFFFFF << I40E_PRTDCB_TFWSTC_MSTC_SHIFT)
+#define I40E_PRTDCB_TPFCTS(_i) (0x001E4660 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTDCB_TPFCTS_MAX_INDEX 7
+#define I40E_PRTDCB_TPFCTS_PFCTIMER_SHIFT 0
+#define I40E_PRTDCB_TPFCTS_PFCTIMER_MASK (0x3FFF << I40E_PRTDCB_TPFCTS_PFCTIMER_SHIFT)
+#define I40E_GLFCOE_RCTL 0x00269B94
+#define I40E_GLFCOE_RCTL_FCOEVER_SHIFT 0
+#define I40E_GLFCOE_RCTL_FCOEVER_MASK (0xF << I40E_GLFCOE_RCTL_FCOEVER_SHIFT)
+#define I40E_GLFCOE_RCTL_SAVBAD_SHIFT 4
+#define I40E_GLFCOE_RCTL_SAVBAD_MASK (0x1 << I40E_GLFCOE_RCTL_SAVBAD_SHIFT)
+#define I40E_GLFCOE_RCTL_ICRC_SHIFT 5
+#define I40E_GLFCOE_RCTL_ICRC_MASK (0x1 << I40E_GLFCOE_RCTL_ICRC_SHIFT)
+#define I40E_GLFCOE_RCTL_MAX_SIZE_SHIFT 16
+#define I40E_GLFCOE_RCTL_MAX_SIZE_MASK (0x3FFF << I40E_GLFCOE_RCTL_MAX_SIZE_SHIFT)
+#define I40E_GL_FWSTS 0x00083048
+#define I40E_GL_FWSTS_FWS0B_SHIFT 0
+#define I40E_GL_FWSTS_FWS0B_MASK (0xFF << I40E_GL_FWSTS_FWS0B_SHIFT)
+#define I40E_GL_FWSTS_FWRI_SHIFT 9
+#define I40E_GL_FWSTS_FWRI_MASK (0x1 << I40E_GL_FWSTS_FWRI_SHIFT)
+#define I40E_GL_FWSTS_FWS1B_SHIFT 16
+#define I40E_GL_FWSTS_FWS1B_MASK (0xFF << I40E_GL_FWSTS_FWS1B_SHIFT)
+#define I40E_GLGEN_CLKSTAT 0x000B8184
+#define I40E_GLGEN_CLKSTAT_CLKMODE_SHIFT 0
+#define I40E_GLGEN_CLKSTAT_CLKMODE_MASK (0x1 << I40E_GLGEN_CLKSTAT_CLKMODE_SHIFT)
+#define I40E_GLGEN_CLKSTAT_U_CLK_SPEED_SHIFT 4
+#define I40E_GLGEN_CLKSTAT_U_CLK_SPEED_MASK (0x3 << I40E_GLGEN_CLKSTAT_U_CLK_SPEED_SHIFT)
+#define I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_SHIFT 8
+#define I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_MASK (0x7 << I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_SHIFT)
+#define I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_SHIFT 12
+#define I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_MASK (0x7 << I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_SHIFT)
+#define I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_SHIFT 16
+#define I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_MASK (0x7 << I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_SHIFT)
+#define I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_SHIFT 20
+#define I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_MASK (0x7 << I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_SHIFT)
+#define I40E_GLGEN_GPIO_CTL(_i) (0x00088100 + ((_i) * 4)) /* _i=0...29 */
+#define I40E_GLGEN_GPIO_CTL_MAX_INDEX 29
+#define I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT 0
+#define I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK (0x3 << I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_SHIFT 3
+#define I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_MASK (0x1 << I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PIN_DIR_SHIFT 4
+#define I40E_GLGEN_GPIO_CTL_PIN_DIR_MASK (0x1 << I40E_GLGEN_GPIO_CTL_PIN_DIR_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_TRI_CTL_SHIFT 5
+#define I40E_GLGEN_GPIO_CTL_TRI_CTL_MASK (0x1 << I40E_GLGEN_GPIO_CTL_TRI_CTL_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_OUT_CTL_SHIFT 6
+#define I40E_GLGEN_GPIO_CTL_OUT_CTL_MASK (0x1 << I40E_GLGEN_GPIO_CTL_OUT_CTL_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT 7
+#define I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK (0x7 << I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_LED_INVRT_SHIFT 10
+#define I40E_GLGEN_GPIO_CTL_LED_INVRT_MASK (0x1 << I40E_GLGEN_GPIO_CTL_LED_INVRT_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT 11
+#define I40E_GLGEN_GPIO_CTL_LED_BLINK_MASK (0x1 << I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT 12
+#define I40E_GLGEN_GPIO_CTL_LED_MODE_MASK (0xF << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_INT_MODE_SHIFT 17
+#define I40E_GLGEN_GPIO_CTL_INT_MODE_MASK (0x3 << I40E_GLGEN_GPIO_CTL_INT_MODE_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT 19
+#define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_MASK (0x1 << I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT 20
+#define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_MASK (0x3F << I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT)
+#define I40E_GLGEN_GPIO_SET 0x00088184
+#define I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT 0
+#define I40E_GLGEN_GPIO_SET_GPIO_INDX_MASK (0x1F << I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT)
+#define I40E_GLGEN_GPIO_SET_SDP_DATA_SHIFT 5
+#define I40E_GLGEN_GPIO_SET_SDP_DATA_MASK (0x1 << I40E_GLGEN_GPIO_SET_SDP_DATA_SHIFT)
+#define I40E_GLGEN_GPIO_SET_DRIVE_SDP_SHIFT 6
+#define I40E_GLGEN_GPIO_SET_DRIVE_SDP_MASK (0x1 << I40E_GLGEN_GPIO_SET_DRIVE_SDP_SHIFT)
+#define I40E_GLGEN_GPIO_STAT 0x0008817C
+#define I40E_GLGEN_GPIO_STAT_GPIO_VALUE_SHIFT 0
+#define I40E_GLGEN_GPIO_STAT_GPIO_VALUE_MASK (0x3FFFFFFF << I40E_GLGEN_GPIO_STAT_GPIO_VALUE_SHIFT)
+#define I40E_GLGEN_GPIO_TRANSIT 0x00088180
+#define I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_SHIFT 0
+#define I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_MASK (0x3FFFFFFF << I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_SHIFT)
+#define I40E_GLGEN_I2CCMD(_i) (0x000881E0 + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLGEN_I2CCMD_MAX_INDEX 3
+#define I40E_GLGEN_I2CCMD_DATA_SHIFT 0
+#define I40E_GLGEN_I2CCMD_DATA_MASK (0xFFFF << I40E_GLGEN_I2CCMD_DATA_SHIFT)
+#define I40E_GLGEN_I2CCMD_REGADD_SHIFT 16
+#define I40E_GLGEN_I2CCMD_REGADD_MASK (0xFF << I40E_GLGEN_I2CCMD_REGADD_SHIFT)
+#define I40E_GLGEN_I2CCMD_PHYADD_SHIFT 24
+#define I40E_GLGEN_I2CCMD_PHYADD_MASK (0x7 << I40E_GLGEN_I2CCMD_PHYADD_SHIFT)
+#define I40E_GLGEN_I2CCMD_OP_SHIFT 27
+#define I40E_GLGEN_I2CCMD_OP_MASK (0x1 << I40E_GLGEN_I2CCMD_OP_SHIFT)
+#define I40E_GLGEN_I2CCMD_RESET_SHIFT 28
+#define I40E_GLGEN_I2CCMD_RESET_MASK (0x1 << I40E_GLGEN_I2CCMD_RESET_SHIFT)
+#define I40E_GLGEN_I2CCMD_R_SHIFT 29
+#define I40E_GLGEN_I2CCMD_R_MASK (0x1 << I40E_GLGEN_I2CCMD_R_SHIFT)
+#define I40E_GLGEN_I2CCMD_E_SHIFT 31
+#define I40E_GLGEN_I2CCMD_E_MASK (0x1 << I40E_GLGEN_I2CCMD_E_SHIFT)
+#define I40E_GLGEN_I2CPARAMS(_i) (0x000881AC + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLGEN_I2CPARAMS_MAX_INDEX 3
+#define I40E_GLGEN_I2CPARAMS_WRITE_TIME_SHIFT 0
+#define I40E_GLGEN_I2CPARAMS_WRITE_TIME_MASK (0x1F << I40E_GLGEN_I2CPARAMS_WRITE_TIME_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_READ_TIME_SHIFT 5
+#define I40E_GLGEN_I2CPARAMS_READ_TIME_MASK (0x7 << I40E_GLGEN_I2CPARAMS_READ_TIME_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_I2CBB_EN_SHIFT 8
+#define I40E_GLGEN_I2CPARAMS_I2CBB_EN_MASK (0x1 << I40E_GLGEN_I2CPARAMS_I2CBB_EN_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_CLK_SHIFT 9
+#define I40E_GLGEN_I2CPARAMS_CLK_MASK (0x1 << I40E_GLGEN_I2CPARAMS_CLK_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_DATA_OUT_SHIFT 10
+#define I40E_GLGEN_I2CPARAMS_DATA_OUT_MASK (0x1 << I40E_GLGEN_I2CPARAMS_DATA_OUT_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_DATA_OE_N_SHIFT 11
+#define I40E_GLGEN_I2CPARAMS_DATA_OE_N_MASK (0x1 << I40E_GLGEN_I2CPARAMS_DATA_OE_N_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_DATA_IN_SHIFT 12
+#define I40E_GLGEN_I2CPARAMS_DATA_IN_MASK (0x1 << I40E_GLGEN_I2CPARAMS_DATA_IN_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_CLK_OE_N_SHIFT 13
+#define I40E_GLGEN_I2CPARAMS_CLK_OE_N_MASK (0x1 << I40E_GLGEN_I2CPARAMS_CLK_OE_N_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_CLK_IN_SHIFT 14
+#define I40E_GLGEN_I2CPARAMS_CLK_IN_MASK (0x1 << I40E_GLGEN_I2CPARAMS_CLK_IN_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_SHIFT 15
+#define I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_MASK (0x1 << I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_SHIFT 31
+#define I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_MASK (0x1 << I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_SHIFT)
+#define I40E_GLGEN_LED_CTL 0x00088178
+#define I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_SHIFT 0
+#define I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_MASK (0x1 << I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_SHIFT)
+#define I40E_GLGEN_MDIO_CTRL(_i) (0x000881D0 + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLGEN_MDIO_CTRL_MAX_INDEX 3
+#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_SHIFT 0
+#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_MASK (0x1FFFF << I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_SHIFT)
+#define I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT 17
+#define I40E_GLGEN_MDIO_CTRL_CONTMDC_MASK (0x1 << I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT)
+#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT 18
+#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_MASK (0x3FFF << I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL(_i) (0x000881C0 + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLGEN_MDIO_I2C_SEL_MAX_INDEX 3
+#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_SHIFT 0
+#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_MASK (0x1 << I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT 1
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_MASK (0xF << I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_SHIFT 5
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_MASK (0x1F << I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_SHIFT 10
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_MASK (0x1F << I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_SHIFT 15
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_MASK (0x1F << I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_SHIFT 20
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_MASK (0x1F << I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_SHIFT 25
+#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_MASK (0xF << I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_SHIFT 31
+#define I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_MASK (0x1 << I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_SHIFT)
+#define I40E_GLGEN_MSCA(_i) (0x0008818C + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLGEN_MSCA_MAX_INDEX 3
+#define I40E_GLGEN_MSCA_MDIADD_SHIFT 0
+#define I40E_GLGEN_MSCA_MDIADD_MASK (0xFFFF << I40E_GLGEN_MSCA_MDIADD_SHIFT)
+#define I40E_GLGEN_MSCA_DEVADD_SHIFT 16
+#define I40E_GLGEN_MSCA_DEVADD_MASK (0x1F << I40E_GLGEN_MSCA_DEVADD_SHIFT)
+#define I40E_GLGEN_MSCA_PHYADD_SHIFT 21
+#define I40E_GLGEN_MSCA_PHYADD_MASK (0x1F << I40E_GLGEN_MSCA_PHYADD_SHIFT)
+#define I40E_GLGEN_MSCA_OPCODE_SHIFT 26
+#define I40E_GLGEN_MSCA_OPCODE_MASK (0x3 << I40E_GLGEN_MSCA_OPCODE_SHIFT)
+#define I40E_GLGEN_MSCA_STCODE_SHIFT 28
+#define I40E_GLGEN_MSCA_STCODE_MASK (0x3 << I40E_GLGEN_MSCA_STCODE_SHIFT)
+#define I40E_GLGEN_MSCA_MDICMD_SHIFT 30
+#define I40E_GLGEN_MSCA_MDICMD_MASK (0x1 << I40E_GLGEN_MSCA_MDICMD_SHIFT)
+#define I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT 31
+#define I40E_GLGEN_MSCA_MDIINPROGEN_MASK (0x1 << I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT)
+#define I40E_GLGEN_MSRWD(_i) (0x0008819C + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLGEN_MSRWD_MAX_INDEX 3
+#define I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT 0
+#define I40E_GLGEN_MSRWD_MDIWRDATA_MASK (0xFFFF << I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT)
+#define I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT 16
+#define I40E_GLGEN_MSRWD_MDIRDDATA_MASK (0xFFFF << I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT)
+#define I40E_GLGEN_PCIFCNCNT 0x001C0AB4
+#define I40E_GLGEN_PCIFCNCNT_PCIPFCNT_SHIFT 0
+#define I40E_GLGEN_PCIFCNCNT_PCIPFCNT_MASK (0x1F << I40E_GLGEN_PCIFCNCNT_PCIPFCNT_SHIFT)
+#define I40E_GLGEN_PCIFCNCNT_PCIVFCNT_SHIFT 16
+#define I40E_GLGEN_PCIFCNCNT_PCIVFCNT_MASK (0xFF << I40E_GLGEN_PCIFCNCNT_PCIVFCNT_SHIFT)
+#define I40E_GLGEN_PE_ENA 0x000B81A0
+#define I40E_GLGEN_PE_ENA_PE_ENA_SHIFT 0
+#define I40E_GLGEN_PE_ENA_PE_ENA_MASK (0x1 << I40E_GLGEN_PE_ENA_PE_ENA_SHIFT)
+#define I40E_GLGEN_PE_ENA_PE_CLK_SRC_SEL_SHIFT 1
+#define I40E_GLGEN_PE_ENA_PE_CLK_SRC_SEL_MASK (0x3 << I40E_GLGEN_PE_ENA_PE_CLK_SRC_SEL_SHIFT)
+#define I40E_GLGEN_RSTAT 0x000B8188
+#define I40E_GLGEN_RSTAT_DEVSTATE_SHIFT 0
+#define I40E_GLGEN_RSTAT_DEVSTATE_MASK (0x3 << I40E_GLGEN_RSTAT_DEVSTATE_SHIFT)
+#define I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT 2
+#define I40E_GLGEN_RSTAT_RESET_TYPE_MASK (0x3 << I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT)
+#define I40E_GLGEN_RSTAT_CORERCNT_SHIFT 4
+#define I40E_GLGEN_RSTAT_CORERCNT_MASK (0x3 << I40E_GLGEN_RSTAT_CORERCNT_SHIFT)
+#define I40E_GLGEN_RSTAT_GLOBRCNT_SHIFT 6
+#define I40E_GLGEN_RSTAT_GLOBRCNT_MASK (0x3 << I40E_GLGEN_RSTAT_GLOBRCNT_SHIFT)
+#define I40E_GLGEN_RSTAT_EMPRCNT_SHIFT 8
+#define I40E_GLGEN_RSTAT_EMPRCNT_MASK (0x3 << I40E_GLGEN_RSTAT_EMPRCNT_SHIFT)
+#define I40E_GLGEN_RSTAT_TIME_TO_RST_SHIFT 10
+#define I40E_GLGEN_RSTAT_TIME_TO_RST_MASK (0x3F << I40E_GLGEN_RSTAT_TIME_TO_RST_SHIFT)
+#define I40E_GLGEN_RSTCTL 0x000B8180
+#define I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT 0
+#define I40E_GLGEN_RSTCTL_GRSTDEL_MASK (0x3F << I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT)
+#define I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT 8
+#define I40E_GLGEN_RSTCTL_ECC_RST_ENA_MASK (0x1 << I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT)
+#define I40E_GLGEN_RSTENA_EMP 0x000B818C
+#define I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_SHIFT 0
+#define I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_MASK (0x1 << I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_SHIFT)
+#define I40E_GLGEN_RTRIG 0x000B8190
+#define I40E_GLGEN_RTRIG_CORER_SHIFT 0
+#define I40E_GLGEN_RTRIG_CORER_MASK (0x1 << I40E_GLGEN_RTRIG_CORER_SHIFT)
+#define I40E_GLGEN_RTRIG_GLOBR_SHIFT 1
+#define I40E_GLGEN_RTRIG_GLOBR_MASK (0x1 << I40E_GLGEN_RTRIG_GLOBR_SHIFT)
+#define I40E_GLGEN_RTRIG_EMPFWR_SHIFT 2
+#define I40E_GLGEN_RTRIG_EMPFWR_MASK (0x1 << I40E_GLGEN_RTRIG_EMPFWR_SHIFT)
+#define I40E_GLGEN_STAT 0x000B612C
+#define I40E_GLGEN_STAT_HWRSVD0_SHIFT 0
+#define I40E_GLGEN_STAT_HWRSVD0_MASK (0x3 << I40E_GLGEN_STAT_HWRSVD0_SHIFT)
+#define I40E_GLGEN_STAT_DCBEN_SHIFT 2
+#define I40E_GLGEN_STAT_DCBEN_MASK (0x1 << I40E_GLGEN_STAT_DCBEN_SHIFT)
+#define I40E_GLGEN_STAT_VTEN_SHIFT 3
+#define I40E_GLGEN_STAT_VTEN_MASK (0x1 << I40E_GLGEN_STAT_VTEN_SHIFT)
+#define I40E_GLGEN_STAT_FCOEN_SHIFT 4
+#define I40E_GLGEN_STAT_FCOEN_MASK (0x1 << I40E_GLGEN_STAT_FCOEN_SHIFT)
+#define I40E_GLGEN_STAT_EVBEN_SHIFT 5
+#define I40E_GLGEN_STAT_EVBEN_MASK (0x1 << I40E_GLGEN_STAT_EVBEN_SHIFT)
+#define I40E_GLGEN_STAT_HWRSVD1_SHIFT 6
+#define I40E_GLGEN_STAT_HWRSVD1_MASK (0x3 << I40E_GLGEN_STAT_HWRSVD1_SHIFT)
+#define I40E_GLGEN_VFLRSTAT(_i) (0x00092600 + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLGEN_VFLRSTAT_MAX_INDEX 3
+#define I40E_GLGEN_VFLRSTAT_VFLRE_SHIFT 0
+#define I40E_GLGEN_VFLRSTAT_VFLRE_MASK (0xFFFFFFFF << I40E_GLGEN_VFLRSTAT_VFLRE_SHIFT)
+#define I40E_GLVFGEN_TIMER 0x000881BC
+#define I40E_GLVFGEN_TIMER_GTIME_SHIFT 0
+#define I40E_GLVFGEN_TIMER_GTIME_MASK (0xFFFFFFFF << I40E_GLVFGEN_TIMER_GTIME_SHIFT)
+#define I40E_PFGEN_CTRL 0x00092400
+#define I40E_PFGEN_CTRL_PFSWR_SHIFT 0
+#define I40E_PFGEN_CTRL_PFSWR_MASK (0x1 << I40E_PFGEN_CTRL_PFSWR_SHIFT)
+#define I40E_PFGEN_DRUN 0x00092500
+#define I40E_PFGEN_DRUN_DRVUNLD_SHIFT 0
+#define I40E_PFGEN_DRUN_DRVUNLD_MASK (0x1 << I40E_PFGEN_DRUN_DRVUNLD_SHIFT)
+#define I40E_PFGEN_PORTNUM 0x001C0480
+#define I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT 0
+#define I40E_PFGEN_PORTNUM_PORT_NUM_MASK (0x3 << I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT)
+#define I40E_PFGEN_STATE 0x00088000
+#define I40E_PFGEN_STATE_PFPEEN_SHIFT 0
+#define I40E_PFGEN_STATE_PFPEEN_MASK (0x1 << I40E_PFGEN_STATE_PFPEEN_SHIFT)
+#define I40E_PFGEN_STATE_PFFCEN_SHIFT 1
+#define I40E_PFGEN_STATE_PFFCEN_MASK (0x1 << I40E_PFGEN_STATE_PFFCEN_SHIFT)
+#define I40E_PFGEN_STATE_PFLINKEN_SHIFT 2
+#define I40E_PFGEN_STATE_PFLINKEN_MASK (0x1 << I40E_PFGEN_STATE_PFLINKEN_SHIFT)
+#define I40E_PFGEN_STATE_PFSCEN_SHIFT 3
+#define I40E_PFGEN_STATE_PFSCEN_MASK (0x1 << I40E_PFGEN_STATE_PFSCEN_SHIFT)
+#define I40E_PRTGEN_CNF 0x000B8120
+#define I40E_PRTGEN_CNF_PORT_DIS_SHIFT 0
+#define I40E_PRTGEN_CNF_PORT_DIS_MASK (0x1 << I40E_PRTGEN_CNF_PORT_DIS_SHIFT)
+#define I40E_PRTGEN_CNF_ALLOW_PORT_DIS_SHIFT 1
+#define I40E_PRTGEN_CNF_ALLOW_PORT_DIS_MASK (0x1 << I40E_PRTGEN_CNF_ALLOW_PORT_DIS_SHIFT)
+#define I40E_PRTGEN_CNF_EMP_PORT_DIS_SHIFT 2
+#define I40E_PRTGEN_CNF_EMP_PORT_DIS_MASK (0x1 << I40E_PRTGEN_CNF_EMP_PORT_DIS_SHIFT)
+#define I40E_PRTGEN_CNF2 0x000B8160
+#define I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_SHIFT 0
+#define I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_MASK (0x1 << I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_SHIFT)
+#define I40E_PRTGEN_STATUS 0x000B8100
+#define I40E_PRTGEN_STATUS_PORT_VALID_SHIFT 0
+#define I40E_PRTGEN_STATUS_PORT_VALID_MASK (0x1 << I40E_PRTGEN_STATUS_PORT_VALID_SHIFT)
+#define I40E_PRTGEN_STATUS_PORT_ACTIVE_SHIFT 1
+#define I40E_PRTGEN_STATUS_PORT_ACTIVE_MASK (0x1 << I40E_PRTGEN_STATUS_PORT_ACTIVE_SHIFT)
+#define I40E_VFGEN_RSTAT1(_VF) (0x00074400 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFGEN_RSTAT1_MAX_INDEX 127
+#define I40E_VFGEN_RSTAT1_VFR_STATE_SHIFT 0
+#define I40E_VFGEN_RSTAT1_VFR_STATE_MASK (0x3 << I40E_VFGEN_RSTAT1_VFR_STATE_SHIFT)
+#define I40E_VPGEN_VFRSTAT(_VF) (0x00091C00 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VPGEN_VFRSTAT_MAX_INDEX 127
+#define I40E_VPGEN_VFRSTAT_VFRD_SHIFT 0
+#define I40E_VPGEN_VFRSTAT_VFRD_MASK (0x1 << I40E_VPGEN_VFRSTAT_VFRD_SHIFT)
+#define I40E_VPGEN_VFRTRIG(_VF) (0x00091800 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VPGEN_VFRTRIG_MAX_INDEX 127
+#define I40E_VPGEN_VFRTRIG_VFSWR_SHIFT 0
+#define I40E_VPGEN_VFRTRIG_VFSWR_MASK (0x1 << I40E_VPGEN_VFRTRIG_VFSWR_SHIFT)
+#define I40E_VSIGEN_RSTAT(_VSI) (0x00090800 + ((_VSI) * 4)) /* _i=0...383 */
+#define I40E_VSIGEN_RSTAT_MAX_INDEX 383
+#define I40E_VSIGEN_RSTAT_VMRD_SHIFT 0
+#define I40E_VSIGEN_RSTAT_VMRD_MASK (0x1 << I40E_VSIGEN_RSTAT_VMRD_SHIFT)
+#define I40E_VSIGEN_RTRIG(_VSI) (0x00090000 + ((_VSI) * 4)) /* _i=0...383 */
+#define I40E_VSIGEN_RTRIG_MAX_INDEX 383
+#define I40E_VSIGEN_RTRIG_VMSWR_SHIFT 0
+#define I40E_VSIGEN_RTRIG_VMSWR_MASK (0x1 << I40E_VSIGEN_RTRIG_VMSWR_SHIFT)
+#define I40E_GLHMC_APBVTINUSEBASE(_i) (0x000C4a00 + ((_i) * 4))
+#define I40E_GLHMC_APBVTINUSEBASE_MAX_INDEX 15
+#define I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT 0
+#define I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_MASK (0xFFFFFF << I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT)
+#define I40E_GLHMC_CEQPART(_i) (0x001312C0 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_CEQPART_MAX_INDEX 15
+#define I40E_GLHMC_CEQPART_PMCEQBASE_SHIFT 0
+#define I40E_GLHMC_CEQPART_PMCEQBASE_MASK (0xFF << I40E_GLHMC_CEQPART_PMCEQBASE_SHIFT)
+#define I40E_GLHMC_CEQPART_PMCEQSIZE_SHIFT 16
+#define I40E_GLHMC_CEQPART_PMCEQSIZE_MASK (0x1FF << I40E_GLHMC_CEQPART_PMCEQSIZE_SHIFT)
+#define I40E_GLHMC_DBCQPART(_i) (0x00131240 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_DBCQPART_MAX_INDEX 15
+#define I40E_GLHMC_DBCQPART_PMDBCQBASE_SHIFT 0
+#define I40E_GLHMC_DBCQPART_PMDBCQBASE_MASK (0x3FFF << I40E_GLHMC_DBCQPART_PMDBCQBASE_SHIFT)
+#define I40E_GLHMC_DBCQPART_PMDBCQSIZE_SHIFT 16
+#define I40E_GLHMC_DBCQPART_PMDBCQSIZE_MASK (0x7FFF << I40E_GLHMC_DBCQPART_PMDBCQSIZE_SHIFT)
+#define I40E_GLHMC_DBQPPART(_i) (0x00138D80 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_DBQPPART_MAX_INDEX 15
+#define I40E_GLHMC_DBQPPART_PMDBQPBASE_SHIFT 0
+#define I40E_GLHMC_DBQPPART_PMDBQPBASE_MASK (0x3FFF << I40E_GLHMC_DBQPPART_PMDBQPBASE_SHIFT)
+#define I40E_GLHMC_DBQPPART_PMDBQPSIZE_SHIFT 16
+#define I40E_GLHMC_DBQPPART_PMDBQPSIZE_MASK (0x7FFF << I40E_GLHMC_DBQPPART_PMDBQPSIZE_SHIFT)
+#define I40E_GLHMC_FCOEDDPBASE(_i) (0x000C6600 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_FCOEDDPBASE_MAX_INDEX 15
+#define I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_SHIFT 0
+#define I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_MASK (0xFFFFFF << I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_SHIFT)
+#define I40E_GLHMC_FCOEDDPCNT(_i) (0x000C6700 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_FCOEDDPCNT_MAX_INDEX 15
+#define I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_SHIFT 0
+#define I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_MASK (0xFFFFF << I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_SHIFT)
+#define I40E_GLHMC_FCOEDDPOBJSZ 0x000C2010
+#define I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_SHIFT 0
+#define I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_MASK (0xF << I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_SHIFT)
+#define I40E_GLHMC_FCOEFBASE(_i) (0x000C6800 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_FCOEFBASE_MAX_INDEX 15
+#define I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_SHIFT 0
+#define I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_MASK (0xFFFFFF << I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_SHIFT)
+#define I40E_GLHMC_FCOEFCNT(_i) (0x000C6900 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_FCOEFCNT_MAX_INDEX 15
+#define I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_SHIFT 0
+#define I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_MASK (0x7FFFFF << I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_SHIFT)
+#define I40E_GLHMC_FCOEFMAX 0x000C20D0
+#define I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT 0
+#define I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_MASK (0xFFFF << I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT)
+#define I40E_GLHMC_FCOEFOBJSZ 0x000C2018
+#define I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_SHIFT 0
+#define I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_MASK (0xF << I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_SHIFT)
+#define I40E_GLHMC_FCOEMAX 0x000C2014
+#define I40E_GLHMC_FCOEMAX_PMFCOEMAX_SHIFT 0
+#define I40E_GLHMC_FCOEMAX_PMFCOEMAX_MASK (0x1FFF << I40E_GLHMC_FCOEMAX_PMFCOEMAX_SHIFT)
+#define I40E_GLHMC_FSIAVBASE(_i) (0x000C5600 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_FSIAVBASE_MAX_INDEX 15
+#define I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_SHIFT 0
+#define I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_MASK (0xFFFFFF << I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_SHIFT)
+#define I40E_GLHMC_FSIAVCNT(_i) (0x000C5700 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_FSIAVCNT_MAX_INDEX 15
+#define I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_SHIFT 0
+#define I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_MASK (0x1FFFFFFF << I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_SHIFT)
+#define I40E_GLHMC_FSIAVCNT_RSVD_SHIFT 29
+#define I40E_GLHMC_FSIAVCNT_RSVD_MASK (0x7 << I40E_GLHMC_FSIAVCNT_RSVD_SHIFT)
+#define I40E_GLHMC_FSIAVMAX 0x000C2068
+#define I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_SHIFT 0
+#define I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_MASK (0x1FFFF << I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_SHIFT)
+#define I40E_GLHMC_FSIAVOBJSZ 0x000C2064
+#define I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_SHIFT 0
+#define I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_MASK (0xF << I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_SHIFT)
+#define I40E_GLHMC_FSIMCBASE(_i) (0x000C6000 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_FSIMCBASE_MAX_INDEX 15
+#define I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_SHIFT 0
+#define I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_MASK (0xFFFFFF << I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_SHIFT)
+#define I40E_GLHMC_FSIMCCNT(_i) (0x000C6100 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_FSIMCCNT_MAX_INDEX 15
+#define I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_SHIFT 0
+#define I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_MASK (0x1FFFFFFF << I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_SHIFT)
+#define I40E_GLHMC_FSIMCMAX 0x000C2060
+#define I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_SHIFT 0
+#define I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_MASK (0x3FFF << I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_SHIFT)
+#define I40E_GLHMC_FSIMCOBJSZ 0x000C205c
+#define I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_SHIFT 0
+#define I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_MASK (0xF << I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_SHIFT)
+#define I40E_GLHMC_LANQMAX 0x000C2008
+#define I40E_GLHMC_LANQMAX_PMLANQMAX_SHIFT 0
+#define I40E_GLHMC_LANQMAX_PMLANQMAX_MASK (0x7FF << I40E_GLHMC_LANQMAX_PMLANQMAX_SHIFT)
+#define I40E_GLHMC_LANRXBASE(_i) (0x000C6400 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_LANRXBASE_MAX_INDEX 15
+#define I40E_GLHMC_LANRXBASE_FPMLANRXBASE_SHIFT 0
+#define I40E_GLHMC_LANRXBASE_FPMLANRXBASE_MASK (0xFFFFFF << I40E_GLHMC_LANRXBASE_FPMLANRXBASE_SHIFT)
+#define I40E_GLHMC_LANRXCNT(_i) (0x000C6500 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_LANRXCNT_MAX_INDEX 15
+#define I40E_GLHMC_LANRXCNT_FPMLANRXCNT_SHIFT 0
+#define I40E_GLHMC_LANRXCNT_FPMLANRXCNT_MASK (0x7FF << I40E_GLHMC_LANRXCNT_FPMLANRXCNT_SHIFT)
+#define I40E_GLHMC_LANRXOBJSZ 0x000C200c
+#define I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_SHIFT 0
+#define I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_MASK (0xF << I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_SHIFT)
+#define I40E_GLHMC_LANTXBASE(_i) (0x000C6200 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_LANTXBASE_MAX_INDEX 15
+#define I40E_GLHMC_LANTXBASE_FPMLANTXBASE_SHIFT 0
+#define I40E_GLHMC_LANTXBASE_FPMLANTXBASE_MASK (0xFFFFFF << I40E_GLHMC_LANTXBASE_FPMLANTXBASE_SHIFT)
+#define I40E_GLHMC_LANTXBASE_RSVD_SHIFT 24
+#define I40E_GLHMC_LANTXBASE_RSVD_MASK (0xFF << I40E_GLHMC_LANTXBASE_RSVD_SHIFT)
+#define I40E_GLHMC_LANTXCNT(_i) (0x000C6300 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_LANTXCNT_MAX_INDEX 15
+#define I40E_GLHMC_LANTXCNT_FPMLANTXCNT_SHIFT 0
+#define I40E_GLHMC_LANTXCNT_FPMLANTXCNT_MASK (0x7FF << I40E_GLHMC_LANTXCNT_FPMLANTXCNT_SHIFT)
+#define I40E_GLHMC_LANTXOBJSZ 0x000C2004
+#define I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_SHIFT 0
+#define I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_MASK (0xF << I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_SHIFT)
+#define I40E_GLHMC_PEARPBASE(_i) (0x000C4800 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEARPBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEARPBASE_FPMPEARPBASE_SHIFT 0
+#define I40E_GLHMC_PEARPBASE_FPMPEARPBASE_MASK (0xFFFFFF << I40E_GLHMC_PEARPBASE_FPMPEARPBASE_SHIFT)
+#define I40E_GLHMC_PEARPCNT(_i) (0x000C4900 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEARPCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEARPCNT_FPMPEARPCNT_SHIFT 0
+#define I40E_GLHMC_PEARPCNT_FPMPEARPCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEARPCNT_FPMPEARPCNT_SHIFT)
+#define I40E_GLHMC_PEARPMAX 0x000C2038
+#define I40E_GLHMC_PEARPMAX_PMPEARPMAX_SHIFT 0
+#define I40E_GLHMC_PEARPMAX_PMPEARPMAX_MASK (0x1FFFF << I40E_GLHMC_PEARPMAX_PMPEARPMAX_SHIFT)
+#define I40E_GLHMC_PEARPOBJSZ 0x000C2034
+#define I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_SHIFT 0
+#define I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_MASK (0x7 << I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_SHIFT)
+#define I40E_GLHMC_PECQBASE(_i) (0x000C4200 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PECQBASE_MAX_INDEX 15
+#define I40E_GLHMC_PECQBASE_FPMPECQBASE_SHIFT 0
+#define I40E_GLHMC_PECQBASE_FPMPECQBASE_MASK (0xFFFFFF << I40E_GLHMC_PECQBASE_FPMPECQBASE_SHIFT)
+#define I40E_GLHMC_PECQCNT(_i) (0x000C4300 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PECQCNT_MAX_INDEX 15
+#define I40E_GLHMC_PECQCNT_FPMPECQCNT_SHIFT 0
+#define I40E_GLHMC_PECQCNT_FPMPECQCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PECQCNT_FPMPECQCNT_SHIFT)
+#define I40E_GLHMC_PECQOBJSZ 0x000C2020
+#define I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_SHIFT 0
+#define I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_MASK (0xF << I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_SHIFT)
+#define I40E_GLHMC_PEHTCNT(_i) (0x000C4700 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEHTCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEHTCNT_FPMPEHTCNT_SHIFT 0
+#define I40E_GLHMC_PEHTCNT_FPMPEHTCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEHTCNT_FPMPEHTCNT_SHIFT)
+#define I40E_GLHMC_PEHTEBASE(_i) (0x000C4600 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEHTEBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_SHIFT 0
+#define I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_MASK (0xFFFFFF << I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_SHIFT)
+#define I40E_GLHMC_PEHTEOBJSZ 0x000C202c
+#define I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_SHIFT 0
+#define I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_MASK (0xF << I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_SHIFT)
+#define I40E_GLHMC_PEHTMAX 0x000C2030
+#define I40E_GLHMC_PEHTMAX_PMPEHTMAX_SHIFT 0
+#define I40E_GLHMC_PEHTMAX_PMPEHTMAX_MASK (0x1FFFFF << I40E_GLHMC_PEHTMAX_PMPEHTMAX_SHIFT)
+#define I40E_GLHMC_PEMRBASE(_i) (0x000C4c00 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEMRBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEMRBASE_FPMPEMRBASE_SHIFT 0
+#define I40E_GLHMC_PEMRBASE_FPMPEMRBASE_MASK (0xFFFFFF << I40E_GLHMC_PEMRBASE_FPMPEMRBASE_SHIFT)
+#define I40E_GLHMC_PEMRCNT(_i) (0x000C4d00 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEMRCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEMRCNT_FPMPEMRSZ_SHIFT 0
+#define I40E_GLHMC_PEMRCNT_FPMPEMRSZ_MASK (0x1FFFFFFF << I40E_GLHMC_PEMRCNT_FPMPEMRSZ_SHIFT)
+#define I40E_GLHMC_PEMRMAX 0x000C2040
+#define I40E_GLHMC_PEMRMAX_PMPEMRMAX_SHIFT 0
+#define I40E_GLHMC_PEMRMAX_PMPEMRMAX_MASK (0x7FFFFF << I40E_GLHMC_PEMRMAX_PMPEMRMAX_SHIFT)
+#define I40E_GLHMC_PEMROBJSZ 0x000C203c
+#define I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_SHIFT 0
+#define I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_MASK (0xF << I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_SHIFT)
+#define I40E_GLHMC_PEPBLBASE(_i) (0x000C5800 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEPBLBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_SHIFT 0
+#define I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_MASK (0xFFFFFF << I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_SHIFT)
+#define I40E_GLHMC_PEPBLCNT(_i) (0x000C5900 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEPBLCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_SHIFT 0
+#define I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_SHIFT)
+#define I40E_GLHMC_PEPBLMAX 0x000C206c
+#define I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_SHIFT 0
+#define I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_MASK (0x1FFFFFFF << I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_SHIFT)
+#define I40E_GLHMC_PEQ1BASE(_i) (0x000C5200 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEQ1BASE_MAX_INDEX 15
+#define I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_SHIFT 0
+#define I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_MASK (0xFFFFFF << I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_SHIFT)
+#define I40E_GLHMC_PEQ1CNT(_i) (0x000C5300 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEQ1CNT_MAX_INDEX 15
+#define I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_SHIFT 0
+#define I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_SHIFT)
+#define I40E_GLHMC_PEQ1FLBASE(_i) (0x000C5400 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEQ1FLBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_SHIFT 0
+#define I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_MASK (0xFFFFFF << I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_SHIFT)
+#define I40E_GLHMC_PEQ1FLCNT(_i) (0x000C5500 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEQ1FLCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEQ1FLCNT_FPMPEQ1FLCNT_SHIFT 0
+#define I40E_GLHMC_PEQ1FLCNT_FPMPEQ1FLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEQ1FLCNT_FPMPEQ1FLCNT_SHIFT)
+#define I40E_GLHMC_PEQ1FLMAX 0x000C2058
+#define I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_SHIFT 0
+#define I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_MASK (0x3FFFFFF << I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_SHIFT)
+#define I40E_GLHMC_PEQ1MAX 0x000C2054
+#define I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_SHIFT 0
+#define I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_MASK (0x3FFFFFF << I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_SHIFT)
+#define I40E_GLHMC_PEQ1OBJSZ 0x000C2050
+#define I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_SHIFT 0
+#define I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_MASK (0xF << I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_SHIFT)
+#define I40E_GLHMC_PEQPBASE(_i) (0x000C4000 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEQPBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEQPBASE_FPMPEQPBASE_SHIFT 0
+#define I40E_GLHMC_PEQPBASE_FPMPEQPBASE_MASK (0xFFFFFF << I40E_GLHMC_PEQPBASE_FPMPEQPBASE_SHIFT)
+#define I40E_GLHMC_PEQPCNT(_i) (0x000C4100 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEQPCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEQPCNT_FPMPEQPCNT_SHIFT 0
+#define I40E_GLHMC_PEQPCNT_FPMPEQPCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEQPCNT_FPMPEQPCNT_SHIFT)
+#define I40E_GLHMC_PEQPOBJSZ 0x000C201c
+#define I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_SHIFT 0
+#define I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_MASK (0xF << I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_SHIFT)
+#define I40E_GLHMC_PESRQBASE(_i) (0x000C4400 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PESRQBASE_MAX_INDEX 15
+#define I40E_GLHMC_PESRQBASE_FPMPESRQBASE_SHIFT 0
+#define I40E_GLHMC_PESRQBASE_FPMPESRQBASE_MASK (0xFFFFFF << I40E_GLHMC_PESRQBASE_FPMPESRQBASE_SHIFT)
+#define I40E_GLHMC_PESRQCNT(_i) (0x000C4500 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PESRQCNT_MAX_INDEX 15
+#define I40E_GLHMC_PESRQCNT_FPMPESRQCNT_SHIFT 0
+#define I40E_GLHMC_PESRQCNT_FPMPESRQCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PESRQCNT_FPMPESRQCNT_SHIFT)
+#define I40E_GLHMC_PESRQMAX 0x000C2028
+#define I40E_GLHMC_PESRQMAX_PMPESRQMAX_SHIFT 0
+#define I40E_GLHMC_PESRQMAX_PMPESRQMAX_MASK (0xFFFF << I40E_GLHMC_PESRQMAX_PMPESRQMAX_SHIFT)
+#define I40E_GLHMC_PESRQOBJSZ 0x000C2024
+#define I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_SHIFT 0
+#define I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_MASK (0xF << I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_SHIFT)
+#define I40E_GLHMC_PESRQOBJSZ_RSVD_SHIFT 4
+#define I40E_GLHMC_PESRQOBJSZ_RSVD_MASK (0xFFFFFFF << I40E_GLHMC_PESRQOBJSZ_RSVD_SHIFT)
+#define I40E_GLHMC_PETIMERBASE(_i) (0x000C5A00 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PETIMERBASE_MAX_INDEX 15
+#define I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_SHIFT 0
+#define I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_MASK (0xFFFFFF << I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_SHIFT)
+#define I40E_GLHMC_PETIMERCNT(_i) (0x000C5B00 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PETIMERCNT_MAX_INDEX 15
+#define I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_SHIFT 0
+#define I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_SHIFT)
+#define I40E_GLHMC_PETIMERMAX 0x000C2084
+#define I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_SHIFT 0
+#define I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_MASK (0x1FFFFFFF << I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_SHIFT)
+#define I40E_GLHMC_PETIMEROBJSZ 0x000C2080
+#define I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_SHIFT 0
+#define I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_MASK (0xF << I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_SHIFT)
+#define I40E_GLHMC_PEXFBASE(_i) (0x000C4e00 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEXFBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEXFBASE_FPMPEXFBASE_SHIFT 0
+#define I40E_GLHMC_PEXFBASE_FPMPEXFBASE_MASK (0xFFFFFF << I40E_GLHMC_PEXFBASE_FPMPEXFBASE_SHIFT)
+#define I40E_GLHMC_PEXFCNT(_i) (0x000C4f00 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEXFCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEXFCNT_FPMPEXFCNT_SHIFT 0
+#define I40E_GLHMC_PEXFCNT_FPMPEXFCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEXFCNT_FPMPEXFCNT_SHIFT)
+#define I40E_GLHMC_PEXFFLBASE(_i) (0x000C5000 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEXFFLBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_SHIFT 0
+#define I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_MASK (0xFFFFFF << I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_SHIFT)
+#define I40E_GLHMC_PEXFFLCNT(_i) (0x000C5100 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEXFFLCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEXFFLCNT_FPMPEXFFLCNT_SHIFT 0
+#define I40E_GLHMC_PEXFFLCNT_FPMPEXFFLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEXFFLCNT_FPMPEXFFLCNT_SHIFT)
+#define I40E_GLHMC_PEXFFLMAX 0x000C204c
+#define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT 0
+#define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_MASK (0x1FFFFFF << I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT)
+#define I40E_GLHMC_PEXFMAX 0x000C2048
+#define I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT 0
+#define I40E_GLHMC_PEXFMAX_PMPEXFMAX_MASK (0x3FFFFFF << I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT)
+#define I40E_GLHMC_PEXFOBJSZ 0x000C2044
+#define I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_SHIFT 0
+#define I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_MASK (0xF << I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_SHIFT)
+#define I40E_GLHMC_PEXFOBJSZ_RSVD_SHIFT 4
+#define I40E_GLHMC_PEXFOBJSZ_RSVD_MASK (0xFFFFFFF << I40E_GLHMC_PEXFOBJSZ_RSVD_SHIFT)
+#define I40E_GLHMC_PFASSIGN(_i) (0x000C0c00 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PFASSIGN_MAX_INDEX 15
+#define I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_SHIFT 0
+#define I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_MASK (0xF << I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_SHIFT)
+#define I40E_GLHMC_SDPART(_i) (0x000C0800 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_SDPART_MAX_INDEX 15
+#define I40E_GLHMC_SDPART_PMSDBASE_SHIFT 0
+#define I40E_GLHMC_SDPART_PMSDBASE_MASK (0xFFF << I40E_GLHMC_SDPART_PMSDBASE_SHIFT)
+#define I40E_GLHMC_SDPART_PMSDSIZE_SHIFT 16
+#define I40E_GLHMC_SDPART_PMSDSIZE_MASK (0x1FFF << I40E_GLHMC_SDPART_PMSDSIZE_SHIFT)
+#define I40E_GLHMC_VFAPBVTINUSEBASE(_i) (0x000Cca00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFAPBVTINUSEBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT 0
+#define I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_MASK (0xFFFFFF << I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT)
+#define I40E_GLHMC_VFCEQPART(_i) (0x00132240 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFCEQPART_MAX_INDEX 31
+#define I40E_GLHMC_VFCEQPART_PMCEQBASE_SHIFT 0
+#define I40E_GLHMC_VFCEQPART_PMCEQBASE_MASK (0xFF << I40E_GLHMC_VFCEQPART_PMCEQBASE_SHIFT)
+#define I40E_GLHMC_VFCEQPART_PMCEQSIZE_SHIFT 16
+#define I40E_GLHMC_VFCEQPART_PMCEQSIZE_MASK (0x1FF << I40E_GLHMC_VFCEQPART_PMCEQSIZE_SHIFT)
+#define I40E_GLHMC_VFDBCQPART(_i) (0x00132140 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFDBCQPART_MAX_INDEX 31
+#define I40E_GLHMC_VFDBCQPART_PMDBCQBASE_SHIFT 0
+#define I40E_GLHMC_VFDBCQPART_PMDBCQBASE_MASK (0x3FFF << I40E_GLHMC_VFDBCQPART_PMDBCQBASE_SHIFT)
+#define I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_SHIFT 16
+#define I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_MASK (0x7FFF << I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_SHIFT)
+#define I40E_GLHMC_VFDBQPPART(_i) (0x00138E00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFDBQPPART_MAX_INDEX 31
+#define I40E_GLHMC_VFDBQPPART_PMDBQPBASE_SHIFT 0
+#define I40E_GLHMC_VFDBQPPART_PMDBQPBASE_MASK (0x3FFF << I40E_GLHMC_VFDBQPPART_PMDBQPBASE_SHIFT)
+#define I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_SHIFT 16
+#define I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_MASK (0x7FFF << I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_SHIFT)
+#define I40E_GLHMC_VFFSIAVBASE(_i) (0x000Cd600 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFFSIAVBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_SHIFT 0
+#define I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_MASK (0xFFFFFF << I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_SHIFT)
+#define I40E_GLHMC_VFFSIAVCNT(_i) (0x000Cd700 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFFSIAVCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_SHIFT 0
+#define I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_SHIFT)
+#define I40E_GLHMC_VFFSIAVCNT_RSVD_SHIFT 29
+#define I40E_GLHMC_VFFSIAVCNT_RSVD_MASK (0x7 << I40E_GLHMC_VFFSIAVCNT_RSVD_SHIFT)
+#define I40E_GLHMC_VFPDINV(_i) (0x000C8300 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPDINV_MAX_INDEX 31
+#define I40E_GLHMC_VFPDINV_PMSDIDX_SHIFT 0
+#define I40E_GLHMC_VFPDINV_PMSDIDX_MASK (0xFFF << I40E_GLHMC_VFPDINV_PMSDIDX_SHIFT)
+#define I40E_GLHMC_VFPDINV_PMPDIDX_SHIFT 16
+#define I40E_GLHMC_VFPDINV_PMPDIDX_MASK (0x1FF << I40E_GLHMC_VFPDINV_PMPDIDX_SHIFT)
+#define I40E_GLHMC_VFPEARPBASE(_i) (0x000Cc800 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEARPBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_SHIFT 0
+#define I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_SHIFT)
+#define I40E_GLHMC_VFPEARPCNT(_i) (0x000Cc900 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEARPCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_SHIFT 0
+#define I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_SHIFT)
+#define I40E_GLHMC_VFPECQBASE(_i) (0x000Cc200 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPECQBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPECQBASE_FPMPECQBASE_SHIFT 0
+#define I40E_GLHMC_VFPECQBASE_FPMPECQBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPECQBASE_FPMPECQBASE_SHIFT)
+#define I40E_GLHMC_VFPECQCNT(_i) (0x000Cc300 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPECQCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPECQCNT_FPMPECQCNT_SHIFT 0
+#define I40E_GLHMC_VFPECQCNT_FPMPECQCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPECQCNT_FPMPECQCNT_SHIFT)
+#define I40E_GLHMC_VFPEHTCNT(_i) (0x000Cc700 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEHTCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_SHIFT 0
+#define I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_SHIFT)
+#define I40E_GLHMC_VFPEHTEBASE(_i) (0x000Cc600 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEHTEBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_SHIFT 0
+#define I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_SHIFT)
+#define I40E_GLHMC_VFPEMRBASE(_i) (0x000Ccc00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEMRBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_SHIFT 0
+#define I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_SHIFT)
+#define I40E_GLHMC_VFPEMRCNT(_i) (0x000Ccd00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEMRCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_SHIFT 0
+#define I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_SHIFT)
+#define I40E_GLHMC_VFPEPBLBASE(_i) (0x000Cd800 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEPBLBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_SHIFT 0
+#define I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_SHIFT)
+#define I40E_GLHMC_VFPEPBLCNT(_i) (0x000Cd900 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEPBLCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_SHIFT 0
+#define I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_SHIFT)
+#define I40E_GLHMC_VFPEQ1BASE(_i) (0x000Cd200 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEQ1BASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_SHIFT 0
+#define I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_SHIFT)
+#define I40E_GLHMC_VFPEQ1CNT(_i) (0x000Cd300 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEQ1CNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_SHIFT 0
+#define I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_SHIFT)
+#define I40E_GLHMC_VFPEQ1FLBASE(_i) (0x000Cd400 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEQ1FLBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_SHIFT 0
+#define I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_SHIFT)
+#define I40E_GLHMC_VFPEQ1FLCNT(_i) (0x000Cd500 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEQ1FLCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQ1FLCNT_FPMPEQ1FLCNT_SHIFT 0
+#define I40E_GLHMC_VFPEQ1FLCNT_FPMPEQ1FLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEQ1FLCNT_FPMPEQ1FLCNT_SHIFT)
+#define I40E_GLHMC_VFPEQPBASE(_i) (0x000Cc000 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEQPBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_SHIFT 0
+#define I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_SHIFT)
+#define I40E_GLHMC_VFPEQPCNT(_i) (0x000Cc100 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEQPCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_SHIFT 0
+#define I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_SHIFT)
+#define I40E_GLHMC_VFPESRQBASE(_i) (0x000Cc400 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPESRQBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_SHIFT 0
+#define I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_SHIFT)
+#define I40E_GLHMC_VFPESRQCNT(_i) (0x000Cc500 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPESRQCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_SHIFT 0
+#define I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_SHIFT)
+#define I40E_GLHMC_VFPETIMERBASE(_i) (0x000CDA00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPETIMERBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_SHIFT 0
+#define I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_SHIFT)
+#define I40E_GLHMC_VFPETIMERCNT(_i) (0x000CDB00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPETIMERCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_SHIFT 0
+#define I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_SHIFT)
+#define I40E_GLHMC_VFPEXFBASE(_i) (0x000Cce00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEXFBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_SHIFT 0
+#define I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_SHIFT)
+#define I40E_GLHMC_VFPEXFCNT(_i) (0x000Ccf00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEXFCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_SHIFT 0
+#define I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_SHIFT)
+#define I40E_GLHMC_VFPEXFFLBASE(_i) (0x000Cd000 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEXFFLBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_SHIFT 0
+#define I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_SHIFT)
+#define I40E_GLHMC_VFPEXFFLCNT(_i) (0x000Cd100 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEXFFLCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEXFFLCNT_FPMPEXFFLCNT_SHIFT 0
+#define I40E_GLHMC_VFPEXFFLCNT_FPMPEXFFLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEXFFLCNT_FPMPEXFFLCNT_SHIFT)
+#define I40E_GLHMC_VFSDPART(_i) (0x000C8800 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFSDPART_MAX_INDEX 31
+#define I40E_GLHMC_VFSDPART_PMSDBASE_SHIFT 0
+#define I40E_GLHMC_VFSDPART_PMSDBASE_MASK (0xFFF << I40E_GLHMC_VFSDPART_PMSDBASE_SHIFT)
+#define I40E_GLHMC_VFSDPART_PMSDSIZE_SHIFT 16
+#define I40E_GLHMC_VFSDPART_PMSDSIZE_MASK (0x1FFF << I40E_GLHMC_VFSDPART_PMSDSIZE_SHIFT)
+#define I40E_PFHMC_ERRORDATA 0x000C0500
+#define I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_SHIFT 0
+#define I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_MASK (0x3FFFFFFF << I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_SHIFT)
+#define I40E_PFHMC_ERRORINFO 0x000C0400
+#define I40E_PFHMC_ERRORINFO_PMF_INDEX_SHIFT 0
+#define I40E_PFHMC_ERRORINFO_PMF_INDEX_MASK (0x1F << I40E_PFHMC_ERRORINFO_PMF_INDEX_SHIFT)
+#define I40E_PFHMC_ERRORINFO_PMF_ISVF_SHIFT 7
+#define I40E_PFHMC_ERRORINFO_PMF_ISVF_MASK (0x1 << I40E_PFHMC_ERRORINFO_PMF_ISVF_SHIFT)
+#define I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT 8
+#define I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_MASK (0xF << I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT)
+#define I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_SHIFT 16
+#define I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_MASK (0x1F << I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_SHIFT)
+#define I40E_PFHMC_ERRORINFO_ERROR_DETECTED_SHIFT 31
+#define I40E_PFHMC_ERRORINFO_ERROR_DETECTED_MASK (0x1 << I40E_PFHMC_ERRORINFO_ERROR_DETECTED_SHIFT)
+#define I40E_PFHMC_PDINV 0x000C0300
+#define I40E_PFHMC_PDINV_PMSDIDX_SHIFT 0
+#define I40E_PFHMC_PDINV_PMSDIDX_MASK (0xFFF << I40E_PFHMC_PDINV_PMSDIDX_SHIFT)
+#define I40E_PFHMC_PDINV_PMPDIDX_SHIFT 16
+#define I40E_PFHMC_PDINV_PMPDIDX_MASK (0x1FF << I40E_PFHMC_PDINV_PMPDIDX_SHIFT)
+#define I40E_PFHMC_SDCMD 0x000C0000
+#define I40E_PFHMC_SDCMD_PMSDIDX_SHIFT 0
+#define I40E_PFHMC_SDCMD_PMSDIDX_MASK (0xFFF << I40E_PFHMC_SDCMD_PMSDIDX_SHIFT)
+#define I40E_PFHMC_SDCMD_PMSDWR_SHIFT 31
+#define I40E_PFHMC_SDCMD_PMSDWR_MASK (0x1 << I40E_PFHMC_SDCMD_PMSDWR_SHIFT)
+#define I40E_PFHMC_SDDATAHIGH 0x000C0200
+#define I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_SHIFT 0
+#define I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_MASK (0xFFFFFFFF << I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_SHIFT)
+#define I40E_PFHMC_SDDATALOW 0x000C0100
+#define I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT 0
+#define I40E_PFHMC_SDDATALOW_PMSDVALID_MASK (0x1 << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT)
+#define I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT 1
+#define I40E_PFHMC_SDDATALOW_PMSDTYPE_MASK (0x1 << I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT)
+#define I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT 2
+#define I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_MASK (0x3FF << I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT)
+#define I40E_PFHMC_SDDATALOW_PMSDDATALOW_SHIFT 12
+#define I40E_PFHMC_SDDATALOW_PMSDDATALOW_MASK (0xFFFFF << I40E_PFHMC_SDDATALOW_PMSDDATALOW_SHIFT)
+#define I40E_GL_UFUSE 0x00094008
+#define I40E_GL_UFUSE_FOUR_PORT_ENABLE_SHIFT 1
+#define I40E_GL_UFUSE_FOUR_PORT_ENABLE_MASK (0x1 << I40E_GL_UFUSE_FOUR_PORT_ENABLE_SHIFT)
+#define I40E_GL_UFUSE_NIC_ID_SHIFT 2
+#define I40E_GL_UFUSE_NIC_ID_MASK (0x1 << I40E_GL_UFUSE_NIC_ID_SHIFT)
+#define I40E_GL_UFUSE_ULT_LOCKOUT_SHIFT 10
+#define I40E_GL_UFUSE_ULT_LOCKOUT_MASK (0x1 << I40E_GL_UFUSE_ULT_LOCKOUT_SHIFT)
+#define I40E_GL_UFUSE_CLS_LOCKOUT_SHIFT 11
+#define I40E_GL_UFUSE_CLS_LOCKOUT_MASK (0x1 << I40E_GL_UFUSE_CLS_LOCKOUT_SHIFT)
+#define I40E_EMPINT_GPIO_ENA 0x00088188
+#define I40E_EMPINT_GPIO_ENA_GPIO0_ENA_SHIFT 0
+#define I40E_EMPINT_GPIO_ENA_GPIO0_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO0_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO1_ENA_SHIFT 1
+#define I40E_EMPINT_GPIO_ENA_GPIO1_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO1_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO2_ENA_SHIFT 2
+#define I40E_EMPINT_GPIO_ENA_GPIO2_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO2_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO3_ENA_SHIFT 3
+#define I40E_EMPINT_GPIO_ENA_GPIO3_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO3_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO4_ENA_SHIFT 4
+#define I40E_EMPINT_GPIO_ENA_GPIO4_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO4_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO5_ENA_SHIFT 5
+#define I40E_EMPINT_GPIO_ENA_GPIO5_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO5_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO6_ENA_SHIFT 6
+#define I40E_EMPINT_GPIO_ENA_GPIO6_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO6_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO7_ENA_SHIFT 7
+#define I40E_EMPINT_GPIO_ENA_GPIO7_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO7_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO8_ENA_SHIFT 8
+#define I40E_EMPINT_GPIO_ENA_GPIO8_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO8_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO9_ENA_SHIFT 9
+#define I40E_EMPINT_GPIO_ENA_GPIO9_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO9_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO10_ENA_SHIFT 10
+#define I40E_EMPINT_GPIO_ENA_GPIO10_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO10_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO11_ENA_SHIFT 11
+#define I40E_EMPINT_GPIO_ENA_GPIO11_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO11_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO12_ENA_SHIFT 12
+#define I40E_EMPINT_GPIO_ENA_GPIO12_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO12_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO13_ENA_SHIFT 13
+#define I40E_EMPINT_GPIO_ENA_GPIO13_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO13_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO14_ENA_SHIFT 14
+#define I40E_EMPINT_GPIO_ENA_GPIO14_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO14_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO15_ENA_SHIFT 15
+#define I40E_EMPINT_GPIO_ENA_GPIO15_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO15_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO16_ENA_SHIFT 16
+#define I40E_EMPINT_GPIO_ENA_GPIO16_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO16_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO17_ENA_SHIFT 17
+#define I40E_EMPINT_GPIO_ENA_GPIO17_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO17_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO18_ENA_SHIFT 18
+#define I40E_EMPINT_GPIO_ENA_GPIO18_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO18_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO19_ENA_SHIFT 19
+#define I40E_EMPINT_GPIO_ENA_GPIO19_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO19_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO20_ENA_SHIFT 20
+#define I40E_EMPINT_GPIO_ENA_GPIO20_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO20_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO21_ENA_SHIFT 21
+#define I40E_EMPINT_GPIO_ENA_GPIO21_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO21_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO22_ENA_SHIFT 22
+#define I40E_EMPINT_GPIO_ENA_GPIO22_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO22_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO23_ENA_SHIFT 23
+#define I40E_EMPINT_GPIO_ENA_GPIO23_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO23_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO24_ENA_SHIFT 24
+#define I40E_EMPINT_GPIO_ENA_GPIO24_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO24_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO25_ENA_SHIFT 25
+#define I40E_EMPINT_GPIO_ENA_GPIO25_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO25_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO26_ENA_SHIFT 26
+#define I40E_EMPINT_GPIO_ENA_GPIO26_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO26_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO27_ENA_SHIFT 27
+#define I40E_EMPINT_GPIO_ENA_GPIO27_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO27_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO28_ENA_SHIFT 28
+#define I40E_EMPINT_GPIO_ENA_GPIO28_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO28_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO29_ENA_SHIFT 29
+#define I40E_EMPINT_GPIO_ENA_GPIO29_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO29_ENA_SHIFT)
+#define I40E_PFGEN_PORTMDIO_NUM 0x0003F100
+#define I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_SHIFT 0
+#define I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_MASK (0x3 << I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_SHIFT)
+#define I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_SHIFT 4
+#define I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK (0x1 << I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_SHIFT)
+#define I40E_PFINT_AEQCTL 0x00038700
+#define I40E_PFINT_AEQCTL_MSIX_INDX_SHIFT 0
+#define I40E_PFINT_AEQCTL_MSIX_INDX_MASK (0xFF << I40E_PFINT_AEQCTL_MSIX_INDX_SHIFT)
+#define I40E_PFINT_AEQCTL_ITR_INDX_SHIFT 11
+#define I40E_PFINT_AEQCTL_ITR_INDX_MASK (0x3 << I40E_PFINT_AEQCTL_ITR_INDX_SHIFT)
+#define I40E_PFINT_AEQCTL_MSIX0_INDX_SHIFT 13
+#define I40E_PFINT_AEQCTL_MSIX0_INDX_MASK (0x7 << I40E_PFINT_AEQCTL_MSIX0_INDX_SHIFT)
+#define I40E_PFINT_AEQCTL_CAUSE_ENA_SHIFT 30
+#define I40E_PFINT_AEQCTL_CAUSE_ENA_MASK (0x1 << I40E_PFINT_AEQCTL_CAUSE_ENA_SHIFT)
+#define I40E_PFINT_AEQCTL_INTEVENT_SHIFT 31
+#define I40E_PFINT_AEQCTL_INTEVENT_MASK (0x1 << I40E_PFINT_AEQCTL_INTEVENT_SHIFT)
+#define I40E_PFINT_CEQCTL(_INTPF) (0x00036800 + ((_INTPF) * 4)) /* _i=0...511 */
+#define I40E_PFINT_CEQCTL_MAX_INDEX 511
+#define I40E_PFINT_CEQCTL_MSIX_INDX_SHIFT 0
+#define I40E_PFINT_CEQCTL_MSIX_INDX_MASK (0xFF << I40E_PFINT_CEQCTL_MSIX_INDX_SHIFT)
+#define I40E_PFINT_CEQCTL_ITR_INDX_SHIFT 11
+#define I40E_PFINT_CEQCTL_ITR_INDX_MASK (0x3 << I40E_PFINT_CEQCTL_ITR_INDX_SHIFT)
+#define I40E_PFINT_CEQCTL_MSIX0_INDX_SHIFT 13
+#define I40E_PFINT_CEQCTL_MSIX0_INDX_MASK (0x7 << I40E_PFINT_CEQCTL_MSIX0_INDX_SHIFT)
+#define I40E_PFINT_CEQCTL_NEXTQ_INDX_SHIFT 16
+#define I40E_PFINT_CEQCTL_NEXTQ_INDX_MASK (0x7FF << I40E_PFINT_CEQCTL_NEXTQ_INDX_SHIFT)
+#define I40E_PFINT_CEQCTL_NEXTQ_TYPE_SHIFT 27
+#define I40E_PFINT_CEQCTL_NEXTQ_TYPE_MASK (0x3 << I40E_PFINT_CEQCTL_NEXTQ_TYPE_SHIFT)
+#define I40E_PFINT_CEQCTL_CAUSE_ENA_SHIFT 30
+#define I40E_PFINT_CEQCTL_CAUSE_ENA_MASK (0x1 << I40E_PFINT_CEQCTL_CAUSE_ENA_SHIFT)
+#define I40E_PFINT_CEQCTL_INTEVENT_SHIFT 31
+#define I40E_PFINT_CEQCTL_INTEVENT_MASK (0x1 << I40E_PFINT_CEQCTL_INTEVENT_SHIFT)
+#define I40E_PFINT_DYN_CTL0 0x00038480
+#define I40E_PFINT_DYN_CTL0_INTENA_SHIFT 0
+#define I40E_PFINT_DYN_CTL0_INTENA_MASK (0x1 << I40E_PFINT_DYN_CTL0_INTENA_SHIFT)
+#define I40E_PFINT_DYN_CTL0_CLEARPBA_SHIFT 1
+#define I40E_PFINT_DYN_CTL0_CLEARPBA_MASK (0x1 << I40E_PFINT_DYN_CTL0_CLEARPBA_SHIFT)
+#define I40E_PFINT_DYN_CTL0_SWINT_TRIG_SHIFT 2
+#define I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK (0x1 << I40E_PFINT_DYN_CTL0_SWINT_TRIG_SHIFT)
+#define I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT 3
+#define I40E_PFINT_DYN_CTL0_ITR_INDX_MASK (0x3 << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT)
+#define I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT 5
+#define I40E_PFINT_DYN_CTL0_INTERVAL_MASK (0xFFF << I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT)
+#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT 24
+#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK (0x1 << I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_SHIFT 25
+#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK (0x3 << I40E_PFINT_DYN_CTL0_SW_ITR_INDX_SHIFT)
+#define I40E_PFINT_DYN_CTL0_INTENA_MSK_SHIFT 31
+#define I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK (0x1 << I40E_PFINT_DYN_CTL0_INTENA_MSK_SHIFT)
+#define I40E_PFINT_DYN_CTLN(_INTPF) (0x00034800 + ((_INTPF) * 4)) /* _i=0...511 */
+#define I40E_PFINT_DYN_CTLN_MAX_INDEX 511
+#define I40E_PFINT_DYN_CTLN_INTENA_SHIFT 0
+#define I40E_PFINT_DYN_CTLN_INTENA_MASK (0x1 << I40E_PFINT_DYN_CTLN_INTENA_SHIFT)
+#define I40E_PFINT_DYN_CTLN_CLEARPBA_SHIFT 1
+#define I40E_PFINT_DYN_CTLN_CLEARPBA_MASK (0x1 << I40E_PFINT_DYN_CTLN_CLEARPBA_SHIFT)
+#define I40E_PFINT_DYN_CTLN_SWINT_TRIG_SHIFT 2
+#define I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK (0x1 << I40E_PFINT_DYN_CTLN_SWINT_TRIG_SHIFT)
+#define I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT 3
+#define I40E_PFINT_DYN_CTLN_ITR_INDX_MASK (0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT)
+#define I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT 5
+#define I40E_PFINT_DYN_CTLN_INTERVAL_MASK (0xFFF << I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT)
+#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT 24
+#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK (0x1 << I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_SHIFT 25
+#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK (0x3 << I40E_PFINT_DYN_CTLN_SW_ITR_INDX_SHIFT)
+#define I40E_PFINT_DYN_CTLN_INTENA_MSK_SHIFT 31
+#define I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK (0x1 << I40E_PFINT_DYN_CTLN_INTENA_MSK_SHIFT)
+#define I40E_PFINT_GPIO_ENA 0x00088080
+#define I40E_PFINT_GPIO_ENA_GPIO0_ENA_SHIFT 0
+#define I40E_PFINT_GPIO_ENA_GPIO0_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO0_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO1_ENA_SHIFT 1
+#define I40E_PFINT_GPIO_ENA_GPIO1_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO1_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO2_ENA_SHIFT 2
+#define I40E_PFINT_GPIO_ENA_GPIO2_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO2_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO3_ENA_SHIFT 3
+#define I40E_PFINT_GPIO_ENA_GPIO3_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO3_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO4_ENA_SHIFT 4
+#define I40E_PFINT_GPIO_ENA_GPIO4_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO4_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO5_ENA_SHIFT 5
+#define I40E_PFINT_GPIO_ENA_GPIO5_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO5_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO6_ENA_SHIFT 6
+#define I40E_PFINT_GPIO_ENA_GPIO6_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO6_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO7_ENA_SHIFT 7
+#define I40E_PFINT_GPIO_ENA_GPIO7_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO7_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO8_ENA_SHIFT 8
+#define I40E_PFINT_GPIO_ENA_GPIO8_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO8_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO9_ENA_SHIFT 9
+#define I40E_PFINT_GPIO_ENA_GPIO9_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO9_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO10_ENA_SHIFT 10
+#define I40E_PFINT_GPIO_ENA_GPIO10_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO10_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO11_ENA_SHIFT 11
+#define I40E_PFINT_GPIO_ENA_GPIO11_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO11_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO12_ENA_SHIFT 12
+#define I40E_PFINT_GPIO_ENA_GPIO12_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO12_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO13_ENA_SHIFT 13
+#define I40E_PFINT_GPIO_ENA_GPIO13_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO13_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO14_ENA_SHIFT 14
+#define I40E_PFINT_GPIO_ENA_GPIO14_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO14_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO15_ENA_SHIFT 15
+#define I40E_PFINT_GPIO_ENA_GPIO15_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO15_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO16_ENA_SHIFT 16
+#define I40E_PFINT_GPIO_ENA_GPIO16_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO16_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO17_ENA_SHIFT 17
+#define I40E_PFINT_GPIO_ENA_GPIO17_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO17_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO18_ENA_SHIFT 18
+#define I40E_PFINT_GPIO_ENA_GPIO18_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO18_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO19_ENA_SHIFT 19
+#define I40E_PFINT_GPIO_ENA_GPIO19_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO19_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO20_ENA_SHIFT 20
+#define I40E_PFINT_GPIO_ENA_GPIO20_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO20_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO21_ENA_SHIFT 21
+#define I40E_PFINT_GPIO_ENA_GPIO21_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO21_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO22_ENA_SHIFT 22
+#define I40E_PFINT_GPIO_ENA_GPIO22_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO22_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO23_ENA_SHIFT 23
+#define I40E_PFINT_GPIO_ENA_GPIO23_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO23_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO24_ENA_SHIFT 24
+#define I40E_PFINT_GPIO_ENA_GPIO24_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO24_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO25_ENA_SHIFT 25
+#define I40E_PFINT_GPIO_ENA_GPIO25_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO25_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO26_ENA_SHIFT 26
+#define I40E_PFINT_GPIO_ENA_GPIO26_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO26_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO27_ENA_SHIFT 27
+#define I40E_PFINT_GPIO_ENA_GPIO27_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO27_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO28_ENA_SHIFT 28
+#define I40E_PFINT_GPIO_ENA_GPIO28_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO28_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO29_ENA_SHIFT 29
+#define I40E_PFINT_GPIO_ENA_GPIO29_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO29_ENA_SHIFT)
+#define I40E_PFINT_ICR0 0x00038780
+#define I40E_PFINT_ICR0_INTEVENT_SHIFT 0
+#define I40E_PFINT_ICR0_INTEVENT_MASK (0x1 << I40E_PFINT_ICR0_INTEVENT_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_0_SHIFT 1
+#define I40E_PFINT_ICR0_QUEUE_0_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_0_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_1_SHIFT 2
+#define I40E_PFINT_ICR0_QUEUE_1_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_1_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_2_SHIFT 3
+#define I40E_PFINT_ICR0_QUEUE_2_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_2_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_3_SHIFT 4
+#define I40E_PFINT_ICR0_QUEUE_3_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_3_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_4_SHIFT 5
+#define I40E_PFINT_ICR0_QUEUE_4_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_4_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_5_SHIFT 6
+#define I40E_PFINT_ICR0_QUEUE_5_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_5_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_6_SHIFT 7
+#define I40E_PFINT_ICR0_QUEUE_6_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_6_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_7_SHIFT 8
+#define I40E_PFINT_ICR0_QUEUE_7_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_7_SHIFT)
+#define I40E_PFINT_ICR0_ECC_ERR_SHIFT 16
+#define I40E_PFINT_ICR0_ECC_ERR_MASK (0x1 << I40E_PFINT_ICR0_ECC_ERR_SHIFT)
+#define I40E_PFINT_ICR0_MAL_DETECT_SHIFT 19
+#define I40E_PFINT_ICR0_MAL_DETECT_MASK (0x1 << I40E_PFINT_ICR0_MAL_DETECT_SHIFT)
+#define I40E_PFINT_ICR0_GRST_SHIFT 20
+#define I40E_PFINT_ICR0_GRST_MASK (0x1 << I40E_PFINT_ICR0_GRST_SHIFT)
+#define I40E_PFINT_ICR0_PCI_EXCEPTION_SHIFT 21
+#define I40E_PFINT_ICR0_PCI_EXCEPTION_MASK (0x1 << I40E_PFINT_ICR0_PCI_EXCEPTION_SHIFT)
+#define I40E_PFINT_ICR0_GPIO_SHIFT 22
+#define I40E_PFINT_ICR0_GPIO_MASK (0x1 << I40E_PFINT_ICR0_GPIO_SHIFT)
+#define I40E_PFINT_ICR0_TIMESYNC_SHIFT 23
+#define I40E_PFINT_ICR0_TIMESYNC_MASK (0x1 << I40E_PFINT_ICR0_TIMESYNC_SHIFT)
+#define I40E_PFINT_ICR0_STORM_DETECT_SHIFT 24
+#define I40E_PFINT_ICR0_STORM_DETECT_MASK (0x1 << I40E_PFINT_ICR0_STORM_DETECT_SHIFT)
+#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT 25
+#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK (0x1 << I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT)
+#define I40E_PFINT_ICR0_HMC_ERR_SHIFT 26
+#define I40E_PFINT_ICR0_HMC_ERR_MASK (0x1 << I40E_PFINT_ICR0_HMC_ERR_SHIFT)
+#define I40E_PFINT_ICR0_PE_CRITERR_SHIFT 28
+#define I40E_PFINT_ICR0_PE_CRITERR_MASK (0x1 << I40E_PFINT_ICR0_PE_CRITERR_SHIFT)
+#define I40E_PFINT_ICR0_VFLR_SHIFT 29
+#define I40E_PFINT_ICR0_VFLR_MASK (0x1 << I40E_PFINT_ICR0_VFLR_SHIFT)
+#define I40E_PFINT_ICR0_ADMINQ_SHIFT 30
+#define I40E_PFINT_ICR0_ADMINQ_MASK (0x1 << I40E_PFINT_ICR0_ADMINQ_SHIFT)
+#define I40E_PFINT_ICR0_SWINT_SHIFT 31
+#define I40E_PFINT_ICR0_SWINT_MASK (0x1 << I40E_PFINT_ICR0_SWINT_SHIFT)
+#define I40E_PFINT_ICR0_ENA 0x00038800
+#define I40E_PFINT_ICR0_ENA_ECC_ERR_SHIFT 16
+#define I40E_PFINT_ICR0_ENA_ECC_ERR_MASK (0x1 << I40E_PFINT_ICR0_ENA_ECC_ERR_SHIFT)
+#define I40E_PFINT_ICR0_ENA_MAL_DETECT_SHIFT 19
+#define I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK (0x1 << I40E_PFINT_ICR0_ENA_MAL_DETECT_SHIFT)
+#define I40E_PFINT_ICR0_ENA_GRST_SHIFT 20
+#define I40E_PFINT_ICR0_ENA_GRST_MASK (0x1 << I40E_PFINT_ICR0_ENA_GRST_SHIFT)
+#define I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_SHIFT 21
+#define I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK (0x1 << I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_SHIFT)
+#define I40E_PFINT_ICR0_ENA_GPIO_SHIFT 22
+#define I40E_PFINT_ICR0_ENA_GPIO_MASK (0x1 << I40E_PFINT_ICR0_ENA_GPIO_SHIFT)
+#define I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT 23
+#define I40E_PFINT_ICR0_ENA_TIMESYNC_MASK (0x1 << I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT)
+#define I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT 24
+#define I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK (0x1 << I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT)
+#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT 25
+#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK (0x1 << I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT)
+#define I40E_PFINT_ICR0_ENA_HMC_ERR_SHIFT 26
+#define I40E_PFINT_ICR0_ENA_HMC_ERR_MASK (0x1 << I40E_PFINT_ICR0_ENA_HMC_ERR_SHIFT)
+#define I40E_PFINT_ICR0_ENA_PE_CRITERR_SHIFT 28
+#define I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK (0x1 << I40E_PFINT_ICR0_ENA_PE_CRITERR_SHIFT)
+#define I40E_PFINT_ICR0_ENA_VFLR_SHIFT 29
+#define I40E_PFINT_ICR0_ENA_VFLR_MASK (0x1 << I40E_PFINT_ICR0_ENA_VFLR_SHIFT)
+#define I40E_PFINT_ICR0_ENA_ADMINQ_SHIFT 30
+#define I40E_PFINT_ICR0_ENA_ADMINQ_MASK (0x1 << I40E_PFINT_ICR0_ENA_ADMINQ_SHIFT)
+#define I40E_PFINT_ICR0_ENA_RSVD_SHIFT 31
+#define I40E_PFINT_ICR0_ENA_RSVD_MASK (0x1 << I40E_PFINT_ICR0_ENA_RSVD_SHIFT)
+#define I40E_PFINT_ITR0(_i) (0x00038000 + ((_i) * 128)) /* _i=0...2 */
+#define I40E_PFINT_ITR0_MAX_INDEX 2
+#define I40E_PFINT_ITR0_INTERVAL_SHIFT 0
+#define I40E_PFINT_ITR0_INTERVAL_MASK (0xFFF << I40E_PFINT_ITR0_INTERVAL_SHIFT)
+#define I40E_PFINT_ITRN(_i, _INTPF) (0x00030000 + ((_i) * 2048 + (_INTPF) * 4)) /* _i=0...2, _INTPF=0...511 */
+#define I40E_PFINT_ITRN_MAX_INDEX 2
+#define I40E_PFINT_ITRN_INTERVAL_SHIFT 0
+#define I40E_PFINT_ITRN_INTERVAL_MASK (0xFFF << I40E_PFINT_ITRN_INTERVAL_SHIFT)
+#define I40E_PFINT_LNKLST0 0x00038500
+#define I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT 0
+#define I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK (0x7FF << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT)
+#define I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT 11
+#define I40E_PFINT_LNKLST0_FIRSTQ_TYPE_MASK (0x3 << I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT)
+#define I40E_PFINT_LNKLSTN(_INTPF) (0x00035000 + ((_INTPF) * 4)) /* _i=0...511 */
+#define I40E_PFINT_LNKLSTN_MAX_INDEX 511
+#define I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT 0
+#define I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK (0x7FF << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT)
+#define I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT 11
+#define I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_MASK (0x3 << I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)
+#define I40E_PFINT_RATE0 0x00038580
+#define I40E_PFINT_RATE0_INTERVAL_SHIFT 0
+#define I40E_PFINT_RATE0_INTERVAL_MASK (0x3F << I40E_PFINT_RATE0_INTERVAL_SHIFT)
+#define I40E_PFINT_RATE0_INTRL_ENA_SHIFT 6
+#define I40E_PFINT_RATE0_INTRL_ENA_MASK (0x1 << I40E_PFINT_RATE0_INTRL_ENA_SHIFT)
+#define I40E_PFINT_RATEN(_INTPF) (0x00035800 + ((_INTPF) * 4)) /* _i=0...511 */
+#define I40E_PFINT_RATEN_MAX_INDEX 511
+#define I40E_PFINT_RATEN_INTERVAL_SHIFT 0
+#define I40E_PFINT_RATEN_INTERVAL_MASK (0x3F << I40E_PFINT_RATEN_INTERVAL_SHIFT)
+#define I40E_PFINT_RATEN_INTRL_ENA_SHIFT 6
+#define I40E_PFINT_RATEN_INTRL_ENA_MASK (0x1 << I40E_PFINT_RATEN_INTRL_ENA_SHIFT)
+#define I40E_PFINT_STAT_CTL0 0x00038400
+#define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2
+#define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK (0x3 << I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT)
+#define I40E_QINT_RQCTL(_Q) (0x0003A000 + ((_Q) * 4)) /* _i=0...1535 */
+#define I40E_QINT_RQCTL_MAX_INDEX 1535
+#define I40E_QINT_RQCTL_MSIX_INDX_SHIFT 0
+#define I40E_QINT_RQCTL_MSIX_INDX_MASK (0xFF << I40E_QINT_RQCTL_MSIX_INDX_SHIFT)
+#define I40E_QINT_RQCTL_ITR_INDX_SHIFT 11
+#define I40E_QINT_RQCTL_ITR_INDX_MASK (0x3 << I40E_QINT_RQCTL_ITR_INDX_SHIFT)
+#define I40E_QINT_RQCTL_MSIX0_INDX_SHIFT 13
+#define I40E_QINT_RQCTL_MSIX0_INDX_MASK (0x7 << I40E_QINT_RQCTL_MSIX0_INDX_SHIFT)
+#define I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT 16
+#define I40E_QINT_RQCTL_NEXTQ_INDX_MASK (0x7FF << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)
+#define I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT 27
+#define I40E_QINT_RQCTL_NEXTQ_TYPE_MASK (0x3 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT)
+#define I40E_QINT_RQCTL_CAUSE_ENA_SHIFT 30
+#define I40E_QINT_RQCTL_CAUSE_ENA_MASK (0x1 << I40E_QINT_RQCTL_CAUSE_ENA_SHIFT)
+#define I40E_QINT_RQCTL_INTEVENT_SHIFT 31
+#define I40E_QINT_RQCTL_INTEVENT_MASK (0x1 << I40E_QINT_RQCTL_INTEVENT_SHIFT)
+#define I40E_QINT_TQCTL(_Q) (0x0003C000 + ((_Q) * 4)) /* _i=0...1535 */
+#define I40E_QINT_TQCTL_MAX_INDEX 1535
+#define I40E_QINT_TQCTL_MSIX_INDX_SHIFT 0
+#define I40E_QINT_TQCTL_MSIX_INDX_MASK (0xFF << I40E_QINT_TQCTL_MSIX_INDX_SHIFT)
+#define I40E_QINT_TQCTL_ITR_INDX_SHIFT 11
+#define I40E_QINT_TQCTL_ITR_INDX_MASK (0x3 << I40E_QINT_TQCTL_ITR_INDX_SHIFT)
+#define I40E_QINT_TQCTL_MSIX0_INDX_SHIFT 13
+#define I40E_QINT_TQCTL_MSIX0_INDX_MASK (0x7 << I40E_QINT_TQCTL_MSIX0_INDX_SHIFT)
+#define I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT 16
+#define I40E_QINT_TQCTL_NEXTQ_INDX_MASK (0x7FF << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT)
+#define I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT 27
+#define I40E_QINT_TQCTL_NEXTQ_TYPE_MASK (0x3 << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT)
+#define I40E_QINT_TQCTL_CAUSE_ENA_SHIFT 30
+#define I40E_QINT_TQCTL_CAUSE_ENA_MASK (0x1 << I40E_QINT_TQCTL_CAUSE_ENA_SHIFT)
+#define I40E_QINT_TQCTL_INTEVENT_SHIFT 31
+#define I40E_QINT_TQCTL_INTEVENT_MASK (0x1 << I40E_QINT_TQCTL_INTEVENT_SHIFT)
+#define I40E_VFINT_DYN_CTL0(_VF) (0x0002A400 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFINT_DYN_CTL0_MAX_INDEX 127
+#define I40E_VFINT_DYN_CTL0_INTENA_SHIFT 0
+#define I40E_VFINT_DYN_CTL0_INTENA_MASK (0x1 << I40E_VFINT_DYN_CTL0_INTENA_SHIFT)
+#define I40E_VFINT_DYN_CTL0_CLEARPBA_SHIFT 1
+#define I40E_VFINT_DYN_CTL0_CLEARPBA_MASK (0x1 << I40E_VFINT_DYN_CTL0_CLEARPBA_SHIFT)
+#define I40E_VFINT_DYN_CTL0_SWINT_TRIG_SHIFT 2
+#define I40E_VFINT_DYN_CTL0_SWINT_TRIG_MASK (0x1 << I40E_VFINT_DYN_CTL0_SWINT_TRIG_SHIFT)
+#define I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT 3
+#define I40E_VFINT_DYN_CTL0_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTL0_INTERVAL_SHIFT 5
+#define I40E_VFINT_DYN_CTL0_INTERVAL_MASK (0xFFF << I40E_VFINT_DYN_CTL0_INTERVAL_SHIFT)
+#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT 24
+#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK (0x1 << I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_SHIFT 25
+#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTL0_SW_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTL0_INTENA_MSK_SHIFT 31
+#define I40E_VFINT_DYN_CTL0_INTENA_MSK_MASK (0x1 << I40E_VFINT_DYN_CTL0_INTENA_MSK_SHIFT)
+#define I40E_VFINT_DYN_CTLN(_INTVF) (0x00024800 + ((_INTVF) * 4)) /* _i=0...511 */
+#define I40E_VFINT_DYN_CTLN_MAX_INDEX 511
+#define I40E_VFINT_DYN_CTLN_INTENA_SHIFT 0
+#define I40E_VFINT_DYN_CTLN_INTENA_MASK (0x1 << I40E_VFINT_DYN_CTLN_INTENA_SHIFT)
+#define I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT 1
+#define I40E_VFINT_DYN_CTLN_CLEARPBA_MASK (0x1 << I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT)
+#define I40E_VFINT_DYN_CTLN_SWINT_TRIG_SHIFT 2
+#define I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK (0x1 << I40E_VFINT_DYN_CTLN_SWINT_TRIG_SHIFT)
+#define I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT 3
+#define I40E_VFINT_DYN_CTLN_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTLN_INTERVAL_SHIFT 5
+#define I40E_VFINT_DYN_CTLN_INTERVAL_MASK (0xFFF << I40E_VFINT_DYN_CTLN_INTERVAL_SHIFT)
+#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT 24
+#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK (0x1 << I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_SHIFT 25
+#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTLN_SW_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTLN_INTENA_MSK_SHIFT 31
+#define I40E_VFINT_DYN_CTLN_INTENA_MSK_MASK (0x1 << I40E_VFINT_DYN_CTLN_INTENA_MSK_SHIFT)
+#define I40E_VFINT_ICR0(_VF) (0x0002BC00 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFINT_ICR0_MAX_INDEX 127
+#define I40E_VFINT_ICR0_INTEVENT_SHIFT 0
+#define I40E_VFINT_ICR0_INTEVENT_MASK (0x1 << I40E_VFINT_ICR0_INTEVENT_SHIFT)
+#define I40E_VFINT_ICR0_QUEUE_0_SHIFT 1
+#define I40E_VFINT_ICR0_QUEUE_0_MASK (0x1 << I40E_VFINT_ICR0_QUEUE_0_SHIFT)
+#define I40E_VFINT_ICR0_QUEUE_1_SHIFT 2
+#define I40E_VFINT_ICR0_QUEUE_1_MASK (0x1 << I40E_VFINT_ICR0_QUEUE_1_SHIFT)
+#define I40E_VFINT_ICR0_QUEUE_2_SHIFT 3
+#define I40E_VFINT_ICR0_QUEUE_2_MASK (0x1 << I40E_VFINT_ICR0_QUEUE_2_SHIFT)
+#define I40E_VFINT_ICR0_QUEUE_3_SHIFT 4
+#define I40E_VFINT_ICR0_QUEUE_3_MASK (0x1 << I40E_VFINT_ICR0_QUEUE_3_SHIFT)
+#define I40E_VFINT_ICR0_LINK_STAT_CHANGE_SHIFT 25
+#define I40E_VFINT_ICR0_LINK_STAT_CHANGE_MASK (0x1 << I40E_VFINT_ICR0_LINK_STAT_CHANGE_SHIFT)
+#define I40E_VFINT_ICR0_ADMINQ_SHIFT 30
+#define I40E_VFINT_ICR0_ADMINQ_MASK (0x1 << I40E_VFINT_ICR0_ADMINQ_SHIFT)
+#define I40E_VFINT_ICR0_SWINT_SHIFT 31
+#define I40E_VFINT_ICR0_SWINT_MASK (0x1 << I40E_VFINT_ICR0_SWINT_SHIFT)
+#define I40E_VFINT_ICR0_ENA(_VF) (0x0002C000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFINT_ICR0_ENA_MAX_INDEX 127
+#define I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT 25
+#define I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK (0x1 << I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT)
+#define I40E_VFINT_ICR0_ENA_ADMINQ_SHIFT 30
+#define I40E_VFINT_ICR0_ENA_ADMINQ_MASK (0x1 << I40E_VFINT_ICR0_ENA_ADMINQ_SHIFT)
+#define I40E_VFINT_ICR0_ENA_RSVD_SHIFT 31
+#define I40E_VFINT_ICR0_ENA_RSVD_MASK (0x1 << I40E_VFINT_ICR0_ENA_RSVD_SHIFT)
+#define I40E_VFINT_ITR0(_i, _VF) (0x00028000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...2, _VF=0...127 */
+#define I40E_VFINT_ITR0_MAX_INDEX 2
+#define I40E_VFINT_ITR0_INTERVAL_SHIFT 0
+#define I40E_VFINT_ITR0_INTERVAL_MASK (0xFFF << I40E_VFINT_ITR0_INTERVAL_SHIFT)
+#define I40E_VFINT_ITRN(_i, _INTVF) (0x00020000 + ((_i) * 2048 + (_INTVF) * 4)) /* _i=0...2, _INTVF=0...511 */
+#define I40E_VFINT_ITRN_MAX_INDEX 2
+#define I40E_VFINT_ITRN_INTERVAL_SHIFT 0
+#define I40E_VFINT_ITRN_INTERVAL_MASK (0xFFF << I40E_VFINT_ITRN_INTERVAL_SHIFT)
+#define I40E_VFINT_STAT_CTL0(_VF) (0x0002A000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFINT_STAT_CTL0_MAX_INDEX 127
+#define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2
+#define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_MASK (0x3 << I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT)
+#define I40E_VPINT_AEQCTL(_VF) (0x0002B800 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VPINT_AEQCTL_MAX_INDEX 127
+#define I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT 0
+#define I40E_VPINT_AEQCTL_MSIX_INDX_MASK (0xFF << I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT)
+#define I40E_VPINT_AEQCTL_ITR_INDX_SHIFT 11
+#define I40E_VPINT_AEQCTL_ITR_INDX_MASK (0x3 << I40E_VPINT_AEQCTL_ITR_INDX_SHIFT)
+#define I40E_VPINT_AEQCTL_MSIX0_INDX_SHIFT 13
+#define I40E_VPINT_AEQCTL_MSIX0_INDX_MASK (0x7 << I40E_VPINT_AEQCTL_MSIX0_INDX_SHIFT)
+#define I40E_VPINT_AEQCTL_CAUSE_ENA_SHIFT 30
+#define I40E_VPINT_AEQCTL_CAUSE_ENA_MASK (0x1 << I40E_VPINT_AEQCTL_CAUSE_ENA_SHIFT)
+#define I40E_VPINT_AEQCTL_INTEVENT_SHIFT 31
+#define I40E_VPINT_AEQCTL_INTEVENT_MASK (0x1 << I40E_VPINT_AEQCTL_INTEVENT_SHIFT)
+#define I40E_VPINT_CEQCTL(_INTVF) (0x00026800 + ((_INTVF) * 4)) /* _i=0...511 */
+#define I40E_VPINT_CEQCTL_MAX_INDEX 511
+#define I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT 0
+#define I40E_VPINT_CEQCTL_MSIX_INDX_MASK (0xFF << I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT)
+#define I40E_VPINT_CEQCTL_ITR_INDX_SHIFT 11
+#define I40E_VPINT_CEQCTL_ITR_INDX_MASK (0x3 << I40E_VPINT_CEQCTL_ITR_INDX_SHIFT)
+#define I40E_VPINT_CEQCTL_MSIX0_INDX_SHIFT 13
+#define I40E_VPINT_CEQCTL_MSIX0_INDX_MASK (0x7 << I40E_VPINT_CEQCTL_MSIX0_INDX_SHIFT)
+#define I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT 16
+#define I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK (0x7FF << I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT)
+#define I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT 27
+#define I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK (0x3 << I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT)
+#define I40E_VPINT_CEQCTL_CAUSE_ENA_SHIFT 30
+#define I40E_VPINT_CEQCTL_CAUSE_ENA_MASK (0x1 << I40E_VPINT_CEQCTL_CAUSE_ENA_SHIFT)
+#define I40E_VPINT_CEQCTL_INTEVENT_SHIFT 31
+#define I40E_VPINT_CEQCTL_INTEVENT_MASK (0x1 << I40E_VPINT_CEQCTL_INTEVENT_SHIFT)
+#define I40E_VPINT_LNKLST0(_VF) (0x0002A800 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VPINT_LNKLST0_MAX_INDEX 127
+#define I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT 0
+#define I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK (0x7FF << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT)
+#define I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT 11
+#define I40E_VPINT_LNKLST0_FIRSTQ_TYPE_MASK (0x3 << I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT)
+#define I40E_VPINT_LNKLSTN(_INTVF) (0x00025000 + ((_INTVF) * 4)) /* _i=0...511 */
+#define I40E_VPINT_LNKLSTN_MAX_INDEX 511
+#define I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT 0
+#define I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK (0x7FF << I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT)
+#define I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT 11
+#define I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK (0x3 << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)
+#define I40E_VPINT_RATE0(_VF) (0x0002AC00 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VPINT_RATE0_MAX_INDEX 127
+#define I40E_VPINT_RATE0_INTERVAL_SHIFT 0
+#define I40E_VPINT_RATE0_INTERVAL_MASK (0x3F << I40E_VPINT_RATE0_INTERVAL_SHIFT)
+#define I40E_VPINT_RATE0_INTRL_ENA_SHIFT 6
+#define I40E_VPINT_RATE0_INTRL_ENA_MASK (0x1 << I40E_VPINT_RATE0_INTRL_ENA_SHIFT)
+#define I40E_VPINT_RATEN(_INTVF) (0x00025800 + ((_INTVF) * 4)) /* _i=0...511 */
+#define I40E_VPINT_RATEN_MAX_INDEX 511
+#define I40E_VPINT_RATEN_INTERVAL_SHIFT 0
+#define I40E_VPINT_RATEN_INTERVAL_MASK (0x3F << I40E_VPINT_RATEN_INTERVAL_SHIFT)
+#define I40E_VPINT_RATEN_INTRL_ENA_SHIFT 6
+#define I40E_VPINT_RATEN_INTRL_ENA_MASK (0x1 << I40E_VPINT_RATEN_INTRL_ENA_SHIFT)
+#define I40E_GL_RDPU_CNTRL 0x00051060
+#define I40E_GL_RDPU_CNTRL_RX_PAD_EN_SHIFT 0
+#define I40E_GL_RDPU_CNTRL_RX_PAD_EN_MASK (0x1 << I40E_GL_RDPU_CNTRL_RX_PAD_EN_SHIFT)
+#define I40E_GL_RDPU_CNTRL_ECO_SHIFT 1
+#define I40E_GL_RDPU_CNTRL_ECO_MASK (0x7FFFFFFF << I40E_GL_RDPU_CNTRL_ECO_SHIFT)
+#define I40E_GLLAN_RCTL_0 0x0012A500
+#define I40E_GLLAN_RCTL_0_PXE_MODE_SHIFT 0
+#define I40E_GLLAN_RCTL_0_PXE_MODE_MASK (0x1 << I40E_GLLAN_RCTL_0_PXE_MODE_SHIFT)
+#define I40E_GLLAN_TSOMSK_F 0x000442D8
+#define I40E_GLLAN_TSOMSK_F_TCPMSKF_SHIFT 0
+#define I40E_GLLAN_TSOMSK_F_TCPMSKF_MASK (0xFFF << I40E_GLLAN_TSOMSK_F_TCPMSKF_SHIFT)
+#define I40E_GLLAN_TSOMSK_L 0x000442E0
+#define I40E_GLLAN_TSOMSK_L_TCPMSKL_SHIFT 0
+#define I40E_GLLAN_TSOMSK_L_TCPMSKL_MASK (0xFFF << I40E_GLLAN_TSOMSK_L_TCPMSKL_SHIFT)
+#define I40E_GLLAN_TSOMSK_M 0x000442DC
+#define I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT 0
+#define I40E_GLLAN_TSOMSK_M_TCPMSKM_MASK (0xFFF << I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT)
+#define I40E_PFLAN_QALLOC 0x001C0400
+#define I40E_PFLAN_QALLOC_FIRSTQ_SHIFT 0
+#define I40E_PFLAN_QALLOC_FIRSTQ_MASK (0x7FF << I40E_PFLAN_QALLOC_FIRSTQ_SHIFT)
+#define I40E_PFLAN_QALLOC_LASTQ_SHIFT 16
+#define I40E_PFLAN_QALLOC_LASTQ_MASK (0x7FF << I40E_PFLAN_QALLOC_LASTQ_SHIFT)
+#define I40E_PFLAN_QALLOC_VALID_SHIFT 31
+#define I40E_PFLAN_QALLOC_VALID_MASK (0x1 << I40E_PFLAN_QALLOC_VALID_SHIFT)
+#define I40E_QRX_ENA(_Q) (0x00120000 + ((_Q) * 4)) /* _i=0...1535 */
+#define I40E_QRX_ENA_MAX_INDEX 1535
+#define I40E_QRX_ENA_QENA_REQ_SHIFT 0
+#define I40E_QRX_ENA_QENA_REQ_MASK (0x1 << I40E_QRX_ENA_QENA_REQ_SHIFT)
+#define I40E_QRX_ENA_FAST_QDIS_SHIFT 1
+#define I40E_QRX_ENA_FAST_QDIS_MASK (0x1 << I40E_QRX_ENA_FAST_QDIS_SHIFT)
+#define I40E_QRX_ENA_QENA_STAT_SHIFT 2
+#define I40E_QRX_ENA_QENA_STAT_MASK (0x1 << I40E_QRX_ENA_QENA_STAT_SHIFT)
+#define I40E_QRX_TAIL(_Q) (0x00128000 + ((_Q) * 4)) /* _i=0...1535 */
+#define I40E_QRX_TAIL_MAX_INDEX 1535
+#define I40E_QRX_TAIL_TAIL_SHIFT 0
+#define I40E_QRX_TAIL_TAIL_MASK (0x1FFF << I40E_QRX_TAIL_TAIL_SHIFT)
+#define I40E_QTX_CTL(_Q) (0x00104000 + ((_Q) * 4)) /* _i=0...1535 */
+#define I40E_QTX_CTL_MAX_INDEX 1535
+#define I40E_QTX_CTL_PFVF_Q_SHIFT 0
+#define I40E_QTX_CTL_PFVF_Q_MASK (0x3 << I40E_QTX_CTL_PFVF_Q_SHIFT)
+#define I40E_QTX_CTL_PF_INDX_SHIFT 2
+#define I40E_QTX_CTL_PF_INDX_MASK (0xF << I40E_QTX_CTL_PF_INDX_SHIFT)
+#define I40E_QTX_CTL_VFVM_INDX_SHIFT 7
+#define I40E_QTX_CTL_VFVM_INDX_MASK (0x1FF << I40E_QTX_CTL_VFVM_INDX_SHIFT)
+#define I40E_QTX_ENA(_Q) (0x00100000 + ((_Q) * 4)) /* _i=0...1535 */
+#define I40E_QTX_ENA_MAX_INDEX 1535
+#define I40E_QTX_ENA_QENA_REQ_SHIFT 0
+#define I40E_QTX_ENA_QENA_REQ_MASK (0x1 << I40E_QTX_ENA_QENA_REQ_SHIFT)
+#define I40E_QTX_ENA_FAST_QDIS_SHIFT 1
+#define I40E_QTX_ENA_FAST_QDIS_MASK (0x1 << I40E_QTX_ENA_FAST_QDIS_SHIFT)
+#define I40E_QTX_ENA_QENA_STAT_SHIFT 2
+#define I40E_QTX_ENA_QENA_STAT_MASK (0x1 << I40E_QTX_ENA_QENA_STAT_SHIFT)
+#define I40E_QTX_HEAD(_Q) (0x000E4000 + ((_Q) * 4)) /* _i=0...1535 */
+#define I40E_QTX_HEAD_MAX_INDEX 1535
+#define I40E_QTX_HEAD_HEAD_SHIFT 0
+#define I40E_QTX_HEAD_HEAD_MASK (0x1FFF << I40E_QTX_HEAD_HEAD_SHIFT)
+#define I40E_QTX_HEAD_RS_PENDING_SHIFT 16
+#define I40E_QTX_HEAD_RS_PENDING_MASK (0x1 << I40E_QTX_HEAD_RS_PENDING_SHIFT)
+#define I40E_QTX_TAIL(_Q) (0x00108000 + ((_Q) * 4)) /* _i=0...1535 */
+#define I40E_QTX_TAIL_MAX_INDEX 1535
+#define I40E_QTX_TAIL_TAIL_SHIFT 0
+#define I40E_QTX_TAIL_TAIL_MASK (0x1FFF << I40E_QTX_TAIL_TAIL_SHIFT)
+#define I40E_VPLAN_MAPENA(_VF) (0x00074000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VPLAN_MAPENA_MAX_INDEX 127
+#define I40E_VPLAN_MAPENA_TXRX_ENA_SHIFT 0
+#define I40E_VPLAN_MAPENA_TXRX_ENA_MASK (0x1 << I40E_VPLAN_MAPENA_TXRX_ENA_SHIFT)
+#define I40E_VPLAN_QTABLE(_i, _VF) (0x00070000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...15, _VF=0...127 */
+#define I40E_VPLAN_QTABLE_MAX_INDEX 15
+#define I40E_VPLAN_QTABLE_QINDEX_SHIFT 0
+#define I40E_VPLAN_QTABLE_QINDEX_MASK (0x7FF << I40E_VPLAN_QTABLE_QINDEX_SHIFT)
+#define I40E_VSILAN_QBASE(_VSI) (0x0020C800 + ((_VSI) * 4)) /* _i=0...383 */
+#define I40E_VSILAN_QBASE_MAX_INDEX 383
+#define I40E_VSILAN_QBASE_VSIBASE_SHIFT 0
+#define I40E_VSILAN_QBASE_VSIBASE_MASK (0x7FF << I40E_VSILAN_QBASE_VSIBASE_SHIFT)
+#define I40E_VSILAN_QBASE_VSIQTABLE_ENA_SHIFT 11
+#define I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK (0x1 << I40E_VSILAN_QBASE_VSIQTABLE_ENA_SHIFT)
+#define I40E_VSILAN_QTABLE(_i, _VSI) (0x00200000 + ((_i) * 2048 + (_VSI) * 4))
+#define I40E_VSILAN_QTABLE_MAX_INDEX 7
+#define I40E_VSILAN_QTABLE_QINDEX_0_SHIFT 0
+#define I40E_VSILAN_QTABLE_QINDEX_0_MASK (0x7FF << I40E_VSILAN_QTABLE_QINDEX_0_SHIFT)
+#define I40E_VSILAN_QTABLE_QINDEX_1_SHIFT 16
+#define I40E_VSILAN_QTABLE_QINDEX_1_MASK (0x7FF << I40E_VSILAN_QTABLE_QINDEX_1_SHIFT)
+#define I40E_PRTGL_SAH 0x001E2140
+#define I40E_PRTGL_SAH_FC_SAH_SHIFT 0
+#define I40E_PRTGL_SAH_FC_SAH_MASK (0xFFFF << I40E_PRTGL_SAH_FC_SAH_SHIFT)
+#define I40E_PRTGL_SAH_MFS_SHIFT 16
+#define I40E_PRTGL_SAH_MFS_MASK (0xFFFF << I40E_PRTGL_SAH_MFS_SHIFT)
+#define I40E_PRTGL_SAL 0x001E2120
+#define I40E_PRTGL_SAL_FC_SAL_SHIFT 0
+#define I40E_PRTGL_SAL_FC_SAL_MASK (0xFFFFFFFF << I40E_PRTGL_SAL_FC_SAL_SHIFT)
+#define I40E_PRTMAC_HLCTLA 0x001E4760
+#define I40E_PRTMAC_HLCTLA_DROP_US_PKTS_SHIFT 0
+#define I40E_PRTMAC_HLCTLA_DROP_US_PKTS_MASK (0x1 << I40E_PRTMAC_HLCTLA_DROP_US_PKTS_SHIFT)
+#define I40E_PRTMAC_HLCTLA_RX_FWRD_CTRL_SHIFT 1
+#define I40E_PRTMAC_HLCTLA_RX_FWRD_CTRL_MASK (0x1 << I40E_PRTMAC_HLCTLA_RX_FWRD_CTRL_SHIFT)
+#define I40E_PRTMAC_HLCTLA_CHOP_OS_PKT_SHIFT 2
+#define I40E_PRTMAC_HLCTLA_CHOP_OS_PKT_MASK (0x1 << I40E_PRTMAC_HLCTLA_CHOP_OS_PKT_SHIFT)
+#define I40E_PRTMAC_HLCTLA_TX_HYSTERESIS_SHIFT 4
+#define I40E_PRTMAC_HLCTLA_TX_HYSTERESIS_MASK (0x7 << I40E_PRTMAC_HLCTLA_TX_HYSTERESIS_SHIFT)
+#define I40E_PRTMAC_HLCTLA_HYS_FLUSH_PKT_SHIFT 7
+#define I40E_PRTMAC_HLCTLA_HYS_FLUSH_PKT_MASK (0x1 << I40E_PRTMAC_HLCTLA_HYS_FLUSH_PKT_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GCP 0x001E3130
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GCP_HSEC_CTL_RX_CHECK_SA_GCP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GCP_HSEC_CTL_RX_CHECK_SA_GCP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GCP_HSEC_CTL_RX_CHECK_SA_GCP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GPP 0x001E3290
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GPP_HSEC_CTL_RX_CHECK_SA_GPP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GPP_HSEC_CTL_RX_CHECK_SA_GPP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GPP_HSEC_CTL_RX_CHECK_SA_GPP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_PPP 0x001E3310
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_PPP_HSEC_CTL_RX_CHECK_SA_PPP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_PPP_HSEC_CTL_RX_CHECK_SA_PPP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_PPP_HSEC_CTL_RX_CHECK_SA_PPP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GCP 0x001E3100
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GCP_HSEC_CTL_RX_CHECK_UCAST_GCP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GCP_HSEC_CTL_RX_CHECK_UCAST_GCP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GCP_HSEC_CTL_RX_CHECK_UCAST_GCP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GPP 0x001E3280
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GPP_HSEC_CTL_RX_CHECK_UCAST_GPP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GPP_HSEC_CTL_RX_CHECK_UCAST_GPP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GPP_HSEC_CTL_RX_CHECK_UCAST_GPP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_PPP 0x001E3300
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_PPP_HSEC_CTL_RX_CHECK_UCAST_PPP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_PPP_HSEC_CTL_RX_CHECK_UCAST_PPP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_PPP_HSEC_CTL_RX_CHECK_UCAST_PPP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP 0x001E30E0
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP 0x001E3260
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP 0x001E32E0
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL 0x001E3360
+#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1 0x001E3110
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_MASK (0xFFFFFFFF << I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2 0x001E3120
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_MASK (0xFFFF << I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE 0x001E30C0
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_MASK (0x1FF << I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1 0x001E3140
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_MASK (0xFFFFFFFF << I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2 0x001E3150
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_MASK (0xFFFF << I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_ENABLE 0x001E3000
+#define I40E_PRTMAC_HSEC_CTL_TX_ENABLE_HSEC_CTL_TX_ENABLE_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_ENABLE_HSEC_CTL_TX_ENABLE_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_TX_ENABLE_HSEC_CTL_TX_ENABLE_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE 0x001E30D0
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_MASK (0x1FF << I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(_i) (0x001E3370 + ((_i) * 16))
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX 8
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_MASK (0xFFFF << I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(_i) (0x001E3400 + ((_i) * 16))
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MAX_INDEX 8
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MASK (0xFFFF << I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1 0x001E34B0
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_MASK (0xFFFFFFFF << I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2 0x001E34C0
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_MASK (0xFFFF << I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_SHIFT)
+#define I40E_PRTMAC_HSECTL1 0x001E3560
+#define I40E_PRTMAC_HSECTL1_DROP_US_PKTS_SHIFT 0
+#define I40E_PRTMAC_HSECTL1_DROP_US_PKTS_MASK (0x1 << I40E_PRTMAC_HSECTL1_DROP_US_PKTS_SHIFT)
+#define I40E_PRTMAC_HSECTL1_PAD_US_PKT_SHIFT 3
+#define I40E_PRTMAC_HSECTL1_PAD_US_PKT_MASK (0x1 << I40E_PRTMAC_HSECTL1_PAD_US_PKT_SHIFT)
+#define I40E_PRTMAC_HSECTL1_TX_HYSTERESIS_SHIFT 4
+#define I40E_PRTMAC_HSECTL1_TX_HYSTERESIS_MASK (0x7 << I40E_PRTMAC_HSECTL1_TX_HYSTERESIS_SHIFT)
+#define I40E_PRTMAC_HSECTL1_HYS_FLUSH_PKT_SHIFT 7
+#define I40E_PRTMAC_HSECTL1_HYS_FLUSH_PKT_MASK (0x1 << I40E_PRTMAC_HSECTL1_HYS_FLUSH_PKT_SHIFT)
+#define I40E_PRTMAC_HSECTL1_EN_SFD_CHECK_SHIFT 30
+#define I40E_PRTMAC_HSECTL1_EN_SFD_CHECK_MASK (0x1 << I40E_PRTMAC_HSECTL1_EN_SFD_CHECK_SHIFT)
+#define I40E_PRTMAC_HSECTL1_EN_PREAMBLE_CHECK_SHIFT 31
+#define I40E_PRTMAC_HSECTL1_EN_PREAMBLE_CHECK_MASK (0x1 << I40E_PRTMAC_HSECTL1_EN_PREAMBLE_CHECK_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A 0x0008C480
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_SHIFT 0
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_SHIFT 2
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_SHIFT 4
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_SHIFT 6
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_SHIFT 8
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_SHIFT 10
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_SHIFT 12
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_SHIFT 14
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B 0x0008C484
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_SHIFT 0
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_SHIFT 2
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_SHIFT 4
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_SHIFT 6
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_SHIFT 8
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_SHIFT 10
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_SHIFT 12
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_SHIFT 14
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_SHIFT)
+#define I40E_GL_MNG_FWSM 0x000B6134
+#define I40E_GL_MNG_FWSM_FW_MODES_SHIFT 1
+#define I40E_GL_MNG_FWSM_FW_MODES_MASK (0x7 << I40E_GL_MNG_FWSM_FW_MODES_SHIFT)
+#define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT 6
+#define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_MASK (0x1 << I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT)
+#define I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_SHIFT 11
+#define I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_MASK (0xF << I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_SHIFT)
+#define I40E_GL_MNG_FWSM_FW_STATUS_VALID_SHIFT 15
+#define I40E_GL_MNG_FWSM_FW_STATUS_VALID_MASK (0x1 << I40E_GL_MNG_FWSM_FW_STATUS_VALID_SHIFT)
+#define I40E_GL_MNG_FWSM_RESET_CNT_SHIFT 16
+#define I40E_GL_MNG_FWSM_RESET_CNT_MASK (0x7 << I40E_GL_MNG_FWSM_RESET_CNT_SHIFT)
+#define I40E_GL_MNG_FWSM_EXT_ERR_IND_SHIFT 19
+#define I40E_GL_MNG_FWSM_EXT_ERR_IND_MASK (0x3F << I40E_GL_MNG_FWSM_EXT_ERR_IND_SHIFT)
+#define I40E_GL_MNG_FWSM_RSVD_SHIFT 25
+#define I40E_GL_MNG_FWSM_RSVD_MASK (0x1 << I40E_GL_MNG_FWSM_RSVD_SHIFT)
+#define I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_SHIFT 26
+#define I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_MASK (0x1 << I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_SHIFT)
+#define I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_SHIFT 27
+#define I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_MASK (0x1 << I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_SHIFT)
+#define I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_SHIFT 28
+#define I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_MASK (0x1 << I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_SHIFT)
+#define I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_SHIFT 29
+#define I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_MASK (0x1 << I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_SHIFT)
+#define I40E_GL_MNG_HWARB_CTRL 0x000B6130
+#define I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_SHIFT 0
+#define I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_MASK (0x1 << I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_SHIFT)
+#define I40E_PRT_MNG_FTFT_DATA(_i) (0x000852A0 + ((_i) * 32)) /* _i=0...31 */
+#define I40E_PRT_MNG_FTFT_DATA_MAX_INDEX 31
+#define I40E_PRT_MNG_FTFT_DATA_DWORD_SHIFT 0
+#define I40E_PRT_MNG_FTFT_DATA_DWORD_MASK (0xFFFFFFFF << I40E_PRT_MNG_FTFT_DATA_DWORD_SHIFT)
+#define I40E_PRT_MNG_FTFT_LENGTH 0x00085260
+#define I40E_PRT_MNG_FTFT_LENGTH_LENGTH_SHIFT 0
+#define I40E_PRT_MNG_FTFT_LENGTH_LENGTH_MASK (0xFF << I40E_PRT_MNG_FTFT_LENGTH_LENGTH_SHIFT)
+#define I40E_PRT_MNG_FTFT_MASK(_i) (0x00085160 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRT_MNG_FTFT_MASK_MAX_INDEX 7
+#define I40E_PRT_MNG_FTFT_MASK_MASK_SHIFT 0
+#define I40E_PRT_MNG_FTFT_MASK_MASK_MASK (0xFFFF << I40E_PRT_MNG_FTFT_MASK_MASK_SHIFT)
+#define I40E_PRT_MNG_MANC 0x00256A20
+#define I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_SHIFT 0
+#define I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_MASK (0x1 << I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_SHIFT)
+#define I40E_PRT_MNG_MANC_NCSI_DISCARD_SHIFT 1
+#define I40E_PRT_MNG_MANC_NCSI_DISCARD_MASK (0x1 << I40E_PRT_MNG_MANC_NCSI_DISCARD_SHIFT)
+#define I40E_PRT_MNG_MANC_RCV_TCO_EN_SHIFT 17
+#define I40E_PRT_MNG_MANC_RCV_TCO_EN_MASK (0x1 << I40E_PRT_MNG_MANC_RCV_TCO_EN_SHIFT)
+#define I40E_PRT_MNG_MANC_RCV_ALL_SHIFT 19
+#define I40E_PRT_MNG_MANC_RCV_ALL_MASK (0x1 << I40E_PRT_MNG_MANC_RCV_ALL_SHIFT)
+#define I40E_PRT_MNG_MANC_FIXED_NET_TYPE_SHIFT 25
+#define I40E_PRT_MNG_MANC_FIXED_NET_TYPE_MASK (0x1 << I40E_PRT_MNG_MANC_FIXED_NET_TYPE_SHIFT)
+#define I40E_PRT_MNG_MANC_NET_TYPE_SHIFT 26
+#define I40E_PRT_MNG_MANC_NET_TYPE_MASK (0x1 << I40E_PRT_MNG_MANC_NET_TYPE_SHIFT)
+#define I40E_PRT_MNG_MANC_EN_BMC2OS_SHIFT 28
+#define I40E_PRT_MNG_MANC_EN_BMC2OS_MASK (0x1 << I40E_PRT_MNG_MANC_EN_BMC2OS_SHIFT)
+#define I40E_PRT_MNG_MANC_EN_BMC2NET_SHIFT 29
+#define I40E_PRT_MNG_MANC_EN_BMC2NET_MASK (0x1 << I40E_PRT_MNG_MANC_EN_BMC2NET_SHIFT)
+#define I40E_PRT_MNG_MAVTV(_i) (0x00255900 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRT_MNG_MAVTV_MAX_INDEX 7
+#define I40E_PRT_MNG_MAVTV_VID_SHIFT 0
+#define I40E_PRT_MNG_MAVTV_VID_MASK (0xFFF << I40E_PRT_MNG_MAVTV_VID_SHIFT)
+#define I40E_PRT_MNG_MDEF(_i) (0x00255D00 + ((_i) * 32))
+#define I40E_PRT_MNG_MDEF_MAX_INDEX 7
+#define I40E_PRT_MNG_MDEF_MAC_EXACT_AND_SHIFT 0
+#define I40E_PRT_MNG_MDEF_MAC_EXACT_AND_MASK (0xF << I40E_PRT_MNG_MDEF_MAC_EXACT_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_BROADCAST_AND_SHIFT 4
+#define I40E_PRT_MNG_MDEF_BROADCAST_AND_MASK (0x1 << I40E_PRT_MNG_MDEF_BROADCAST_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_VLAN_AND_SHIFT 5
+#define I40E_PRT_MNG_MDEF_VLAN_AND_MASK (0xFF << I40E_PRT_MNG_MDEF_VLAN_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_SHIFT 13
+#define I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_MASK (0xF << I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_SHIFT 17
+#define I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_MASK (0xF << I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_MAC_EXACT_OR_SHIFT 21
+#define I40E_PRT_MNG_MDEF_MAC_EXACT_OR_MASK (0xF << I40E_PRT_MNG_MDEF_MAC_EXACT_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_BROADCAST_OR_SHIFT 25
+#define I40E_PRT_MNG_MDEF_BROADCAST_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_BROADCAST_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_MULTICAST_AND_SHIFT 26
+#define I40E_PRT_MNG_MDEF_MULTICAST_AND_MASK (0x1 << I40E_PRT_MNG_MDEF_MULTICAST_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_SHIFT 27
+#define I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_SHIFT 28
+#define I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_SHIFT 29
+#define I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_PORT_0X298_OR_SHIFT 30
+#define I40E_PRT_MNG_MDEF_PORT_0X298_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_PORT_0X298_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_PORT_0X26F_OR_SHIFT 31
+#define I40E_PRT_MNG_MDEF_PORT_0X26F_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_PORT_0X26F_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT(_i) (0x00255F00 + ((_i) * 32))
+#define I40E_PRT_MNG_MDEF_EXT_MAX_INDEX 7
+#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_SHIFT 0
+#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_MASK (0xF << I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_SHIFT 4
+#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_MASK (0xF << I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_SHIFT 8
+#define I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_MASK (0xFFFF << I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_SHIFT 24
+#define I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_SHIFT 25
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_SHIFT 26
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_SHIFT 27
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_ICMP_OR_SHIFT 28
+#define I40E_PRT_MNG_MDEF_EXT_ICMP_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_ICMP_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_MLD_SHIFT 29
+#define I40E_PRT_MNG_MDEF_EXT_MLD_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_MLD_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_SHIFT 30
+#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_SHIFT 31
+#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_SHIFT)
+#define I40E_PRT_MNG_MDEFVSI(_i) (0x00256580 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRT_MNG_MDEFVSI_MAX_INDEX 3
+#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_SHIFT 0
+#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_MASK (0xFFFF << I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_SHIFT)
+#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_SHIFT 16
+#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_MASK (0xFFFF << I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_SHIFT)
+#define I40E_PRT_MNG_METF(_i) (0x00256780 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRT_MNG_METF_MAX_INDEX 3
+#define I40E_PRT_MNG_METF_ETYPE_SHIFT 0
+#define I40E_PRT_MNG_METF_ETYPE_MASK (0xFFFF << I40E_PRT_MNG_METF_ETYPE_SHIFT)
+#define I40E_PRT_MNG_METF_POLARITY_SHIFT 30
+#define I40E_PRT_MNG_METF_POLARITY_MASK (0x1 << I40E_PRT_MNG_METF_POLARITY_SHIFT)
+#define I40E_PRT_MNG_MFUTP(_i) (0x00254E00 + ((_i) * 32)) /* _i=0...15 */
+#define I40E_PRT_MNG_MFUTP_MAX_INDEX 15
+#define I40E_PRT_MNG_MFUTP_MFUTP_N_SHIFT 0
+#define I40E_PRT_MNG_MFUTP_MFUTP_N_MASK (0xFFFF << I40E_PRT_MNG_MFUTP_MFUTP_N_SHIFT)
+#define I40E_PRT_MNG_MFUTP_UDP_SHIFT 16
+#define I40E_PRT_MNG_MFUTP_UDP_MASK (0x1 << I40E_PRT_MNG_MFUTP_UDP_SHIFT)
+#define I40E_PRT_MNG_MFUTP_TCP_SHIFT 17
+#define I40E_PRT_MNG_MFUTP_TCP_MASK (0x1 << I40E_PRT_MNG_MFUTP_TCP_SHIFT)
+#define I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_SHIFT 18
+#define I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_MASK (0x1 << I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_SHIFT)
+#define I40E_PRT_MNG_MIPAF4(_i) (0x00256280 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRT_MNG_MIPAF4_MAX_INDEX 3
+#define I40E_PRT_MNG_MIPAF4_MIPAF_SHIFT 0
+#define I40E_PRT_MNG_MIPAF4_MIPAF_MASK (0xFFFFFFFF << I40E_PRT_MNG_MIPAF4_MIPAF_SHIFT)
+#define I40E_PRT_MNG_MIPAF6(_i) (0x00254200 + ((_i) * 32)) /* _i=0...15 */
+#define I40E_PRT_MNG_MIPAF6_MAX_INDEX 15
+#define I40E_PRT_MNG_MIPAF6_MIPAF_SHIFT 0
+#define I40E_PRT_MNG_MIPAF6_MIPAF_MASK (0xFFFFFFFF << I40E_PRT_MNG_MIPAF6_MIPAF_SHIFT)
+#define I40E_PRT_MNG_MMAH(_i) (0x00256380 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRT_MNG_MMAH_MAX_INDEX 3
+#define I40E_PRT_MNG_MMAH_MMAH_SHIFT 0
+#define I40E_PRT_MNG_MMAH_MMAH_MASK (0xFFFF << I40E_PRT_MNG_MMAH_MMAH_SHIFT)
+#define I40E_PRT_MNG_MMAL(_i) (0x00256480 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRT_MNG_MMAL_MAX_INDEX 3
+#define I40E_PRT_MNG_MMAL_MMAL_SHIFT 0
+#define I40E_PRT_MNG_MMAL_MMAL_MASK (0xFFFFFFFF << I40E_PRT_MNG_MMAL_MMAL_SHIFT)
+#define I40E_PRT_MNG_MNGONLY 0x00256A60
+#define I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_SHIFT 0
+#define I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_MASK (0xFF << I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_SHIFT)
+#define I40E_PRT_MNG_MSFM 0x00256AA0
+#define I40E_PRT_MNG_MSFM_PORT_26F_UDP_SHIFT 0
+#define I40E_PRT_MNG_MSFM_PORT_26F_UDP_MASK (0x1 << I40E_PRT_MNG_MSFM_PORT_26F_UDP_SHIFT)
+#define I40E_PRT_MNG_MSFM_PORT_26F_TCP_SHIFT 1
+#define I40E_PRT_MNG_MSFM_PORT_26F_TCP_MASK (0x1 << I40E_PRT_MNG_MSFM_PORT_26F_TCP_SHIFT)
+#define I40E_PRT_MNG_MSFM_PORT_298_UDP_SHIFT 2
+#define I40E_PRT_MNG_MSFM_PORT_298_UDP_MASK (0x1 << I40E_PRT_MNG_MSFM_PORT_298_UDP_SHIFT)
+#define I40E_PRT_MNG_MSFM_PORT_298_TCP_SHIFT 3
+#define I40E_PRT_MNG_MSFM_PORT_298_TCP_MASK (0x1 << I40E_PRT_MNG_MSFM_PORT_298_TCP_SHIFT)
+#define I40E_PRT_MNG_MSFM_IPV6_0_MASK_SHIFT 4
+#define I40E_PRT_MNG_MSFM_IPV6_0_MASK_MASK (0x1 << I40E_PRT_MNG_MSFM_IPV6_0_MASK_SHIFT)
+#define I40E_PRT_MNG_MSFM_IPV6_1_MASK_SHIFT 5
+#define I40E_PRT_MNG_MSFM_IPV6_1_MASK_MASK (0x1 << I40E_PRT_MNG_MSFM_IPV6_1_MASK_SHIFT)
+#define I40E_PRT_MNG_MSFM_IPV6_2_MASK_SHIFT 6
+#define I40E_PRT_MNG_MSFM_IPV6_2_MASK_MASK (0x1 << I40E_PRT_MNG_MSFM_IPV6_2_MASK_SHIFT)
+#define I40E_PRT_MNG_MSFM_IPV6_3_MASK_SHIFT 7
+#define I40E_PRT_MNG_MSFM_IPV6_3_MASK_MASK (0x1 << I40E_PRT_MNG_MSFM_IPV6_3_MASK_SHIFT)
+#define I40E_MSIX_PBA(_i) (0x00004900 + ((_i) * 4)) /* _i=0...5 */
+#define I40E_MSIX_PBA_MAX_INDEX 5
+#define I40E_MSIX_PBA_PENBIT_SHIFT 0
+#define I40E_MSIX_PBA_PENBIT_MASK (0xFFFFFFFF << I40E_MSIX_PBA_PENBIT_SHIFT)
+#define I40E_MSIX_TADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...128 */
+#define I40E_MSIX_TADD_MAX_INDEX 128
+#define I40E_MSIX_TADD_MSIXTADD10_SHIFT 0
+#define I40E_MSIX_TADD_MSIXTADD10_MASK (0x3 << I40E_MSIX_TADD_MSIXTADD10_SHIFT)
+#define I40E_MSIX_TADD_MSIXTADD_SHIFT 2
+#define I40E_MSIX_TADD_MSIXTADD_MASK (0x3FFFFFFF << I40E_MSIX_TADD_MSIXTADD_SHIFT)
+#define I40E_MSIX_TMSG(_i) (0x00000008 + ((_i) * 16)) /* _i=0...128 */
+#define I40E_MSIX_TMSG_MAX_INDEX 128
+#define I40E_MSIX_TMSG_MSIXTMSG_SHIFT 0
+#define I40E_MSIX_TMSG_MSIXTMSG_MASK (0xFFFFFFFF << I40E_MSIX_TMSG_MSIXTMSG_SHIFT)
+#define I40E_MSIX_TUADD(_i) (0x00000004 + ((_i) * 16)) /* _i=0...128 */
+#define I40E_MSIX_TUADD_MAX_INDEX 128
+#define I40E_MSIX_TUADD_MSIXTUADD_SHIFT 0
+#define I40E_MSIX_TUADD_MSIXTUADD_MASK (0xFFFFFFFF << I40E_MSIX_TUADD_MSIXTUADD_SHIFT)
+#define I40E_MSIX_TVCTRL(_i) (0x0000000C + ((_i) * 16)) /* _i=0...128 */
+#define I40E_MSIX_TVCTRL_MAX_INDEX 128
+#define I40E_MSIX_TVCTRL_MASK_SHIFT 0
+#define I40E_MSIX_TVCTRL_MASK_MASK (0x1 << I40E_MSIX_TVCTRL_MASK_SHIFT)
+#define I40E_VFMSIX_PBA1(_i) (0x00004944 + ((_i) * 4)) /* _i=0...19 */
+#define I40E_VFMSIX_PBA1_MAX_INDEX 19
+#define I40E_VFMSIX_PBA1_PENBIT_SHIFT 0
+#define I40E_VFMSIX_PBA1_PENBIT_MASK (0xFFFFFFFF << I40E_VFMSIX_PBA1_PENBIT_SHIFT)
+#define I40E_VFMSIX_TADD1(_i) (0x00002100 + ((_i) * 16)) /* _i=0...639 */
+#define I40E_VFMSIX_TADD1_MAX_INDEX 639
+#define I40E_VFMSIX_TADD1_MSIXTADD10_SHIFT 0
+#define I40E_VFMSIX_TADD1_MSIXTADD10_MASK (0x3 << I40E_VFMSIX_TADD1_MSIXTADD10_SHIFT)
+#define I40E_VFMSIX_TADD1_MSIXTADD_SHIFT 2
+#define I40E_VFMSIX_TADD1_MSIXTADD_MASK (0x3FFFFFFF << I40E_VFMSIX_TADD1_MSIXTADD_SHIFT)
+#define I40E_VFMSIX_TMSG1(_i) (0x00002108 + ((_i) * 16)) /* _i=0...639 */
+#define I40E_VFMSIX_TMSG1_MAX_INDEX 639
+#define I40E_VFMSIX_TMSG1_MSIXTMSG_SHIFT 0
+#define I40E_VFMSIX_TMSG1_MSIXTMSG_MASK (0xFFFFFFFF << I40E_VFMSIX_TMSG1_MSIXTMSG_SHIFT)
+#define I40E_VFMSIX_TUADD1(_i) (0x00002104 + ((_i) * 16)) /* _i=0...639 */
+#define I40E_VFMSIX_TUADD1_MAX_INDEX 639
+#define I40E_VFMSIX_TUADD1_MSIXTUADD_SHIFT 0
+#define I40E_VFMSIX_TUADD1_MSIXTUADD_MASK (0xFFFFFFFF << I40E_VFMSIX_TUADD1_MSIXTUADD_SHIFT)
+#define I40E_VFMSIX_TVCTRL1(_i) (0x0000210C + ((_i) * 16)) /* _i=0...639 */
+#define I40E_VFMSIX_TVCTRL1_MAX_INDEX 639
+#define I40E_VFMSIX_TVCTRL1_MASK_SHIFT 0
+#define I40E_VFMSIX_TVCTRL1_MASK_MASK (0x1 << I40E_VFMSIX_TVCTRL1_MASK_SHIFT)
+#define I40E_GLNVM_FLA 0x000B6108
+#define I40E_GLNVM_FLA_FL_SCK_SHIFT 0
+#define I40E_GLNVM_FLA_FL_SCK_MASK (0x1 << I40E_GLNVM_FLA_FL_SCK_SHIFT)
+#define I40E_GLNVM_FLA_FL_CE_SHIFT 1
+#define I40E_GLNVM_FLA_FL_CE_MASK (0x1 << I40E_GLNVM_FLA_FL_CE_SHIFT)
+#define I40E_GLNVM_FLA_FL_SI_SHIFT 2
+#define I40E_GLNVM_FLA_FL_SI_MASK (0x1 << I40E_GLNVM_FLA_FL_SI_SHIFT)
+#define I40E_GLNVM_FLA_FL_SO_SHIFT 3
+#define I40E_GLNVM_FLA_FL_SO_MASK (0x1 << I40E_GLNVM_FLA_FL_SO_SHIFT)
+#define I40E_GLNVM_FLA_FL_REQ_SHIFT 4
+#define I40E_GLNVM_FLA_FL_REQ_MASK (0x1 << I40E_GLNVM_FLA_FL_REQ_SHIFT)
+#define I40E_GLNVM_FLA_FL_GNT_SHIFT 5
+#define I40E_GLNVM_FLA_FL_GNT_MASK (0x1 << I40E_GLNVM_FLA_FL_GNT_SHIFT)
+#define I40E_GLNVM_FLA_LOCKED_SHIFT 6
+#define I40E_GLNVM_FLA_LOCKED_MASK (0x1 << I40E_GLNVM_FLA_LOCKED_SHIFT)
+#define I40E_GLNVM_FLA_FL_SADDR_SHIFT 18
+#define I40E_GLNVM_FLA_FL_SADDR_MASK (0x7FF << I40E_GLNVM_FLA_FL_SADDR_SHIFT)
+#define I40E_GLNVM_FLA_FL_BUSY_SHIFT 30
+#define I40E_GLNVM_FLA_FL_BUSY_MASK (0x1 << I40E_GLNVM_FLA_FL_BUSY_SHIFT)
+#define I40E_GLNVM_FLA_FL_DER_SHIFT 31
+#define I40E_GLNVM_FLA_FL_DER_MASK (0x1 << I40E_GLNVM_FLA_FL_DER_SHIFT)
+#define I40E_GLNVM_FLASHID 0x000B6104
+#define I40E_GLNVM_FLASHID_FLASHID_SHIFT 0
+#define I40E_GLNVM_FLASHID_FLASHID_MASK (0xFFFFFF << I40E_GLNVM_FLASHID_FLASHID_SHIFT)
+#define I40E_GLNVM_GENS 0x000B6100
+#define I40E_GLNVM_GENS_NVM_PRES_SHIFT 0
+#define I40E_GLNVM_GENS_NVM_PRES_MASK (0x1 << I40E_GLNVM_GENS_NVM_PRES_SHIFT)
+#define I40E_GLNVM_GENS_SR_SIZE_SHIFT 5
+#define I40E_GLNVM_GENS_SR_SIZE_MASK (0x7 << I40E_GLNVM_GENS_SR_SIZE_SHIFT)
+#define I40E_GLNVM_GENS_BANK1VAL_SHIFT 8
+#define I40E_GLNVM_GENS_BANK1VAL_MASK (0x1 << I40E_GLNVM_GENS_BANK1VAL_SHIFT)
+#define I40E_GLNVM_GENS_ALT_PRST_SHIFT 23
+#define I40E_GLNVM_GENS_ALT_PRST_MASK (0x1 << I40E_GLNVM_GENS_ALT_PRST_SHIFT)
+#define I40E_GLNVM_GENS_FL_AUTO_RD_SHIFT 25
+#define I40E_GLNVM_GENS_FL_AUTO_RD_MASK (0x1 << I40E_GLNVM_GENS_FL_AUTO_RD_SHIFT)
+#define I40E_GLNVM_PROTCSR(_i) (0x000B6010 + ((_i) * 4)) /* _i=0...59 */
+#define I40E_GLNVM_PROTCSR_MAX_INDEX 59
+#define I40E_GLNVM_PROTCSR_ADDR_BLOCK_SHIFT 0
+#define I40E_GLNVM_PROTCSR_ADDR_BLOCK_MASK (0xFFFFFF << I40E_GLNVM_PROTCSR_ADDR_BLOCK_SHIFT)
+#define I40E_GLNVM_SRCTL 0x000B6110
+#define I40E_GLNVM_SRCTL_SRBUSY_SHIFT 0
+#define I40E_GLNVM_SRCTL_SRBUSY_MASK (0x1 << I40E_GLNVM_SRCTL_SRBUSY_SHIFT)
+#define I40E_GLNVM_SRCTL_ADDR_SHIFT 14
+#define I40E_GLNVM_SRCTL_ADDR_MASK (0x7FFF << I40E_GLNVM_SRCTL_ADDR_SHIFT)
+#define I40E_GLNVM_SRCTL_WRITE_SHIFT 29
+#define I40E_GLNVM_SRCTL_WRITE_MASK (0x1 << I40E_GLNVM_SRCTL_WRITE_SHIFT)
+#define I40E_GLNVM_SRCTL_START_SHIFT 30
+#define I40E_GLNVM_SRCTL_START_MASK (0x1 << I40E_GLNVM_SRCTL_START_SHIFT)
+#define I40E_GLNVM_SRCTL_DONE_SHIFT 31
+#define I40E_GLNVM_SRCTL_DONE_MASK (0x1 << I40E_GLNVM_SRCTL_DONE_SHIFT)
+#define I40E_GLNVM_SRDATA 0x000B6114
+#define I40E_GLNVM_SRDATA_WRDATA_SHIFT 0
+#define I40E_GLNVM_SRDATA_WRDATA_MASK (0xFFFF << I40E_GLNVM_SRDATA_WRDATA_SHIFT)
+#define I40E_GLNVM_SRDATA_RDDATA_SHIFT 16
+#define I40E_GLNVM_SRDATA_RDDATA_MASK (0xFFFF << I40E_GLNVM_SRDATA_RDDATA_SHIFT)
+#define I40E_GLNVM_ULD 0x000B6008
+#define I40E_GLNVM_ULD_CONF_PCIR_DONE_SHIFT 0
+#define I40E_GLNVM_ULD_CONF_PCIR_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_PCIR_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_PCIRTL_DONE_SHIFT 1
+#define I40E_GLNVM_ULD_CONF_PCIRTL_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_PCIRTL_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_LCB_DONE_SHIFT 2
+#define I40E_GLNVM_ULD_CONF_LCB_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_LCB_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_CORE_DONE_SHIFT 3
+#define I40E_GLNVM_ULD_CONF_CORE_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_CORE_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_GLOBAL_DONE_SHIFT 4
+#define I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_GLOBAL_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_POR_DONE_SHIFT 5
+#define I40E_GLNVM_ULD_CONF_POR_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_POR_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_SHIFT 6
+#define I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_SHIFT 7
+#define I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_EMP_DONE_SHIFT 8
+#define I40E_GLNVM_ULD_CONF_EMP_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_EMP_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_PCIALT_DONE_SHIFT 9
+#define I40E_GLNVM_ULD_CONF_PCIALT_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_PCIALT_DONE_SHIFT)
+
+#define I40E_GLPCI_BYTCTH 0x0009C484
+#define I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_SHIFT 0
+#define I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_MASK (0xFFFFFFFF << I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_SHIFT)
+#define I40E_GLPCI_BYTCTL 0x0009C488
+#define I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_SHIFT 0
+#define I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_MASK (0xFFFFFFFF << I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_SHIFT)
+#define I40E_GLPCI_CAPCTRL 0x000BE4A4
+#define I40E_GLPCI_CAPCTRL_VPD_EN_SHIFT 0
+#define I40E_GLPCI_CAPCTRL_VPD_EN_MASK (0x1 << I40E_GLPCI_CAPCTRL_VPD_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP 0x000BE4A8
+#define I40E_GLPCI_CAPSUP_PCIE_VER_SHIFT 0
+#define I40E_GLPCI_CAPSUP_PCIE_VER_MASK (0x1 << I40E_GLPCI_CAPSUP_PCIE_VER_SHIFT)
+#define I40E_GLPCI_CAPSUP_LTR_EN_SHIFT 2
+#define I40E_GLPCI_CAPSUP_LTR_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_LTR_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_TPH_EN_SHIFT 3
+#define I40E_GLPCI_CAPSUP_TPH_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_TPH_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_ARI_EN_SHIFT 4
+#define I40E_GLPCI_CAPSUP_ARI_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_ARI_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_IOV_EN_SHIFT 5
+#define I40E_GLPCI_CAPSUP_IOV_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_IOV_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_ACS_EN_SHIFT 6
+#define I40E_GLPCI_CAPSUP_ACS_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_ACS_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_SEC_EN_SHIFT 7
+#define I40E_GLPCI_CAPSUP_SEC_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_SEC_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_ECRC_GEN_EN_SHIFT 16
+#define I40E_GLPCI_CAPSUP_ECRC_GEN_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_ECRC_GEN_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_ECRC_CHK_EN_SHIFT 17
+#define I40E_GLPCI_CAPSUP_ECRC_CHK_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_ECRC_CHK_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_IDO_EN_SHIFT 18
+#define I40E_GLPCI_CAPSUP_IDO_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_IDO_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_MSI_MASK_SHIFT 19
+#define I40E_GLPCI_CAPSUP_MSI_MASK_MASK (0x1 << I40E_GLPCI_CAPSUP_MSI_MASK_SHIFT)
+#define I40E_GLPCI_CAPSUP_CSR_CONF_EN_SHIFT 20
+#define I40E_GLPCI_CAPSUP_CSR_CONF_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_CSR_CONF_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_SHIFT 30
+#define I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_MASK (0x1 << I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_SHIFT)
+#define I40E_GLPCI_CAPSUP_LOAD_DEV_ID_SHIFT 31
+#define I40E_GLPCI_CAPSUP_LOAD_DEV_ID_MASK (0x1 << I40E_GLPCI_CAPSUP_LOAD_DEV_ID_SHIFT)
+#define I40E_GLPCI_CNF 0x000BE4C0
+#define I40E_GLPCI_CNF_FLEX10_SHIFT 1
+#define I40E_GLPCI_CNF_FLEX10_MASK (0x1 << I40E_GLPCI_CNF_FLEX10_SHIFT)
+#define I40E_GLPCI_CNF_WAKE_PIN_EN_SHIFT 2
+#define I40E_GLPCI_CNF_WAKE_PIN_EN_MASK (0x1 << I40E_GLPCI_CNF_WAKE_PIN_EN_SHIFT)
+#define I40E_GLPCI_CNF2 0x000BE494
+#define I40E_GLPCI_CNF2_RO_DIS_SHIFT 0
+#define I40E_GLPCI_CNF2_RO_DIS_MASK (0x1 << I40E_GLPCI_CNF2_RO_DIS_SHIFT)
+#define I40E_GLPCI_CNF2_CACHELINE_SIZE_SHIFT 1
+#define I40E_GLPCI_CNF2_CACHELINE_SIZE_MASK (0x1 << I40E_GLPCI_CNF2_CACHELINE_SIZE_SHIFT)
+#define I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT 2
+#define I40E_GLPCI_CNF2_MSI_X_PF_N_MASK (0x7FF << I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT)
+#define I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT 13
+#define I40E_GLPCI_CNF2_MSI_X_VF_N_MASK (0x7FF << I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT)
+#define I40E_GLPCI_DREVID 0x0009C480
+#define I40E_GLPCI_DREVID_DEFAULT_REVID_SHIFT 0
+#define I40E_GLPCI_DREVID_DEFAULT_REVID_MASK (0xFF << I40E_GLPCI_DREVID_DEFAULT_REVID_SHIFT)
+#define I40E_GLPCI_GSCL_1 0x0009C48C
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_SHIFT 0
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_SHIFT 1
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_SHIFT 2
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_SHIFT 3
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_SHIFT)
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_0_SHIFT 4
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_0_MASK (0x1 << I40E_GLPCI_GSCL_1_LBC_ENABLE_0_SHIFT)
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_1_SHIFT 5
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_1_MASK (0x1 << I40E_GLPCI_GSCL_1_LBC_ENABLE_1_SHIFT)
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_2_SHIFT 6
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_2_MASK (0x1 << I40E_GLPCI_GSCL_1_LBC_ENABLE_2_SHIFT)
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_3_SHIFT 7
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_3_MASK (0x1 << I40E_GLPCI_GSCL_1_LBC_ENABLE_3_SHIFT)
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_SHIFT 8
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_MASK (0x1 << I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_SHIFT)
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_SHIFT 9
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_MASK (0x1F << I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_SHIFT)
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_SHIFT 14
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_MASK (0x1 << I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_SHIFT)
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_SHIFT 15
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_MASK (0x1F << I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_SHIFT 28
+#define I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_SHIFT 29
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_SHIFT 30
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_START_SHIFT 31
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_START_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_COUNT_START_SHIFT)
+#define I40E_GLPCI_GSCL_2 0x0009C490
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_SHIFT 0
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_MASK (0xFF << I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_SHIFT)
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_SHIFT 8
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_MASK (0xFF << I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_SHIFT)
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_SHIFT 16
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_MASK (0xFF << I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_SHIFT)
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_SHIFT 24
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_MASK (0xFF << I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_SHIFT)
+#define I40E_GLPCI_GSCL_5_8(_i) (0x0009C494 + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLPCI_GSCL_5_8_MAX_INDEX 3
+#define I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_SHIFT 0
+#define I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_MASK (0xFFFF << I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_SHIFT)
+#define I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_SHIFT 16
+#define I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_MASK (0xFFFF << I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_SHIFT)
+#define I40E_GLPCI_GSCN_0_3(_i) (0x0009C4A4 + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLPCI_GSCN_0_3_MAX_INDEX 3
+#define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT 0
+#define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_MASK (0xFFFFFFFF << I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT)
+#define I40E_GLPCI_LATCT 0x0009C4B4
+#define I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_SHIFT 0
+#define I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_MASK (0xFFFFFFFF << I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_SHIFT)
+#define I40E_GLPCI_LBARCTRL 0x000BE484
+#define I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT 0
+#define I40E_GLPCI_LBARCTRL_PREFBAR_MASK (0x1 << I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT)
+#define I40E_GLPCI_LBARCTRL_BAR32_SHIFT 1
+#define I40E_GLPCI_LBARCTRL_BAR32_MASK (0x1 << I40E_GLPCI_LBARCTRL_BAR32_SHIFT)
+#define I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_SHIFT 3
+#define I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_MASK (0x1 << I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_SHIFT)
+#define I40E_GLPCI_LBARCTRL_PE_DB_SIZE_SHIFT 4
+#define I40E_GLPCI_LBARCTRL_PE_DB_SIZE_MASK (0x3 << I40E_GLPCI_LBARCTRL_PE_DB_SIZE_SHIFT)
+#define I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT 6
+#define I40E_GLPCI_LBARCTRL_FL_SIZE_MASK (0x7 << I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT)
+#define I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_SHIFT 10
+#define I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_MASK (0x1 << I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_SHIFT)
+#define I40E_GLPCI_LBARCTRL_EXROM_SIZE_SHIFT 11
+#define I40E_GLPCI_LBARCTRL_EXROM_SIZE_MASK (0x7 << I40E_GLPCI_LBARCTRL_EXROM_SIZE_SHIFT)
+#define I40E_GLPCI_LINKCAP 0x000BE4AC
+#define I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_SHIFT 0
+#define I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_MASK (0x3F << I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_SHIFT)
+#define I40E_GLPCI_LINKCAP_MAX_PAYLOAD_SHIFT 6
+#define I40E_GLPCI_LINKCAP_MAX_PAYLOAD_MASK (0x7 << I40E_GLPCI_LINKCAP_MAX_PAYLOAD_SHIFT)
+#define I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_SHIFT 9
+#define I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_MASK (0xF << I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_SHIFT)
+#define I40E_GLPCI_PCIERR 0x000BE4FC
+#define I40E_GLPCI_PCIERR_PCIE_ERR_REP_SHIFT 0
+#define I40E_GLPCI_PCIERR_PCIE_ERR_REP_MASK (0xFFFFFFFF << I40E_GLPCI_PCIERR_PCIE_ERR_REP_SHIFT)
+#define I40E_GLPCI_PCITEST2 0x000BE4BC
+#define I40E_GLPCI_PCITEST2_IOV_TEST_MODE_SHIFT 0
+#define I40E_GLPCI_PCITEST2_IOV_TEST_MODE_MASK (0x1 << I40E_GLPCI_PCITEST2_IOV_TEST_MODE_SHIFT)
+#define I40E_GLPCI_PCITEST2_TAG_ALLOC_SHIFT 1
+#define I40E_GLPCI_PCITEST2_TAG_ALLOC_MASK (0x1 << I40E_GLPCI_PCITEST2_TAG_ALLOC_SHIFT)
+
+#define I40E_GLPCI_PKTCT 0x0009C4BC
+#define I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_SHIFT 0
+#define I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_MASK (0xFFFFFFFF << I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_SHIFT)
+#define I40E_GLPCI_PMSUP 0x000BE4B0
+#define I40E_GLPCI_PMSUP_ASPM_SUP_SHIFT 0
+#define I40E_GLPCI_PMSUP_ASPM_SUP_MASK (0x3 << I40E_GLPCI_PMSUP_ASPM_SUP_SHIFT)
+#define I40E_GLPCI_PMSUP_L0S_EXIT_LAT_SHIFT 2
+#define I40E_GLPCI_PMSUP_L0S_EXIT_LAT_MASK (0x7 << I40E_GLPCI_PMSUP_L0S_EXIT_LAT_SHIFT)
+#define I40E_GLPCI_PMSUP_L1_EXIT_LAT_SHIFT 5
+#define I40E_GLPCI_PMSUP_L1_EXIT_LAT_MASK (0x7 << I40E_GLPCI_PMSUP_L1_EXIT_LAT_SHIFT)
+#define I40E_GLPCI_PMSUP_L0S_ACC_LAT_SHIFT 8
+#define I40E_GLPCI_PMSUP_L0S_ACC_LAT_MASK (0x7 << I40E_GLPCI_PMSUP_L0S_ACC_LAT_SHIFT)
+#define I40E_GLPCI_PMSUP_L1_ACC_LAT_SHIFT 11
+#define I40E_GLPCI_PMSUP_L1_ACC_LAT_MASK (0x7 << I40E_GLPCI_PMSUP_L1_ACC_LAT_SHIFT)
+#define I40E_GLPCI_PMSUP_SLOT_CLK_SHIFT 14
+#define I40E_GLPCI_PMSUP_SLOT_CLK_MASK (0x1 << I40E_GLPCI_PMSUP_SLOT_CLK_SHIFT)
+#define I40E_GLPCI_PMSUP_OBFF_SUP_SHIFT 15
+#define I40E_GLPCI_PMSUP_OBFF_SUP_MASK (0x3 << I40E_GLPCI_PMSUP_OBFF_SUP_SHIFT)
+#define I40E_GLPCI_PWRDATA 0x000BE490
+#define I40E_GLPCI_PWRDATA_D0_POWER_SHIFT 0
+#define I40E_GLPCI_PWRDATA_D0_POWER_MASK (0xFF << I40E_GLPCI_PWRDATA_D0_POWER_SHIFT)
+#define I40E_GLPCI_PWRDATA_COMM_POWER_SHIFT 8
+#define I40E_GLPCI_PWRDATA_COMM_POWER_MASK (0xFF << I40E_GLPCI_PWRDATA_COMM_POWER_SHIFT)
+#define I40E_GLPCI_PWRDATA_D3_POWER_SHIFT 16
+#define I40E_GLPCI_PWRDATA_D3_POWER_MASK (0xFF << I40E_GLPCI_PWRDATA_D3_POWER_SHIFT)
+#define I40E_GLPCI_PWRDATA_DATA_SCALE_SHIFT 24
+#define I40E_GLPCI_PWRDATA_DATA_SCALE_MASK (0x3 << I40E_GLPCI_PWRDATA_DATA_SCALE_SHIFT)
+#define I40E_GLPCI_REVID 0x000BE4B4
+#define I40E_GLPCI_REVID_NVM_REVID_SHIFT 0
+#define I40E_GLPCI_REVID_NVM_REVID_MASK (0xFF << I40E_GLPCI_REVID_NVM_REVID_SHIFT)
+#define I40E_GLPCI_SERH 0x000BE49C
+#define I40E_GLPCI_SERH_SER_NUM_H_SHIFT 0
+#define I40E_GLPCI_SERH_SER_NUM_H_MASK (0xFFFF << I40E_GLPCI_SERH_SER_NUM_H_SHIFT)
+#define I40E_GLPCI_SERL 0x000BE498
+#define I40E_GLPCI_SERL_SER_NUM_L_SHIFT 0
+#define I40E_GLPCI_SERL_SER_NUM_L_MASK (0xFFFFFFFF << I40E_GLPCI_SERL_SER_NUM_L_SHIFT)
+#define I40E_GLPCI_SUBSYSID 0x000BE48C
+#define I40E_GLPCI_SUBSYSID_SUB_VEN_ID_SHIFT 0
+#define I40E_GLPCI_SUBSYSID_SUB_VEN_ID_MASK (0xFFFF << I40E_GLPCI_SUBSYSID_SUB_VEN_ID_SHIFT)
+#define I40E_GLPCI_SUBSYSID_SUB_ID_SHIFT 16
+#define I40E_GLPCI_SUBSYSID_SUB_ID_MASK (0xFFFF << I40E_GLPCI_SUBSYSID_SUB_ID_SHIFT)
+#define I40E_GLPCI_UPADD 0x000BE4F8
+#define I40E_GLPCI_UPADD_ADDRESS_SHIFT 1
+#define I40E_GLPCI_UPADD_ADDRESS_MASK (0x7FFFFFFF << I40E_GLPCI_UPADD_ADDRESS_SHIFT)
+#define I40E_GLPCI_VFSUP 0x000BE4B8
+#define I40E_GLPCI_VFSUP_VF_PREFETCH_SHIFT 0
+#define I40E_GLPCI_VFSUP_VF_PREFETCH_MASK (0x1 << I40E_GLPCI_VFSUP_VF_PREFETCH_SHIFT)
+#define I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT 1
+#define I40E_GLPCI_VFSUP_VR_BAR_TYPE_MASK (0x1 << I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT)
+#define I40E_PF_FUNC_RID 0x0009C000
+#define I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT 0
+#define I40E_PF_FUNC_RID_FUNCTION_NUMBER_MASK (0x7 << I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT)
+#define I40E_PF_FUNC_RID_DEVICE_NUMBER_SHIFT 3
+#define I40E_PF_FUNC_RID_DEVICE_NUMBER_MASK (0x1F << I40E_PF_FUNC_RID_DEVICE_NUMBER_SHIFT)
+#define I40E_PF_FUNC_RID_BUS_NUMBER_SHIFT 8
+#define I40E_PF_FUNC_RID_BUS_NUMBER_MASK (0xFF << I40E_PF_FUNC_RID_BUS_NUMBER_SHIFT)
+#define I40E_PF_PCI_CIAA 0x0009C080
+#define I40E_PF_PCI_CIAA_ADDRESS_SHIFT 0
+#define I40E_PF_PCI_CIAA_ADDRESS_MASK (0xFFF << I40E_PF_PCI_CIAA_ADDRESS_SHIFT)
+#define I40E_PF_PCI_CIAA_VF_NUM_SHIFT 12
+#define I40E_PF_PCI_CIAA_VF_NUM_MASK (0x7F << I40E_PF_PCI_CIAA_VF_NUM_SHIFT)
+#define I40E_PF_PCI_CIAD 0x0009C100
+#define I40E_PF_PCI_CIAD_DATA_SHIFT 0
+#define I40E_PF_PCI_CIAD_DATA_MASK (0xFFFFFFFF << I40E_PF_PCI_CIAD_DATA_SHIFT)
+#define I40E_PFPCI_CLASS 0x000BE400
+#define I40E_PFPCI_CLASS_STORAGE_CLASS_SHIFT 0
+#define I40E_PFPCI_CLASS_STORAGE_CLASS_MASK (0x1 << I40E_PFPCI_CLASS_STORAGE_CLASS_SHIFT)
+#define I40E_PFPCI_CNF 0x000BE000
+#define I40E_PFPCI_CNF_MSI_EN_SHIFT 2
+#define I40E_PFPCI_CNF_MSI_EN_MASK (0x1 << I40E_PFPCI_CNF_MSI_EN_SHIFT)
+#define I40E_PFPCI_CNF_EXROM_DIS_SHIFT 3
+#define I40E_PFPCI_CNF_EXROM_DIS_MASK (0x1 << I40E_PFPCI_CNF_EXROM_DIS_SHIFT)
+#define I40E_PFPCI_CNF_IO_BAR_SHIFT 4
+#define I40E_PFPCI_CNF_IO_BAR_MASK (0x1 << I40E_PFPCI_CNF_IO_BAR_SHIFT)
+#define I40E_PFPCI_CNF_INT_PIN_SHIFT 5
+#define I40E_PFPCI_CNF_INT_PIN_MASK (0x3 << I40E_PFPCI_CNF_INT_PIN_SHIFT)
+#define I40E_PFPCI_FACTPS 0x0009C180
+#define I40E_PFPCI_FACTPS_FUNC_POWER_STATE_SHIFT 0
+#define I40E_PFPCI_FACTPS_FUNC_POWER_STATE_MASK (0x3 << I40E_PFPCI_FACTPS_FUNC_POWER_STATE_SHIFT)
+#define I40E_PFPCI_FACTPS_FUNC_AUX_EN_SHIFT 3
+#define I40E_PFPCI_FACTPS_FUNC_AUX_EN_MASK (0x1 << I40E_PFPCI_FACTPS_FUNC_AUX_EN_SHIFT)
+#define I40E_PFPCI_FUNC 0x000BE200
+#define I40E_PFPCI_FUNC_FUNC_DIS_SHIFT 0
+#define I40E_PFPCI_FUNC_FUNC_DIS_MASK (0x1 << I40E_PFPCI_FUNC_FUNC_DIS_SHIFT)
+#define I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_SHIFT 1
+#define I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_MASK (0x1 << I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_SHIFT)
+#define I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_SHIFT 2
+#define I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_MASK (0x1 << I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_SHIFT)
+#define I40E_PFPCI_FUNC2 0x000BE180
+#define I40E_PFPCI_FUNC2_EMP_FUNC_DIS_SHIFT 0
+#define I40E_PFPCI_FUNC2_EMP_FUNC_DIS_MASK (0x1 << I40E_PFPCI_FUNC2_EMP_FUNC_DIS_SHIFT)
+#define I40E_PFPCI_ICAUSE 0x0009C200
+#define I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_SHIFT 0
+#define I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_MASK (0xFFFFFFFF << I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_SHIFT)
+#define I40E_PFPCI_IENA 0x0009C280
+#define I40E_PFPCI_IENA_PCIE_ERR_EN_SHIFT 0
+#define I40E_PFPCI_IENA_PCIE_ERR_EN_MASK (0xFFFFFFFF << I40E_PFPCI_IENA_PCIE_ERR_EN_SHIFT)
+#define I40E_PFPCI_PFDEVID 0x000BE080
+#define I40E_PFPCI_PFDEVID_PF_DEV_ID_LAN_SHIFT 0
+#define I40E_PFPCI_PFDEVID_PF_DEV_ID_LAN_MASK (0xFFFF << I40E_PFPCI_PFDEVID_PF_DEV_ID_LAN_SHIFT)
+#define I40E_PFPCI_PFDEVID_PF_DEV_ID_SAN_SHIFT 16
+#define I40E_PFPCI_PFDEVID_PF_DEV_ID_SAN_MASK (0xFFFF << I40E_PFPCI_PFDEVID_PF_DEV_ID_SAN_SHIFT)
+#define I40E_PFPCI_PM 0x000BE300
+#define I40E_PFPCI_PM_PME_EN_SHIFT 0
+#define I40E_PFPCI_PM_PME_EN_MASK (0x1 << I40E_PFPCI_PM_PME_EN_SHIFT)
+#define I40E_PFPCI_STATUS1 0x000BE280
+#define I40E_PFPCI_STATUS1_FUNC_VALID_SHIFT 0
+#define I40E_PFPCI_STATUS1_FUNC_VALID_MASK (0x1 << I40E_PFPCI_STATUS1_FUNC_VALID_SHIFT)
+#define I40E_PFPCI_VFDEVID 0x000BE100
+#define I40E_PFPCI_VFDEVID_VF_DEV_ID_LAN_SHIFT 0
+#define I40E_PFPCI_VFDEVID_VF_DEV_ID_LAN_MASK (0xFFFF << I40E_PFPCI_VFDEVID_VF_DEV_ID_LAN_SHIFT)
+#define I40E_PFPCI_VFDEVID_VF_DEV_ID_SAN_SHIFT 16
+#define I40E_PFPCI_VFDEVID_VF_DEV_ID_SAN_MASK (0xFFFF << I40E_PFPCI_VFDEVID_VF_DEV_ID_SAN_SHIFT)
+#define I40E_PFPCI_VMINDEX 0x0009C300
+#define I40E_PFPCI_VMINDEX_VMINDEX_SHIFT 0
+#define I40E_PFPCI_VMINDEX_VMINDEX_MASK (0x1FF << I40E_PFPCI_VMINDEX_VMINDEX_SHIFT)
+#define I40E_PFPCI_VMPEND 0x0009C380
+#define I40E_PFPCI_VMPEND_PENDING_SHIFT 0
+#define I40E_PFPCI_VMPEND_PENDING_MASK (0x1 << I40E_PFPCI_VMPEND_PENDING_SHIFT)
+#define I40E_GLPE_CPUSTATUS0 0x0000D040
+#define I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_SHIFT 0
+#define I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_MASK (0xFFFFFFFF << I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_SHIFT)
+#define I40E_GLPE_CPUSTATUS1 0x0000D044
+#define I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_SHIFT 0
+#define I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_MASK (0xFFFFFFFF << I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_SHIFT)
+#define I40E_GLPE_CPUSTATUS2 0x0000D048
+#define I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_SHIFT 0
+#define I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_MASK (0xFFFFFFFF << I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_SHIFT)
+#define I40E_GLPE_PFFLMOBJCTRL(_i) (0x0000D480 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLPE_PFFLMOBJCTRL_MAX_INDEX 15
+#define I40E_GLPE_PFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT 0
+#define I40E_GLPE_PFFLMOBJCTRL_XMIT_BLOCKSIZE_MASK (0x7 << I40E_GLPE_PFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT)
+#define I40E_GLPE_PFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT 8
+#define I40E_GLPE_PFFLMOBJCTRL_Q1_BLOCKSIZE_MASK (0x7 << I40E_GLPE_PFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT)
+#define I40E_GLPE_VFFLMOBJCTRL(_i) (0x0000D400 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPE_VFFLMOBJCTRL_MAX_INDEX 31
+#define I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT 0
+#define I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_MASK (0x7 << I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT)
+#define I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT 8
+#define I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_MASK (0x7 << I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT)
+#define I40E_GLPE_VFFLMQ1ALLOCERR(_i) (0x0000C700 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPE_VFFLMQ1ALLOCERR_MAX_INDEX 31
+#define I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_MASK (0xFFFF << I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_GLPE_VFFLMXMITALLOCERR(_i) (0x0000C600 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPE_VFFLMXMITALLOCERR_MAX_INDEX 31
+#define I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_MASK (0xFFFF << I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_GLPE_VFUDACTRL(_i) (0x0000C000 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPE_VFUDACTRL_MAX_INDEX 31
+#define I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_SHIFT 0
+#define I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_MASK (0x1 << I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_SHIFT 1
+#define I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_MASK (0x1 << I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_SHIFT 2
+#define I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_MASK (0x1 << I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_SHIFT 3
+#define I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_MASK (0x1 << I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_SHIFT 4
+#define I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_MASK (0x1 << I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_SHIFT)
+#define I40E_GLPE_VFUDAUCFBQPN(_i) (0x0000C100 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPE_VFUDAUCFBQPN_MAX_INDEX 31
+#define I40E_GLPE_VFUDAUCFBQPN_QPN_SHIFT 0
+#define I40E_GLPE_VFUDAUCFBQPN_QPN_MASK (0x3FFFF << I40E_GLPE_VFUDAUCFBQPN_QPN_SHIFT)
+#define I40E_GLPE_VFUDAUCFBQPN_VALID_SHIFT 31
+#define I40E_GLPE_VFUDAUCFBQPN_VALID_MASK (0x1 << I40E_GLPE_VFUDAUCFBQPN_VALID_SHIFT)
+#define I40E_PFPE_AEQALLOC 0x00131180
+#define I40E_PFPE_AEQALLOC_AECOUNT_SHIFT 0
+#define I40E_PFPE_AEQALLOC_AECOUNT_MASK (0xFFFFFFFF << I40E_PFPE_AEQALLOC_AECOUNT_SHIFT)
+#define I40E_PFPE_CCQPHIGH 0x00008200
+#define I40E_PFPE_CCQPHIGH_PECCQPHIGH_SHIFT 0
+#define I40E_PFPE_CCQPHIGH_PECCQPHIGH_MASK (0xFFFFFFFF << I40E_PFPE_CCQPHIGH_PECCQPHIGH_SHIFT)
+#define I40E_PFPE_CCQPLOW 0x00008180
+#define I40E_PFPE_CCQPLOW_PECCQPLOW_SHIFT 0
+#define I40E_PFPE_CCQPLOW_PECCQPLOW_MASK (0xFFFFFFFF << I40E_PFPE_CCQPLOW_PECCQPLOW_SHIFT)
+#define I40E_PFPE_CCQPSTATUS 0x00008100
+#define I40E_PFPE_CCQPSTATUS_CCQP_DONE_SHIFT 0
+#define I40E_PFPE_CCQPSTATUS_CCQP_DONE_MASK (0x1 << I40E_PFPE_CCQPSTATUS_CCQP_DONE_SHIFT)
+#define I40E_PFPE_CCQPSTATUS_CCQP_ERR_SHIFT 31
+#define I40E_PFPE_CCQPSTATUS_CCQP_ERR_MASK (0x1 << I40E_PFPE_CCQPSTATUS_CCQP_ERR_SHIFT)
+#define I40E_PFPE_CQACK 0x00131100
+#define I40E_PFPE_CQACK_PECQID_SHIFT 0
+#define I40E_PFPE_CQACK_PECQID_MASK (0x1FFFF << I40E_PFPE_CQACK_PECQID_SHIFT)
+#define I40E_PFPE_CQARM 0x00131080
+#define I40E_PFPE_CQARM_PECQID_SHIFT 0
+#define I40E_PFPE_CQARM_PECQID_MASK (0x1FFFF << I40E_PFPE_CQARM_PECQID_SHIFT)
+#define I40E_PFPE_CQPDB 0x00008000
+#define I40E_PFPE_CQPDB_WQHEAD_SHIFT 0
+#define I40E_PFPE_CQPDB_WQHEAD_MASK (0x7FF << I40E_PFPE_CQPDB_WQHEAD_SHIFT)
+#define I40E_PFPE_CQPERRCODES 0x00008880
+#define I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT 0
+#define I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_MASK (0xFFFF << I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT)
+#define I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT 16
+#define I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_MASK (0xFFFF << I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT)
+#define I40E_PFPE_CQPTAIL 0x00008080
+#define I40E_PFPE_CQPTAIL_WQTAIL_SHIFT 0
+#define I40E_PFPE_CQPTAIL_WQTAIL_MASK (0x7FF << I40E_PFPE_CQPTAIL_WQTAIL_SHIFT)
+#define I40E_PFPE_CQPTAIL_CQP_OP_ERR_SHIFT 31
+#define I40E_PFPE_CQPTAIL_CQP_OP_ERR_MASK (0x1 << I40E_PFPE_CQPTAIL_CQP_OP_ERR_SHIFT)
+#define I40E_PFPE_FLMQ1ALLOCERR 0x00008980
+#define I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_MASK (0xFFFF << I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_PFPE_FLMXMITALLOCERR 0x00008900
+#define I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_MASK (0xFFFF << I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_PFPE_IPCONFIG0 0x00008280
+#define I40E_PFPE_IPCONFIG0_PEIPID_SHIFT 0
+#define I40E_PFPE_IPCONFIG0_PEIPID_MASK (0xFFFF << I40E_PFPE_IPCONFIG0_PEIPID_SHIFT)
+#define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16
+#define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_MASK (0x1 << I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT)
+
+#define I40E_PFPE_MRTEIDXMASK 0x00008600
+#define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0
+#define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_MASK (0x1F << I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT)
+#define I40E_PFPE_RCVUNEXPECTEDERROR 0x00008680
+#define I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT 0
+#define I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_MASK (0xFFFFFF << I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT)
+#define I40E_PFPE_TCPNOWTIMER 0x00008580
+#define I40E_PFPE_TCPNOWTIMER_TCP_NOW_SHIFT 0
+#define I40E_PFPE_TCPNOWTIMER_TCP_NOW_MASK (0xFFFFFFFF << I40E_PFPE_TCPNOWTIMER_TCP_NOW_SHIFT)
+#define I40E_PFPE_UDACTRL 0x00008700
+#define I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_SHIFT 0
+#define I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_MASK (0x1 << I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_SHIFT)
+#define I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_SHIFT 1
+#define I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_MASK (0x1 << I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_SHIFT)
+#define I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_SHIFT 2
+#define I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_MASK (0x1 << I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_SHIFT)
+#define I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_SHIFT 3
+#define I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_MASK (0x1 << I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_SHIFT)
+#define I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_SHIFT 4
+#define I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_MASK (0x1 << I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_SHIFT)
+#define I40E_PFPE_UDAUCFBQPN 0x00008780
+#define I40E_PFPE_UDAUCFBQPN_QPN_SHIFT 0
+#define I40E_PFPE_UDAUCFBQPN_QPN_MASK (0x3FFFF << I40E_PFPE_UDAUCFBQPN_QPN_SHIFT)
+#define I40E_PFPE_UDAUCFBQPN_VALID_SHIFT 31
+#define I40E_PFPE_UDAUCFBQPN_VALID_MASK (0x1 << I40E_PFPE_UDAUCFBQPN_VALID_SHIFT)
+#define I40E_PFPE_WQEALLOC 0x00138C00
+#define I40E_PFPE_WQEALLOC_PEQPID_SHIFT 0
+#define I40E_PFPE_WQEALLOC_PEQPID_MASK (0x3FFFF << I40E_PFPE_WQEALLOC_PEQPID_SHIFT)
+#define I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT 20
+#define I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_MASK (0xFFF << I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT)
+#define I40E_VFPE_AEQALLOC(_VF) (0x00130C00 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_AEQALLOC_MAX_INDEX 127
+#define I40E_VFPE_AEQALLOC_AECOUNT_SHIFT 0
+#define I40E_VFPE_AEQALLOC_AECOUNT_MASK (0xFFFFFFFF << I40E_VFPE_AEQALLOC_AECOUNT_SHIFT)
+#define I40E_VFPE_CCQPHIGH(_VF) (0x00001000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_CCQPHIGH_MAX_INDEX 127
+#define I40E_VFPE_CCQPHIGH_PECCQPHIGH_SHIFT 0
+#define I40E_VFPE_CCQPHIGH_PECCQPHIGH_MASK (0xFFFFFFFF << I40E_VFPE_CCQPHIGH_PECCQPHIGH_SHIFT)
+#define I40E_VFPE_CCQPLOW(_VF) (0x00000C00 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_CCQPLOW_MAX_INDEX 127
+#define I40E_VFPE_CCQPLOW_PECCQPLOW_SHIFT 0
+#define I40E_VFPE_CCQPLOW_PECCQPLOW_MASK (0xFFFFFFFF << I40E_VFPE_CCQPLOW_PECCQPLOW_SHIFT)
+#define I40E_VFPE_CCQPSTATUS(_VF) (0x00000800 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_CCQPSTATUS_MAX_INDEX 127
+#define I40E_VFPE_CCQPSTATUS_CCQP_DONE_SHIFT 0
+#define I40E_VFPE_CCQPSTATUS_CCQP_DONE_MASK (0x1 << I40E_VFPE_CCQPSTATUS_CCQP_DONE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS_CCQP_ERR_SHIFT 31
+#define I40E_VFPE_CCQPSTATUS_CCQP_ERR_MASK (0x1 << I40E_VFPE_CCQPSTATUS_CCQP_ERR_SHIFT)
+#define I40E_VFPE_CQACK(_VF) (0x00130800 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_CQACK_MAX_INDEX 127
+#define I40E_VFPE_CQACK_PECQID_SHIFT 0
+#define I40E_VFPE_CQACK_PECQID_MASK (0x1FFFF << I40E_VFPE_CQACK_PECQID_SHIFT)
+#define I40E_VFPE_CQARM(_VF) (0x00130400 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_CQARM_MAX_INDEX 127
+#define I40E_VFPE_CQARM_PECQID_SHIFT 0
+#define I40E_VFPE_CQARM_PECQID_MASK (0x1FFFF << I40E_VFPE_CQARM_PECQID_SHIFT)
+#define I40E_VFPE_CQPDB(_VF) (0x00000000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_CQPDB_MAX_INDEX 127
+#define I40E_VFPE_CQPDB_WQHEAD_SHIFT 0
+#define I40E_VFPE_CQPDB_WQHEAD_MASK (0x7FF << I40E_VFPE_CQPDB_WQHEAD_SHIFT)
+#define I40E_VFPE_CQPERRCODES(_VF) (0x00001800 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_CQPERRCODES_MAX_INDEX 127
+#define I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT 0
+#define I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_MASK (0xFFFF << I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT)
+#define I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT 16
+#define I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_MASK (0xFFFF << I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT)
+#define I40E_VFPE_CQPTAIL(_VF) (0x00000400 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_CQPTAIL_MAX_INDEX 127
+#define I40E_VFPE_CQPTAIL_WQTAIL_SHIFT 0
+#define I40E_VFPE_CQPTAIL_WQTAIL_MASK (0x7FF << I40E_VFPE_CQPTAIL_WQTAIL_SHIFT)
+#define I40E_VFPE_CQPTAIL_CQP_OP_ERR_SHIFT 31
+#define I40E_VFPE_CQPTAIL_CQP_OP_ERR_MASK (0x1 << I40E_VFPE_CQPTAIL_CQP_OP_ERR_SHIFT)
+#define I40E_VFPE_IPCONFIG0(_VF) (0x00001400 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_IPCONFIG0_MAX_INDEX 127
+#define I40E_VFPE_IPCONFIG0_PEIPID_SHIFT 0
+#define I40E_VFPE_IPCONFIG0_PEIPID_MASK (0xFFFF << I40E_VFPE_IPCONFIG0_PEIPID_SHIFT)
+#define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16
+#define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_MASK (0x1 << I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT)
+#define I40E_VFPE_MRTEIDXMASK(_VF) (0x00003000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_MRTEIDXMASK_MAX_INDEX 127
+#define I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0
+#define I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_MASK (0x1F << I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT)
+#define I40E_VFPE_RCVUNEXPECTEDERROR(_VF) (0x00003400 + ((_VF) * 4))
+#define I40E_VFPE_RCVUNEXPECTEDERROR_MAX_INDEX 127
+#define I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT 0
+#define I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_MASK (0xFFFFFF << I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT)
+#define I40E_VFPE_TCPNOWTIMER(_VF) (0x00002C00 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_TCPNOWTIMER_MAX_INDEX 127
+#define I40E_VFPE_TCPNOWTIMER_TCP_NOW_SHIFT 0
+#define I40E_VFPE_TCPNOWTIMER_TCP_NOW_MASK (0xFFFFFFFF << I40E_VFPE_TCPNOWTIMER_TCP_NOW_SHIFT)
+#define I40E_VFPE_WQEALLOC(_VF) (0x00138000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_WQEALLOC_MAX_INDEX 127
+#define I40E_VFPE_WQEALLOC_PEQPID_SHIFT 0
+#define I40E_VFPE_WQEALLOC_PEQPID_MASK (0x3FFFF << I40E_VFPE_WQEALLOC_PEQPID_SHIFT)
+#define I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT 20
+#define I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_MASK (0xFFF << I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT)
+#define I40E_GLPES_PFIP4RXDISCARD(_i) (0x00010600 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4RXDISCARD_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_SHIFT 0
+#define I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_SHIFT)
+#define I40E_GLPES_PFIP4RXFRAGSHI(_i) (0x00010804 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4RXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_MASK (0xFFFF << I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXFRAGSLO(_i) (0x00010800 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4RXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXMCOCTSHI(_i) (0x00010A04 + ((_i) * 8))
+#define I40E_GLPES_PFIP4RXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXMCOCTSLO(_i) (0x00010A00 + ((_i) * 8))
+#define I40E_GLPES_PFIP4RXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXMCPKTSHI(_i) (0x00010C04 + ((_i) * 8))
+#define I40E_GLPES_PFIP4RXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXMCPKTSLO(_i) (0x00010C00 + ((_i) * 8))
+#define I40E_GLPES_PFIP4RXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXOCTSHI(_i) (0x00010204 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4RXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXOCTSLO(_i) (0x00010200 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4RXOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXPKTSHI(_i) (0x00010404 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4RXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXPKTSLO(_i) (0x00010400 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4RXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXTRUNC(_i) (0x00010700 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4RXTRUNC_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_SHIFT 0
+#define I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_SHIFT)
+#define I40E_GLPES_PFIP4TXFRAGSHI(_i) (0x00011E04 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4TXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_MASK (0xFFFF << I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXFRAGSLO(_i) (0x00011E00 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4TXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXMCOCTSHI(_i) (0x00012004 + ((_i) * 8))
+#define I40E_GLPES_PFIP4TXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXMCOCTSLO(_i) (0x00012000 + ((_i) * 8))
+#define I40E_GLPES_PFIP4TXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXMCPKTSHI(_i) (0x00012204 + ((_i) * 8))
+#define I40E_GLPES_PFIP4TXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXMCPKTSLO(_i) (0x00012200 + ((_i) * 8))
+#define I40E_GLPES_PFIP4TXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXNOROUTE(_i) (0x00012E00 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4TXNOROUTE_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT 0
+#define I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_MASK (0xFFFFFF << I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT)
+#define I40E_GLPES_PFIP4TXOCTSHI(_i) (0x00011A04 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4TXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXOCTSLO(_i) (0x00011A00 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4TXOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXPKTSHI(_i) (0x00011C04 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4TXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXPKTSLO(_i) (0x00011C00 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4TXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXDISCARD(_i) (0x00011200 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6RXDISCARD_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_SHIFT 0
+#define I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_SHIFT)
+#define I40E_GLPES_PFIP6RXFRAGSHI(_i) (0x00011404 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6RXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_MASK (0xFFFF << I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXFRAGSLO(_i) (0x00011400 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6RXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXMCOCTSHI(_i) (0x00011604 + ((_i) * 8))
+#define I40E_GLPES_PFIP6RXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXMCOCTSLO(_i) (0x00011600 + ((_i) * 8))
+#define I40E_GLPES_PFIP6RXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXMCPKTSHI(_i) (0x00011804 + ((_i) * 8))
+#define I40E_GLPES_PFIP6RXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXMCPKTSLO(_i) (0x00011800 + ((_i) * 8))
+#define I40E_GLPES_PFIP6RXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXOCTSHI(_i) (0x00010E04 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6RXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXOCTSLO(_i) (0x00010E00 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6RXOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXPKTSHI(_i) (0x00011004 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6RXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXPKTSLO(_i) (0x00011000 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6RXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXTRUNC(_i) (0x00011300 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6RXTRUNC_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_SHIFT 0
+#define I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_SHIFT)
+#define I40E_GLPES_PFIP6TXFRAGSHI(_i) (0x00012804 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6TXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_MASK (0xFFFF << I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXFRAGSLO(_i) (0x00012800 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6TXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXMCOCTSHI(_i) (0x00012A04 + ((_i) * 8))
+#define I40E_GLPES_PFIP6TXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXMCOCTSLO(_i) (0x00012A00 + ((_i) * 8))
+#define I40E_GLPES_PFIP6TXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXMCPKTSHI(_i) (0x00012C04 + ((_i) * 8))
+#define I40E_GLPES_PFIP6TXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXMCPKTSLO(_i) (0x00012C00 + ((_i) * 8))
+#define I40E_GLPES_PFIP6TXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXNOROUTE(_i) (0x00012F00 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6TXNOROUTE_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT 0
+#define I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_MASK (0xFFFFFF << I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT)
+#define I40E_GLPES_PFIP6TXOCTSHI(_i) (0x00012404 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6TXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXOCTSLO(_i) (0x00012400 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6TXOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXPKTSHI(_i) (0x00012604 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6TXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXPKTSLO(_i) (0x00012600 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6TXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT)
+#define I40E_GLPES_PFRDMARXRDSHI(_i) (0x00013E04 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMARXRDSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_MASK (0xFFFF << I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_PFRDMARXRDSLO(_i) (0x00013E00 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMARXRDSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_PFRDMARXSNDSHI(_i) (0x00014004 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMARXSNDSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_MASK (0xFFFF << I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_PFRDMARXSNDSLO(_i) (0x00014000 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMARXSNDSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_PFRDMARXWRSHI(_i) (0x00013C04 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMARXWRSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_MASK (0xFFFF << I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_PFRDMARXWRSLO(_i) (0x00013C00 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMARXWRSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_PFRDMATXRDSHI(_i) (0x00014404 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMATXRDSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_MASK (0xFFFF << I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_PFRDMATXRDSLO(_i) (0x00014400 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMATXRDSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_PFRDMATXSNDSHI(_i) (0x00014604 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMATXSNDSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_MASK (0xFFFF << I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_PFRDMATXSNDSLO(_i) (0x00014600 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMATXSNDSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_PFRDMATXWRSHI(_i) (0x00014204 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMATXWRSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_MASK (0xFFFF << I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_PFRDMATXWRSLO(_i) (0x00014200 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMATXWRSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_PFRDMAVBNDHI(_i) (0x00014804 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMAVBNDHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_SHIFT 0
+#define I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_SHIFT)
+#define I40E_GLPES_PFRDMAVBNDLO(_i) (0x00014800 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMAVBNDLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_SHIFT 0
+#define I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_SHIFT)
+#define I40E_GLPES_PFRDMAVINVHI(_i) (0x00014A04 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMAVINVHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_SHIFT 0
+#define I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_SHIFT)
+#define I40E_GLPES_PFRDMAVINVLO(_i) (0x00014A00 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMAVINVLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_SHIFT 0
+#define I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_SHIFT)
+#define I40E_GLPES_PFRXVLANERR(_i) (0x00010000 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLPES_PFRXVLANERR_MAX_INDEX 15
+#define I40E_GLPES_PFRXVLANERR_RXVLANERR_SHIFT 0
+#define I40E_GLPES_PFRXVLANERR_RXVLANERR_MASK (0xFFFFFF << I40E_GLPES_PFRXVLANERR_RXVLANERR_SHIFT)
+#define I40E_GLPES_PFTCPRTXSEG(_i) (0x00013600 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLPES_PFTCPRTXSEG_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_SHIFT 0
+#define I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_MASK (0xFFFFFFFF << I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_SHIFT)
+#define I40E_GLPES_PFTCPRXOPTERR(_i) (0x00013200 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLPES_PFTCPRXOPTERR_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_SHIFT 0
+#define I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_MASK (0xFFFFFF << I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_SHIFT)
+#define I40E_GLPES_PFTCPRXPROTOERR(_i) (0x00013300 + ((_i) * 4))
+#define I40E_GLPES_PFTCPRXPROTOERR_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT 0
+#define I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_MASK (0xFFFFFF << I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT)
+#define I40E_GLPES_PFTCPRXSEGSHI(_i) (0x00013004 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFTCPRXSEGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT 0
+#define I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_MASK (0xFFFF << I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT)
+#define I40E_GLPES_PFTCPRXSEGSLO(_i) (0x00013000 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFTCPRXSEGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT 0
+#define I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT)
+#define I40E_GLPES_PFTCPTXSEGHI(_i) (0x00013404 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFTCPTXSEGHI_MAX_INDEX 15
+#define I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_SHIFT 0
+#define I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_MASK (0xFFFF << I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_SHIFT)
+#define I40E_GLPES_PFTCPTXSEGLO(_i) (0x00013400 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFTCPTXSEGLO_MAX_INDEX 15
+#define I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_SHIFT 0
+#define I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_MASK (0xFFFFFFFF << I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_SHIFT)
+#define I40E_GLPES_PFUDPRXPKTSHI(_i) (0x00013804 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFUDPRXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT)
+#define I40E_GLPES_PFUDPRXPKTSLO(_i) (0x00013800 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFUDPRXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT)
+#define I40E_GLPES_PFUDPTXPKTSHI(_i) (0x00013A04 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFUDPTXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT)
+#define I40E_GLPES_PFUDPTXPKTSLO(_i) (0x00013A00 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFUDPTXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT)
+#define I40E_GLPES_RDMARXMULTFPDUSHI 0x0001E014
+#define I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_SHIFT 0
+#define I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_MASK (0xFFFFFF << I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_SHIFT)
+#define I40E_GLPES_RDMARXMULTFPDUSLO 0x0001E010
+#define I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_SHIFT 0
+#define I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_MASK (0xFFFFFFFF << I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_SHIFT)
+#define I40E_GLPES_RDMARXOOODDPHI 0x0001E01C
+#define I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_SHIFT 0
+#define I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_MASK (0xFFFFFF << I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_SHIFT)
+#define I40E_GLPES_RDMARXOOODDPLO 0x0001E018
+#define I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_SHIFT 0
+#define I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_MASK (0xFFFFFFFF << I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_SHIFT)
+#define I40E_GLPES_RDMARXOOONOMARK 0x0001E004
+#define I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_SHIFT 0
+#define I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_MASK (0xFFFFFFFF << I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_SHIFT)
+#define I40E_GLPES_RDMARXUNALIGN 0x0001E000
+#define I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_SHIFT 0
+#define I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_MASK (0xFFFFFFFF << I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_SHIFT)
+#define I40E_GLPES_TCPRXFOURHOLEHI 0x0001E044
+#define I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_MASK (0xFFFFFF << I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXFOURHOLELO 0x0001E040
+#define I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_MASK (0xFFFFFFFF << I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_SHIFT)
+#define I40E_GLPES_TCPRXONEHOLEHI 0x0001E02C
+#define I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_MASK (0xFFFFFF << I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXONEHOLELO 0x0001E028
+#define I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_MASK (0xFFFFFFFF << I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_SHIFT)
+#define I40E_GLPES_TCPRXPUREACKHI 0x0001E024
+#define I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_SHIFT 0
+#define I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_MASK (0xFFFFFF << I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_SHIFT)
+#define I40E_GLPES_TCPRXPUREACKSLO 0x0001E020
+#define I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_SHIFT 0
+#define I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_MASK (0xFFFFFFFF << I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_SHIFT)
+#define I40E_GLPES_TCPRXTHREEHOLEHI 0x0001E03C
+#define I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_MASK (0xFFFFFF << I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXTHREEHOLELO 0x0001E038
+#define I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_MASK (0xFFFFFFFF << I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_SHIFT)
+#define I40E_GLPES_TCPRXTWOHOLEHI 0x0001E034
+#define I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_MASK (0xFFFFFF << I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXTWOHOLELO 0x0001E030
+#define I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_MASK (0xFFFFFFFF << I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_SHIFT)
+#define I40E_GLPES_TCPRXUNEXPERR 0x0001E008
+#define I40E_GLPES_TCPRXUNEXPERR_TCPRXUNEXPERR_SHIFT 0
+#define I40E_GLPES_TCPRXUNEXPERR_TCPRXUNEXPERR_MASK (0xFFFFFF << I40E_GLPES_TCPRXUNEXPERR_TCPRXUNEXPERR_SHIFT)
+#define I40E_GLPES_TCPTXRETRANSFASTHI 0x0001E04C
+#define I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_SHIFT 0
+#define I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_MASK (0xFFFFFF << I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_SHIFT)
+#define I40E_GLPES_TCPTXRETRANSFASTLO 0x0001E048
+#define I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_SHIFT 0
+#define I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_MASK (0xFFFFFFFF << I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSFASTHI 0x0001E054
+#define I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_MASK (0xFFFFFF << I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSFASTLO 0x0001E050
+#define I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_MASK (0xFFFFFFFF << I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSHI 0x0001E05C
+#define I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_MASK (0xFFFFFF << I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSLO 0x0001E058
+#define I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_MASK (0xFFFFFFFF << I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXDISCARD(_i) (0x00018600 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4RXDISCARD_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_SHIFT 0
+#define I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_SHIFT)
+#define I40E_GLPES_VFIP4RXFRAGSHI(_i) (0x00018804 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4RXFRAGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXFRAGSLO(_i) (0x00018800 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4RXFRAGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXMCOCTSHI(_i) (0x00018A04 + ((_i) * 4))
+#define I40E_GLPES_VFIP4RXMCOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXMCOCTSLO(_i) (0x00018A00 + ((_i) * 4))
+#define I40E_GLPES_VFIP4RXMCOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXMCPKTSHI(_i) (0x00018C04 + ((_i) * 4))
+#define I40E_GLPES_VFIP4RXMCPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXMCPKTSLO(_i) (0x00018C00 + ((_i) * 4))
+#define I40E_GLPES_VFIP4RXMCPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXOCTSHI(_i) (0x00018204 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4RXOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXOCTSLO(_i) (0x00018200 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4RXOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXPKTSHI(_i) (0x00018404 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4RXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXPKTSLO(_i) (0x00018400 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4RXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXTRUNC(_i) (0x00018700 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4RXTRUNC_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_SHIFT 0
+#define I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_SHIFT)
+#define I40E_GLPES_VFIP4TXFRAGSHI(_i) (0x00019E04 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4TXFRAGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXFRAGSLO(_i) (0x00019E00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4TXFRAGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXMCOCTSHI(_i) (0x0001A004 + ((_i) * 4))
+#define I40E_GLPES_VFIP4TXMCOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXMCOCTSLO(_i) (0x0001A000 + ((_i) * 4))
+#define I40E_GLPES_VFIP4TXMCOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXMCPKTSHI(_i) (0x0001A204 + ((_i) * 4))
+#define I40E_GLPES_VFIP4TXMCPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXMCPKTSLO(_i) (0x0001A200 + ((_i) * 4))
+#define I40E_GLPES_VFIP4TXMCPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXNOROUTE(_i) (0x0001AE00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4TXNOROUTE_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT 0
+#define I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_MASK (0xFFFFFF << I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT)
+#define I40E_GLPES_VFIP4TXOCTSHI(_i) (0x00019A04 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4TXOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXOCTSLO(_i) (0x00019A00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4TXOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXPKTSHI(_i) (0x00019C04 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4TXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXPKTSLO(_i) (0x00019C00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4TXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXDISCARD(_i) (0x00019200 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6RXDISCARD_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_SHIFT 0
+#define I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_SHIFT)
+#define I40E_GLPES_VFIP6RXFRAGSHI(_i) (0x00019404 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6RXFRAGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXFRAGSLO(_i) (0x00019400 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6RXFRAGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXMCOCTSHI(_i) (0x00019604 + ((_i) * 4))
+#define I40E_GLPES_VFIP6RXMCOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXMCOCTSLO(_i) (0x00019600 + ((_i) * 4))
+#define I40E_GLPES_VFIP6RXMCOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXMCPKTSHI(_i) (0x00019804 + ((_i) * 4))
+#define I40E_GLPES_VFIP6RXMCPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXMCPKTSLO(_i) (0x00019800 + ((_i) * 4))
+#define I40E_GLPES_VFIP6RXMCPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXOCTSHI(_i) (0x00018E04 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6RXOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXOCTSLO(_i) (0x00018E00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6RXOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXPKTSHI(_i) (0x00019004 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6RXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXPKTSLO(_i) (0x00019000 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6RXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXTRUNC(_i) (0x00019300 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6RXTRUNC_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_SHIFT 0
+#define I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_SHIFT)
+#define I40E_GLPES_VFIP6TXFRAGSHI(_i) (0x0001A804 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6TXFRAGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXFRAGSLO(_i) (0x0001A800 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6TXFRAGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXMCOCTSHI(_i) (0x0001AA04 + ((_i) * 4))
+#define I40E_GLPES_VFIP6TXMCOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXMCOCTSLO(_i) (0x0001AA00 + ((_i) * 4))
+#define I40E_GLPES_VFIP6TXMCOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXMCPKTSHI(_i) (0x0001AC04 + ((_i) * 4))
+#define I40E_GLPES_VFIP6TXMCPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXMCPKTSLO(_i) (0x0001AC00 + ((_i) * 4))
+#define I40E_GLPES_VFIP6TXMCPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXNOROUTE(_i) (0x0001AF00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6TXNOROUTE_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT 0
+#define I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_MASK (0xFFFFFF << I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT)
+#define I40E_GLPES_VFIP6TXOCTSHI(_i) (0x0001A404 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6TXOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXOCTSLO(_i) (0x0001A400 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6TXOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXPKTSHI(_i) (0x0001A604 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6TXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXPKTSLO(_i) (0x0001A600 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6TXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT)
+#define I40E_GLPES_VFRDMARXRDSHI(_i) (0x0001BE04 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMARXRDSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_MASK (0xFFFF << I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_VFRDMARXRDSLO(_i) (0x0001BE00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMARXRDSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_VFRDMARXSNDSHI(_i) (0x0001C004 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMARXSNDSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_MASK (0xFFFF << I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_VFRDMARXSNDSLO(_i) (0x0001C000 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMARXSNDSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_VFRDMARXWRSHI(_i) (0x0001BC04 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMARXWRSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_MASK (0xFFFF << I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_VFRDMARXWRSLO(_i) (0x0001BC00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMARXWRSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_VFRDMATXRDSHI(_i) (0x0001C404 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMATXRDSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_MASK (0xFFFF << I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_VFRDMATXRDSLO(_i) (0x0001C400 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMATXRDSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_VFRDMATXSNDSHI(_i) (0x0001C604 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMATXSNDSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_MASK (0xFFFF << I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_VFRDMATXSNDSLO(_i) (0x0001C600 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMATXSNDSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_VFRDMATXWRSHI(_i) (0x0001C204 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMATXWRSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_MASK (0xFFFF << I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_VFRDMATXWRSLO(_i) (0x0001C200 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMATXWRSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_VFRDMAVBNDHI(_i) (0x0001C804 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMAVBNDHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_SHIFT 0
+#define I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_SHIFT)
+#define I40E_GLPES_VFRDMAVBNDLO(_i) (0x0001C800 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMAVBNDLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_SHIFT 0
+#define I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_SHIFT)
+#define I40E_GLPES_VFRDMAVINVHI(_i) (0x0001CA04 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMAVINVHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_SHIFT 0
+#define I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_SHIFT)
+#define I40E_GLPES_VFRDMAVINVLO(_i) (0x0001CA00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMAVINVLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_SHIFT 0
+#define I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_SHIFT)
+#define I40E_GLPES_VFRXVLANERR(_i) (0x00018000 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRXVLANERR_MAX_INDEX 31
+#define I40E_GLPES_VFRXVLANERR_RXVLANERR_SHIFT 0
+#define I40E_GLPES_VFRXVLANERR_RXVLANERR_MASK (0xFFFFFF << I40E_GLPES_VFRXVLANERR_RXVLANERR_SHIFT)
+#define I40E_GLPES_VFTCPRTXSEG(_i) (0x0001B600 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFTCPRTXSEG_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_SHIFT 0
+#define I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_MASK (0xFFFFFFFF << I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_SHIFT)
+#define I40E_GLPES_VFTCPRXOPTERR(_i) (0x0001B200 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFTCPRXOPTERR_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_SHIFT 0
+#define I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_MASK (0xFFFFFF << I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_SHIFT)
+#define I40E_GLPES_VFTCPRXPROTOERR(_i) (0x0001B300 + ((_i) * 4))
+#define I40E_GLPES_VFTCPRXPROTOERR_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT 0
+#define I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_MASK (0xFFFFFF << I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT)
+#define I40E_GLPES_VFTCPRXSEGSHI(_i) (0x0001B004 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFTCPRXSEGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT 0
+#define I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_MASK (0xFFFF << I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT)
+#define I40E_GLPES_VFTCPRXSEGSLO(_i) (0x0001B000 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFTCPRXSEGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT 0
+#define I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT)
+#define I40E_GLPES_VFTCPTXSEGHI(_i) (0x0001B404 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFTCPTXSEGHI_MAX_INDEX 31
+#define I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_SHIFT 0
+#define I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_MASK (0xFFFF << I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_SHIFT)
+#define I40E_GLPES_VFTCPTXSEGLO(_i) (0x0001B400 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFTCPTXSEGLO_MAX_INDEX 31
+#define I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_SHIFT 0
+#define I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_MASK (0xFFFFFFFF << I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_SHIFT)
+#define I40E_GLPES_VFUDPRXPKTSHI(_i) (0x0001B804 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFUDPRXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT)
+#define I40E_GLPES_VFUDPRXPKTSLO(_i) (0x0001B800 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFUDPRXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT)
+#define I40E_GLPES_VFUDPTXPKTSHI(_i) (0x0001BA04 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFUDPTXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT)
+#define I40E_GLPES_VFUDPTXPKTSLO(_i) (0x0001BA00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFUDPTXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT)
+#define I40E_PRTPM_EEE_STAT 0x001E4320
+#define I40E_PRTPM_EEE_STAT_EEE_NEG_SHIFT 29
+#define I40E_PRTPM_EEE_STAT_EEE_NEG_MASK (0x1 << I40E_PRTPM_EEE_STAT_EEE_NEG_SHIFT)
+#define I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT 30
+#define I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK (0x1 << I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT)
+#define I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT 31
+#define I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK (0x1 << I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT)
+#define I40E_PRTPM_EEEC 0x001E4380
+#define I40E_PRTPM_EEEC_TW_WAKE_MIN_SHIFT 16
+#define I40E_PRTPM_EEEC_TW_WAKE_MIN_MASK (0x3F << I40E_PRTPM_EEEC_TW_WAKE_MIN_SHIFT)
+#define I40E_PRTPM_EEEC_TX_LU_LPI_DLY_SHIFT 24
+#define I40E_PRTPM_EEEC_TX_LU_LPI_DLY_MASK (0x3 << I40E_PRTPM_EEEC_TX_LU_LPI_DLY_SHIFT)
+#define I40E_PRTPM_EEEC_TEEE_DLY_SHIFT 26
+#define I40E_PRTPM_EEEC_TEEE_DLY_MASK (0x3F << I40E_PRTPM_EEEC_TEEE_DLY_SHIFT)
+#define I40E_PRTPM_EEEFWD 0x001E4400
+#define I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_SHIFT 31
+#define I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_MASK (0x1 << I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_SHIFT)
+#define I40E_PRTPM_EEER 0x001E4360
+#define I40E_PRTPM_EEER_TW_SYSTEM_SHIFT 0
+#define I40E_PRTPM_EEER_TW_SYSTEM_MASK (0xFFFF << I40E_PRTPM_EEER_TW_SYSTEM_SHIFT)
+#define I40E_PRTPM_EEER_TX_LPI_EN_SHIFT 16
+#define I40E_PRTPM_EEER_TX_LPI_EN_MASK (0x1 << I40E_PRTPM_EEER_TX_LPI_EN_SHIFT)
+#define I40E_PRTPM_EEETXC 0x001E43E0
+#define I40E_PRTPM_EEETXC_TW_PHY_SHIFT 0
+#define I40E_PRTPM_EEETXC_TW_PHY_MASK (0xFFFF << I40E_PRTPM_EEETXC_TW_PHY_SHIFT)
+#define I40E_PRTPM_GC 0x000B8140
+#define I40E_PRTPM_GC_EMP_LINK_ON_SHIFT 0
+#define I40E_PRTPM_GC_EMP_LINK_ON_MASK (0x1 << I40E_PRTPM_GC_EMP_LINK_ON_SHIFT)
+#define I40E_PRTPM_GC_MNG_VETO_SHIFT 1
+#define I40E_PRTPM_GC_MNG_VETO_MASK (0x1 << I40E_PRTPM_GC_MNG_VETO_SHIFT)
+#define I40E_PRTPM_GC_RATD_SHIFT 2
+#define I40E_PRTPM_GC_RATD_MASK (0x1 << I40E_PRTPM_GC_RATD_SHIFT)
+#define I40E_PRTPM_GC_LCDMP_SHIFT 3
+#define I40E_PRTPM_GC_LCDMP_MASK (0x1 << I40E_PRTPM_GC_LCDMP_SHIFT)
+#define I40E_PRTPM_GC_LPLU_ASSERTED_SHIFT 31
+#define I40E_PRTPM_GC_LPLU_ASSERTED_MASK (0x1 << I40E_PRTPM_GC_LPLU_ASSERTED_SHIFT)
+#define I40E_PRTPM_RLPIC 0x001E43A0
+#define I40E_PRTPM_RLPIC_ERLPIC_SHIFT 0
+#define I40E_PRTPM_RLPIC_ERLPIC_MASK (0xFFFFFFFF << I40E_PRTPM_RLPIC_ERLPIC_SHIFT)
+#define I40E_PRTPM_TLPIC 0x001E43C0
+#define I40E_PRTPM_TLPIC_ETLPIC_SHIFT 0
+#define I40E_PRTPM_TLPIC_ETLPIC_MASK (0xFFFFFFFF << I40E_PRTPM_TLPIC_ETLPIC_SHIFT)
+#define I40E_GLRPB_DPSS 0x000AC828
+#define I40E_GLRPB_DPSS_DPS_TCN_SHIFT 0
+#define I40E_GLRPB_DPSS_DPS_TCN_MASK (0xFFFFF << I40E_GLRPB_DPSS_DPS_TCN_SHIFT)
+#define I40E_GLRPB_GHW 0x000AC830
+#define I40E_GLRPB_GHW_GHW_SHIFT 0
+#define I40E_GLRPB_GHW_GHW_MASK (0xFFFFF << I40E_GLRPB_GHW_GHW_SHIFT)
+#define I40E_GLRPB_GLW 0x000AC834
+#define I40E_GLRPB_GLW_GLW_SHIFT 0
+#define I40E_GLRPB_GLW_GLW_MASK (0xFFFFF << I40E_GLRPB_GLW_GLW_SHIFT)
+#define I40E_GLRPB_PHW 0x000AC844
+#define I40E_GLRPB_PHW_PHW_SHIFT 0
+#define I40E_GLRPB_PHW_PHW_MASK (0xFFFFF << I40E_GLRPB_PHW_PHW_SHIFT)
+#define I40E_GLRPB_PLW 0x000AC848
+#define I40E_GLRPB_PLW_PLW_SHIFT 0
+#define I40E_GLRPB_PLW_PLW_MASK (0xFFFFF << I40E_GLRPB_PLW_PLW_SHIFT)
+#define I40E_PRTRPB_DHW(_i) (0x000AC100 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTRPB_DHW_MAX_INDEX 7
+#define I40E_PRTRPB_DHW_DHW_TCN_SHIFT 0
+#define I40E_PRTRPB_DHW_DHW_TCN_MASK (0xFFFFF << I40E_PRTRPB_DHW_DHW_TCN_SHIFT)
+#define I40E_PRTRPB_DLW(_i) (0x000AC220 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTRPB_DLW_MAX_INDEX 7
+#define I40E_PRTRPB_DLW_DLW_TCN_SHIFT 0
+#define I40E_PRTRPB_DLW_DLW_TCN_MASK (0xFFFFF << I40E_PRTRPB_DLW_DLW_TCN_SHIFT)
+#define I40E_PRTRPB_DPS(_i) (0x000AC320 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTRPB_DPS_MAX_INDEX 7
+#define I40E_PRTRPB_DPS_DPS_TCN_SHIFT 0
+#define I40E_PRTRPB_DPS_DPS_TCN_MASK (0xFFFFF << I40E_PRTRPB_DPS_DPS_TCN_SHIFT)
+#define I40E_PRTRPB_SHT(_i) (0x000AC480 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTRPB_SHT_MAX_INDEX 7
+#define I40E_PRTRPB_SHT_SHT_TCN_SHIFT 0
+#define I40E_PRTRPB_SHT_SHT_TCN_MASK (0xFFFFF << I40E_PRTRPB_SHT_SHT_TCN_SHIFT)
+#define I40E_PRTRPB_SHW 0x000AC580
+#define I40E_PRTRPB_SHW_SHW_SHIFT 0
+#define I40E_PRTRPB_SHW_SHW_MASK (0xFFFFF << I40E_PRTRPB_SHW_SHW_SHIFT)
+#define I40E_PRTRPB_SLT(_i) (0x000AC5A0 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTRPB_SLT_MAX_INDEX 7
+#define I40E_PRTRPB_SLT_SLT_TCN_SHIFT 0
+#define I40E_PRTRPB_SLT_SLT_TCN_MASK (0xFFFFF << I40E_PRTRPB_SLT_SLT_TCN_SHIFT)
+#define I40E_PRTRPB_SLW 0x000AC6A0
+#define I40E_PRTRPB_SLW_SLW_SHIFT 0
+#define I40E_PRTRPB_SLW_SLW_MASK (0xFFFFF << I40E_PRTRPB_SLW_SLW_SHIFT)
+#define I40E_PRTRPB_SPS 0x000AC7C0
+#define I40E_PRTRPB_SPS_SPS_SHIFT 0
+#define I40E_PRTRPB_SPS_SPS_MASK (0xFFFFF << I40E_PRTRPB_SPS_SPS_SHIFT)
+#define I40E_GLQF_APBVT(_i) (0x00260000 + ((_i) * 4)) /* _i=0...2047 */
+#define I40E_GLQF_APBVT_MAX_INDEX 2047
+#define I40E_GLQF_APBVT_APBVT_SHIFT 0
+#define I40E_GLQF_APBVT_APBVT_MASK (0xFFFFFFFF << I40E_GLQF_APBVT_APBVT_SHIFT)
+#define I40E_GLQF_CTL 0x00269BA4
+#define I40E_GLQF_CTL_HTOEP_SHIFT 1
+#define I40E_GLQF_CTL_HTOEP_MASK (0x1 << I40E_GLQF_CTL_HTOEP_SHIFT)
+#define I40E_GLQF_CTL_HTOEP_FCOE_SHIFT 2
+#define I40E_GLQF_CTL_HTOEP_FCOE_MASK (0x1 << I40E_GLQF_CTL_HTOEP_FCOE_SHIFT)
+#define I40E_GLQF_CTL_PCNT_ALLOC_SHIFT 3
+#define I40E_GLQF_CTL_PCNT_ALLOC_MASK (0x7 << I40E_GLQF_CTL_PCNT_ALLOC_SHIFT)
+#define I40E_GLQF_CTL_RSVD_SHIFT 7
+#define I40E_GLQF_CTL_RSVD_MASK (0x1 << I40E_GLQF_CTL_RSVD_SHIFT)
+#define I40E_GLQF_CTL_MAXPEBLEN_SHIFT 8
+#define I40E_GLQF_CTL_MAXPEBLEN_MASK (0x7 << I40E_GLQF_CTL_MAXPEBLEN_SHIFT)
+#define I40E_GLQF_CTL_MAXFCBLEN_SHIFT 11
+#define I40E_GLQF_CTL_MAXFCBLEN_MASK (0x7 << I40E_GLQF_CTL_MAXFCBLEN_SHIFT)
+#define I40E_GLQF_CTL_MAXFDBLEN_SHIFT 14
+#define I40E_GLQF_CTL_MAXFDBLEN_MASK (0x7 << I40E_GLQF_CTL_MAXFDBLEN_SHIFT)
+#define I40E_GLQF_CTL_FDBEST_SHIFT 17
+#define I40E_GLQF_CTL_FDBEST_MASK (0xFF << I40E_GLQF_CTL_FDBEST_SHIFT)
+#define I40E_GLQF_CTL_PROGPRIO_SHIFT 25
+#define I40E_GLQF_CTL_PROGPRIO_MASK (0x1 << I40E_GLQF_CTL_PROGPRIO_SHIFT)
+#define I40E_GLQF_CTL_INVALPRIO_SHIFT 26
+#define I40E_GLQF_CTL_INVALPRIO_MASK (0x1 << I40E_GLQF_CTL_INVALPRIO_SHIFT)
+#define I40E_GLQF_CTL_IGNORE_IP_SHIFT 27
+#define I40E_GLQF_CTL_IGNORE_IP_MASK (0x1 << I40E_GLQF_CTL_IGNORE_IP_SHIFT)
+#define I40E_GLQF_FDCNT_0 0x00269BAC
+#define I40E_GLQF_FDCNT_0_GUARANT_CNT_SHIFT 0
+#define I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK (0x1FFF << I40E_GLQF_FDCNT_0_GUARANT_CNT_SHIFT)
+#define I40E_GLQF_FDCNT_0_BESTCNT_SHIFT 13
+#define I40E_GLQF_FDCNT_0_BESTCNT_MASK (0x1FFF << I40E_GLQF_FDCNT_0_BESTCNT_SHIFT)
+#define I40E_GLQF_HSYM(_i) (0x00269D00 + ((_i) * 4)) /* _i=0...63 */
+#define I40E_GLQF_HSYM_MAX_INDEX 63
+#define I40E_GLQF_HSYM_SYMH_ENA_SHIFT 0
+#define I40E_GLQF_HSYM_SYMH_ENA_MASK (0x1 << I40E_GLQF_HSYM_SYMH_ENA_SHIFT)
+#define I40E_GLQF_PCNT(_i) (0x00266800 + ((_i) * 4)) /* _i=0...511 */
+#define I40E_GLQF_PCNT_MAX_INDEX 511
+#define I40E_GLQF_PCNT_PCNT_SHIFT 0
+#define I40E_GLQF_PCNT_PCNT_MASK (0xFFFFFFFF << I40E_GLQF_PCNT_PCNT_SHIFT)
+#define I40E_GLQF_SWAP(_i, _j) (0x00267E00 + ((_i) * 4 + (_j) * 8)) /* _i=0...1, _j=0...63 */
+#define I40E_GLQF_SWAP_MAX_INDEX 1
+#define I40E_GLQF_SWAP_OFF0_SRC0_SHIFT 0
+#define I40E_GLQF_SWAP_OFF0_SRC0_MASK (0x3F << I40E_GLQF_SWAP_OFF0_SRC0_SHIFT)
+#define I40E_GLQF_SWAP_OFF0_SRC1_SHIFT 6
+#define I40E_GLQF_SWAP_OFF0_SRC1_MASK (0x3F << I40E_GLQF_SWAP_OFF0_SRC1_SHIFT)
+#define I40E_GLQF_SWAP_FLEN0_SHIFT 12
+#define I40E_GLQF_SWAP_FLEN0_MASK (0xF << I40E_GLQF_SWAP_FLEN0_SHIFT)
+#define I40E_GLQF_SWAP_OFF1_SRC0_SHIFT 16
+#define I40E_GLQF_SWAP_OFF1_SRC0_MASK (0x3F << I40E_GLQF_SWAP_OFF1_SRC0_SHIFT)
+#define I40E_GLQF_SWAP_OFF1_SRC1_SHIFT 22
+#define I40E_GLQF_SWAP_OFF1_SRC1_MASK (0x3F << I40E_GLQF_SWAP_OFF1_SRC1_SHIFT)
+#define I40E_GLQF_SWAP_FLEN1_SHIFT 28
+#define I40E_GLQF_SWAP_FLEN1_MASK (0xF << I40E_GLQF_SWAP_FLEN1_SHIFT)
+#define I40E_PFQF_CTL_0 0x001C0AC0
+#define I40E_PFQF_CTL_0_PEHSIZE_SHIFT 0
+#define I40E_PFQF_CTL_0_PEHSIZE_MASK (0x1F << I40E_PFQF_CTL_0_PEHSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_PEDSIZE_SHIFT 5
+#define I40E_PFQF_CTL_0_PEDSIZE_MASK (0x1F << I40E_PFQF_CTL_0_PEDSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT 10
+#define I40E_PFQF_CTL_0_PFFCHSIZE_MASK (0xF << I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT 14
+#define I40E_PFQF_CTL_0_PFFCDSIZE_MASK (0x3 << I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT 16
+#define I40E_PFQF_CTL_0_HASHLUTSIZE_MASK (0x1 << I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_FD_ENA_SHIFT 17
+#define I40E_PFQF_CTL_0_FD_ENA_MASK (0x1 << I40E_PFQF_CTL_0_FD_ENA_SHIFT)
+#define I40E_PFQF_CTL_0_ETYPE_ENA_SHIFT 18
+#define I40E_PFQF_CTL_0_ETYPE_ENA_MASK (0x1 << I40E_PFQF_CTL_0_ETYPE_ENA_SHIFT)
+#define I40E_PFQF_CTL_0_MACVLAN_ENA_SHIFT 19
+#define I40E_PFQF_CTL_0_MACVLAN_ENA_MASK (0x1 << I40E_PFQF_CTL_0_MACVLAN_ENA_SHIFT)
+#define I40E_PFQF_CTL_0_VFFCHSIZE_SHIFT 20
+#define I40E_PFQF_CTL_0_VFFCHSIZE_MASK (0xF << I40E_PFQF_CTL_0_VFFCHSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_VFFCDSIZE_SHIFT 24
+#define I40E_PFQF_CTL_0_VFFCDSIZE_MASK (0x3 << I40E_PFQF_CTL_0_VFFCDSIZE_SHIFT)
+#define I40E_PFQF_CTL_1 0x00245D80
+#define I40E_PFQF_CTL_1_CLEARFDTABLE_SHIFT 0
+#define I40E_PFQF_CTL_1_CLEARFDTABLE_MASK (0x1 << I40E_PFQF_CTL_1_CLEARFDTABLE_SHIFT)
+#define I40E_PFQF_FDALLOC 0x00246280
+#define I40E_PFQF_FDALLOC_FDALLOC_SHIFT 0
+#define I40E_PFQF_FDALLOC_FDALLOC_MASK (0xFF << I40E_PFQF_FDALLOC_FDALLOC_SHIFT)
+#define I40E_PFQF_FDALLOC_FDBEST_SHIFT 8
+#define I40E_PFQF_FDALLOC_FDBEST_MASK (0xFF << I40E_PFQF_FDALLOC_FDBEST_SHIFT)
+#define I40E_PFQF_FDSTAT 0x00246380
+#define I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT 0
+#define I40E_PFQF_FDSTAT_GUARANT_CNT_MASK (0x1FFF << I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT)
+#define I40E_PFQF_FDSTAT_BEST_CNT_SHIFT 16
+#define I40E_PFQF_FDSTAT_BEST_CNT_MASK (0x1FFF << I40E_PFQF_FDSTAT_BEST_CNT_SHIFT)
+#define I40E_PFQF_HENA(_i) (0x00245900 + ((_i) * 128)) /* _i=0...1 */
+#define I40E_PFQF_HENA_MAX_INDEX 1
+#define I40E_PFQF_HENA_PTYPE_ENA_SHIFT 0
+#define I40E_PFQF_HENA_PTYPE_ENA_MASK (0xFFFFFFFF << I40E_PFQF_HENA_PTYPE_ENA_SHIFT)
+#define I40E_PFQF_HKEY(_i) (0x00244800 + ((_i) * 128)) /* _i=0...12 */
+#define I40E_PFQF_HKEY_MAX_INDEX 12
+#define I40E_PFQF_HKEY_KEY_0_SHIFT 0
+#define I40E_PFQF_HKEY_KEY_0_MASK (0xFF << I40E_PFQF_HKEY_KEY_0_SHIFT)
+#define I40E_PFQF_HKEY_KEY_1_SHIFT 8
+#define I40E_PFQF_HKEY_KEY_1_MASK (0xFF << I40E_PFQF_HKEY_KEY_1_SHIFT)
+#define I40E_PFQF_HKEY_KEY_2_SHIFT 16
+#define I40E_PFQF_HKEY_KEY_2_MASK (0xFF << I40E_PFQF_HKEY_KEY_2_SHIFT)
+#define I40E_PFQF_HKEY_KEY_3_SHIFT 24
+#define I40E_PFQF_HKEY_KEY_3_MASK (0xFF << I40E_PFQF_HKEY_KEY_3_SHIFT)
+#define I40E_PFQF_HLUT(_i) (0x00240000 + ((_i) * 128)) /* _i=0...127 */
+#define I40E_PFQF_HLUT_MAX_INDEX 127
+#define I40E_PFQF_HLUT_LUT0_SHIFT 0
+#define I40E_PFQF_HLUT_LUT0_MASK (0x3F << I40E_PFQF_HLUT_LUT0_SHIFT)
+#define I40E_PFQF_HLUT_LUT1_SHIFT 8
+#define I40E_PFQF_HLUT_LUT1_MASK (0x3F << I40E_PFQF_HLUT_LUT1_SHIFT)
+#define I40E_PFQF_HLUT_LUT2_SHIFT 16
+#define I40E_PFQF_HLUT_LUT2_MASK (0x3F << I40E_PFQF_HLUT_LUT2_SHIFT)
+#define I40E_PFQF_HLUT_LUT3_SHIFT 24
+#define I40E_PFQF_HLUT_LUT3_MASK (0x3F << I40E_PFQF_HLUT_LUT3_SHIFT)
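/*
 * Illustrative sketch (assumed usage, not taken from this patch): the
 * indexed PFQF_HKEY/PFQF_HLUT macros above expand to per-index MMIO
 * offsets, with four 6-bit LUT entries packed into each HLUT register.
 * The function name, MMIO base and round-robin fill are assumptions.
 */
#include <linux/io.h>
#include <linux/types.h>

static void example_pf_rss_config(void __iomem *regs, const u32 *key,
				  u8 num_queues)
{
	u32 lut, i, j = 0;

	/* 32 bits of hash key per HKEY register index */
	for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
		writel(key[i], regs + I40E_PFQF_HKEY(i));

	/* spread queues round-robin across the 128 HLUT registers */
	for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++) {
		lut  = (j++ % num_queues) << I40E_PFQF_HLUT_LUT0_SHIFT;
		lut |= (j++ % num_queues) << I40E_PFQF_HLUT_LUT1_SHIFT;
		lut |= (j++ % num_queues) << I40E_PFQF_HLUT_LUT2_SHIFT;
		lut |= (j++ % num_queues) << I40E_PFQF_HLUT_LUT3_SHIFT;
		writel(lut, regs + I40E_PFQF_HLUT(i));
	}
}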
+#define I40E_PFQF_HREGION(_i) (0x00245400 + ((_i) * 128)) /* _i=0...7 */
+#define I40E_PFQF_HREGION_MAX_INDEX 7
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_0_SHIFT 0
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_0_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_0_SHIFT)
+#define I40E_PFQF_HREGION_REGION_0_SHIFT 1
+#define I40E_PFQF_HREGION_REGION_0_MASK (0x7 << I40E_PFQF_HREGION_REGION_0_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_1_SHIFT 4
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_1_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_1_SHIFT)
+#define I40E_PFQF_HREGION_REGION_1_SHIFT 5
+#define I40E_PFQF_HREGION_REGION_1_MASK (0x7 << I40E_PFQF_HREGION_REGION_1_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_2_SHIFT 8
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_2_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_2_SHIFT)
+#define I40E_PFQF_HREGION_REGION_2_SHIFT 9
+#define I40E_PFQF_HREGION_REGION_2_MASK (0x7 << I40E_PFQF_HREGION_REGION_2_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_3_SHIFT 12
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_3_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_3_SHIFT)
+#define I40E_PFQF_HREGION_REGION_3_SHIFT 13
+#define I40E_PFQF_HREGION_REGION_3_MASK (0x7 << I40E_PFQF_HREGION_REGION_3_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_4_SHIFT 16
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_4_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_4_SHIFT)
+#define I40E_PFQF_HREGION_REGION_4_SHIFT 17
+#define I40E_PFQF_HREGION_REGION_4_MASK (0x7 << I40E_PFQF_HREGION_REGION_4_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_5_SHIFT 20
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_5_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_5_SHIFT)
+#define I40E_PFQF_HREGION_REGION_5_SHIFT 21
+#define I40E_PFQF_HREGION_REGION_5_MASK (0x7 << I40E_PFQF_HREGION_REGION_5_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_6_SHIFT 24
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_6_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_6_SHIFT)
+#define I40E_PFQF_HREGION_REGION_6_SHIFT 25
+#define I40E_PFQF_HREGION_REGION_6_MASK (0x7 << I40E_PFQF_HREGION_REGION_6_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_7_SHIFT 28
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_7_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_7_SHIFT)
+#define I40E_PFQF_HREGION_REGION_7_SHIFT 29
+#define I40E_PFQF_HREGION_REGION_7_MASK (0x7 << I40E_PFQF_HREGION_REGION_7_SHIFT)
+#define I40E_PRTQF_CTL_0 0x00256E60
+#define I40E_PRTQF_CTL_0_HSYM_ENA_SHIFT 0
+#define I40E_PRTQF_CTL_0_HSYM_ENA_MASK (0x1 << I40E_PRTQF_CTL_0_HSYM_ENA_SHIFT)
+#define I40E_PRTQF_FD_FLXINSET(_i) (0x00253800 + ((_i) * 32)) /* _i=0...63 */
+#define I40E_PRTQF_FD_FLXINSET_MAX_INDEX 63
+#define I40E_PRTQF_FD_FLXINSET_INSET_SHIFT 0
+#define I40E_PRTQF_FD_FLXINSET_INSET_MASK (0xFF << I40E_PRTQF_FD_FLXINSET_INSET_SHIFT)
+#define I40E_PRTQF_FD_MSK(_i, _j) (0x00252000 + ((_i) * 64 + (_j) * 32)) /* _i=0...63, _j=0...1 */
+#define I40E_PRTQF_FD_MSK_MAX_INDEX 63
+#define I40E_PRTQF_FD_MSK_MASK_SHIFT 0
+#define I40E_PRTQF_FD_MSK_MASK_MASK (0xFFFF << I40E_PRTQF_FD_MSK_MASK_SHIFT)
+#define I40E_PRTQF_FD_MSK_OFFSET_SHIFT 16
+#define I40E_PRTQF_FD_MSK_OFFSET_MASK (0x3F << I40E_PRTQF_FD_MSK_OFFSET_SHIFT)
+#define I40E_PRTQF_FLX_PIT(_i) (0x00255200 + ((_i) * 32)) /* _i=0...8 */
+#define I40E_PRTQF_FLX_PIT_MAX_INDEX 8
+#define I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT 0
+#define I40E_PRTQF_FLX_PIT_SOURCE_OFF_MASK (0x1F << I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT)
+#define I40E_PRTQF_FLX_PIT_FSIZE_SHIFT 5
+#define I40E_PRTQF_FLX_PIT_FSIZE_MASK (0x1F << I40E_PRTQF_FLX_PIT_FSIZE_SHIFT)
+#define I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT 10
+#define I40E_PRTQF_FLX_PIT_DEST_OFF_MASK (0x3F << I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT)
+#define I40E_VFQF_HENA1(_i, _VF) (0x00230800 + ((_i) * 1024 + (_VF) * 4))
+#define I40E_VFQF_HENA1_MAX_INDEX 1
+#define I40E_VFQF_HENA1_PTYPE_ENA_SHIFT 0
+#define I40E_VFQF_HENA1_PTYPE_ENA_MASK (0xFFFFFFFF << I40E_VFQF_HENA1_PTYPE_ENA_SHIFT)
+#define I40E_VFQF_HKEY1(_i, _VF) (0x00228000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...12, _VF=0...127 */
+#define I40E_VFQF_HKEY1_MAX_INDEX 12
+#define I40E_VFQF_HKEY1_KEY_0_SHIFT 0
+#define I40E_VFQF_HKEY1_KEY_0_MASK (0xFF << I40E_VFQF_HKEY1_KEY_0_SHIFT)
+#define I40E_VFQF_HKEY1_KEY_1_SHIFT 8
+#define I40E_VFQF_HKEY1_KEY_1_MASK (0xFF << I40E_VFQF_HKEY1_KEY_1_SHIFT)
+#define I40E_VFQF_HKEY1_KEY_2_SHIFT 16
+#define I40E_VFQF_HKEY1_KEY_2_MASK (0xFF << I40E_VFQF_HKEY1_KEY_2_SHIFT)
+#define I40E_VFQF_HKEY1_KEY_3_SHIFT 24
+#define I40E_VFQF_HKEY1_KEY_3_MASK (0xFF << I40E_VFQF_HKEY1_KEY_3_SHIFT)
+#define I40E_VFQF_HLUT1(_i, _VF) (0x00220000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...15, _VF=0...127 */
+#define I40E_VFQF_HLUT1_MAX_INDEX 15
+#define I40E_VFQF_HLUT1_LUT0_SHIFT 0
+#define I40E_VFQF_HLUT1_LUT0_MASK (0xF << I40E_VFQF_HLUT1_LUT0_SHIFT)
+#define I40E_VFQF_HLUT1_LUT1_SHIFT 8
+#define I40E_VFQF_HLUT1_LUT1_MASK (0xF << I40E_VFQF_HLUT1_LUT1_SHIFT)
+#define I40E_VFQF_HLUT1_LUT2_SHIFT 16
+#define I40E_VFQF_HLUT1_LUT2_MASK (0xF << I40E_VFQF_HLUT1_LUT2_SHIFT)
+#define I40E_VFQF_HLUT1_LUT3_SHIFT 24
+#define I40E_VFQF_HLUT1_LUT3_MASK (0xF << I40E_VFQF_HLUT1_LUT3_SHIFT)
+#define I40E_VFQF_HREGION1(_i, _VF) (0x0022E000 + ((_i) * 1024 + (_VF) * 4))
+#define I40E_VFQF_HREGION1_MAX_INDEX 7
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_0_SHIFT 0
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_0_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_0_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_0_SHIFT 1
+#define I40E_VFQF_HREGION1_REGION_0_MASK (0x7 << I40E_VFQF_HREGION1_REGION_0_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_1_SHIFT 4
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_1_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_1_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_1_SHIFT 5
+#define I40E_VFQF_HREGION1_REGION_1_MASK (0x7 << I40E_VFQF_HREGION1_REGION_1_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_2_SHIFT 8
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_2_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_2_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_2_SHIFT 9
+#define I40E_VFQF_HREGION1_REGION_2_MASK (0x7 << I40E_VFQF_HREGION1_REGION_2_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_3_SHIFT 12
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_3_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_3_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_3_SHIFT 13
+#define I40E_VFQF_HREGION1_REGION_3_MASK (0x7 << I40E_VFQF_HREGION1_REGION_3_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_4_SHIFT 16
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_4_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_4_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_4_SHIFT 17
+#define I40E_VFQF_HREGION1_REGION_4_MASK (0x7 << I40E_VFQF_HREGION1_REGION_4_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_5_SHIFT 20
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_5_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_5_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_5_SHIFT 21
+#define I40E_VFQF_HREGION1_REGION_5_MASK (0x7 << I40E_VFQF_HREGION1_REGION_5_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_6_SHIFT 24
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_6_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_6_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_6_SHIFT 25
+#define I40E_VFQF_HREGION1_REGION_6_MASK (0x7 << I40E_VFQF_HREGION1_REGION_6_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_7_SHIFT 28
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_7_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_7_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_7_SHIFT 29
+#define I40E_VFQF_HREGION1_REGION_7_MASK (0x7 << I40E_VFQF_HREGION1_REGION_7_SHIFT)
+#define I40E_VPQF_CTL(_VF) (0x001C0000 + ((_VF) * 4)) /* _VF=0...127 */
+#define I40E_VPQF_CTL_MAX_INDEX 127
+#define I40E_VPQF_CTL_PEHSIZE_SHIFT 0
+#define I40E_VPQF_CTL_PEHSIZE_MASK (0x1F << I40E_VPQF_CTL_PEHSIZE_SHIFT)
+#define I40E_VPQF_CTL_PEDSIZE_SHIFT 5
+#define I40E_VPQF_CTL_PEDSIZE_MASK (0x1F << I40E_VPQF_CTL_PEDSIZE_SHIFT)
+#define I40E_VPQF_CTL_FCHSIZE_SHIFT 10
+#define I40E_VPQF_CTL_FCHSIZE_MASK (0xF << I40E_VPQF_CTL_FCHSIZE_SHIFT)
+#define I40E_VPQF_CTL_FCDSIZE_SHIFT 14
+#define I40E_VPQF_CTL_FCDSIZE_MASK (0x3 << I40E_VPQF_CTL_FCDSIZE_SHIFT)
+#define I40E_VSIQF_CTL(_VSI) (0x0020D800 + ((_VSI) * 4)) /* _VSI=0...383 */
+#define I40E_VSIQF_CTL_MAX_INDEX 383
+#define I40E_VSIQF_CTL_FCOE_ENA_SHIFT 0
+#define I40E_VSIQF_CTL_FCOE_ENA_MASK (0x1 << I40E_VSIQF_CTL_FCOE_ENA_SHIFT)
+#define I40E_VSIQF_CTL_PETCP_ENA_SHIFT 1
+#define I40E_VSIQF_CTL_PETCP_ENA_MASK (0x1 << I40E_VSIQF_CTL_PETCP_ENA_SHIFT)
+#define I40E_VSIQF_CTL_PEUUDP_ENA_SHIFT 2
+#define I40E_VSIQF_CTL_PEUUDP_ENA_MASK (0x1 << I40E_VSIQF_CTL_PEUUDP_ENA_SHIFT)
+#define I40E_VSIQF_CTL_PEMUDP_ENA_SHIFT 3
+#define I40E_VSIQF_CTL_PEMUDP_ENA_MASK (0x1 << I40E_VSIQF_CTL_PEMUDP_ENA_SHIFT)
+#define I40E_VSIQF_CTL_PEUFRAG_ENA_SHIFT 4
+#define I40E_VSIQF_CTL_PEUFRAG_ENA_MASK (0x1 << I40E_VSIQF_CTL_PEUFRAG_ENA_SHIFT)
+#define I40E_VSIQF_CTL_PEMFRAG_ENA_SHIFT 5
+#define I40E_VSIQF_CTL_PEMFRAG_ENA_MASK (0x1 << I40E_VSIQF_CTL_PEMFRAG_ENA_SHIFT)
+#define I40E_VSIQF_TCREGION(_i, _VSI) (0x00206000 + ((_i) * 2048 + (_VSI) * 4))
+#define I40E_VSIQF_TCREGION_MAX_INDEX 3
+#define I40E_VSIQF_TCREGION_TC_OFFSET_SHIFT 0
+#define I40E_VSIQF_TCREGION_TC_OFFSET_MASK (0x1FF << I40E_VSIQF_TCREGION_TC_OFFSET_SHIFT)
+#define I40E_VSIQF_TCREGION_TC_SIZE_SHIFT 9
+#define I40E_VSIQF_TCREGION_TC_SIZE_MASK (0x7 << I40E_VSIQF_TCREGION_TC_SIZE_SHIFT)
+#define I40E_VSIQF_TCREGION_TC_OFFSET2_SHIFT 16
+#define I40E_VSIQF_TCREGION_TC_OFFSET2_MASK (0x1FF << I40E_VSIQF_TCREGION_TC_OFFSET2_SHIFT)
+#define I40E_VSIQF_TCREGION_TC_SIZE2_SHIFT 25
+#define I40E_VSIQF_TCREGION_TC_SIZE2_MASK (0x7 << I40E_VSIQF_TCREGION_TC_SIZE2_SHIFT)
+#define I40E_GL_FCOECRC(_i) (0x00314d80 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOECRC_MAX_INDEX 143
+#define I40E_GL_FCOECRC_FCOECRC_SHIFT 0
+#define I40E_GL_FCOECRC_FCOECRC_MASK (0xFFFFFFFF << I40E_GL_FCOECRC_FCOECRC_SHIFT)
+#define I40E_GL_FCOEDDPC(_i) (0x00314480 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDDPC_MAX_INDEX 143
+#define I40E_GL_FCOEDDPC_FCOEDDPC_SHIFT 0
+#define I40E_GL_FCOEDDPC_FCOEDDPC_MASK (0xFFFFFFFF << I40E_GL_FCOEDDPC_FCOEDDPC_SHIFT)
+#define I40E_GL_FCOEDIFEC(_i) (0x00318480 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDIFEC_MAX_INDEX 143
+#define I40E_GL_FCOEDIFEC_FCOEDIFRC_SHIFT 0
+#define I40E_GL_FCOEDIFEC_FCOEDIFRC_MASK (0xFFFFFFFF << I40E_GL_FCOEDIFEC_FCOEDIFRC_SHIFT)
+#define I40E_GL_FCOEDIFRC(_i) (0x00318000 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDIFRC_MAX_INDEX 143
+#define I40E_GL_FCOEDIFRC_FCOEDIFRC_SHIFT 0
+#define I40E_GL_FCOEDIFRC_FCOEDIFRC_MASK (0xFFFFFFFF << I40E_GL_FCOEDIFRC_FCOEDIFRC_SHIFT)
+#define I40E_GL_FCOEDIFTCL(_i) (0x00354000 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDIFTCL_MAX_INDEX 143
+#define I40E_GL_FCOEDIFTCL_FCOEDIFTC_SHIFT 0
+#define I40E_GL_FCOEDIFTCL_FCOEDIFTC_MASK (0xFFFFFFFF << I40E_GL_FCOEDIFTCL_FCOEDIFTC_SHIFT)
+#define I40E_GL_FCOEDIXAC(_i) (0x0031c000 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDIXAC_MAX_INDEX 143
+#define I40E_GL_FCOEDIXAC_FCOEDIXAC_SHIFT 0
+#define I40E_GL_FCOEDIXAC_FCOEDIXAC_MASK (0xFFFFFFFF << I40E_GL_FCOEDIXAC_FCOEDIXAC_SHIFT)
+#define I40E_GL_FCOEDIXEC(_i) (0x0034c000 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDIXEC_MAX_INDEX 143
+#define I40E_GL_FCOEDIXEC_FCOEDIXEC_SHIFT 0
+#define I40E_GL_FCOEDIXEC_FCOEDIXEC_MASK (0xFFFFFFFF << I40E_GL_FCOEDIXEC_FCOEDIXEC_SHIFT)
+#define I40E_GL_FCOEDIXVC(_i) (0x00350000 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDIXVC_MAX_INDEX 143
+#define I40E_GL_FCOEDIXVC_FCOEDIXVC_SHIFT 0
+#define I40E_GL_FCOEDIXVC_FCOEDIXVC_MASK (0xFFFFFFFF << I40E_GL_FCOEDIXVC_FCOEDIXVC_SHIFT)
+#define I40E_GL_FCOEDWRCH(_i) (0x00320004 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDWRCH_MAX_INDEX 143
+#define I40E_GL_FCOEDWRCH_FCOEDWRCH_SHIFT 0
+#define I40E_GL_FCOEDWRCH_FCOEDWRCH_MASK (0xFFFF << I40E_GL_FCOEDWRCH_FCOEDWRCH_SHIFT)
+#define I40E_GL_FCOEDWRCL(_i) (0x00320000 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDWRCL_MAX_INDEX 143
+#define I40E_GL_FCOEDWRCL_FCOEDWRCL_SHIFT 0
+#define I40E_GL_FCOEDWRCL_FCOEDWRCL_MASK (0xFFFFFFFF << I40E_GL_FCOEDWRCL_FCOEDWRCL_SHIFT)
+#define I40E_GL_FCOEDWTCH(_i) (0x00348084 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDWTCH_MAX_INDEX 143
+#define I40E_GL_FCOEDWTCH_FCOEDWTCH_SHIFT 0
+#define I40E_GL_FCOEDWTCH_FCOEDWTCH_MASK (0xFFFF << I40E_GL_FCOEDWTCH_FCOEDWTCH_SHIFT)
+#define I40E_GL_FCOEDWTCL(_i) (0x00348080 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDWTCL_MAX_INDEX 143
+#define I40E_GL_FCOEDWTCL_FCOEDWTCL_SHIFT 0
+#define I40E_GL_FCOEDWTCL_FCOEDWTCL_MASK (0xFFFFFFFF << I40E_GL_FCOEDWTCL_FCOEDWTCL_SHIFT)
+#define I40E_GL_FCOELAST(_i) (0x00314000 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOELAST_MAX_INDEX 143
+#define I40E_GL_FCOELAST_FCOELAST_SHIFT 0
+#define I40E_GL_FCOELAST_FCOELAST_MASK (0xFFFFFFFF << I40E_GL_FCOELAST_FCOELAST_SHIFT)
+#define I40E_GL_FCOEPRC(_i) (0x00315200 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEPRC_MAX_INDEX 143
+#define I40E_GL_FCOEPRC_FCOEPRC_SHIFT 0
+#define I40E_GL_FCOEPRC_FCOEPRC_MASK (0xFFFFFFFF << I40E_GL_FCOEPRC_FCOEPRC_SHIFT)
+#define I40E_GL_FCOEPTC(_i) (0x00344C00 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEPTC_MAX_INDEX 143
+#define I40E_GL_FCOEPTC_FCOEPTC_SHIFT 0
+#define I40E_GL_FCOEPTC_FCOEPTC_MASK (0xFFFFFFFF << I40E_GL_FCOEPTC_FCOEPTC_SHIFT)
+#define I40E_GL_FCOERPDC(_i) (0x00324000 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOERPDC_MAX_INDEX 143
+#define I40E_GL_FCOERPDC_FCOERPDC_SHIFT 0
+#define I40E_GL_FCOERPDC_FCOERPDC_MASK (0xFFFFFFFF << I40E_GL_FCOERPDC_FCOERPDC_SHIFT)
+#define I40E_GLPRT_BPRCH(_i) (0x003005E4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_BPRCH_MAX_INDEX 3
+#define I40E_GLPRT_BPRCH_UPRCH_SHIFT 0
+#define I40E_GLPRT_BPRCH_UPRCH_MASK (0xFFFF << I40E_GLPRT_BPRCH_UPRCH_SHIFT)
+#define I40E_GLPRT_BPRCL(_i) (0x003005E0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_BPRCL_MAX_INDEX 3
+#define I40E_GLPRT_BPRCL_UPRCH_SHIFT 0
+#define I40E_GLPRT_BPRCL_UPRCH_MASK (0xFFFFFFFF << I40E_GLPRT_BPRCL_UPRCH_SHIFT)
+#define I40E_GLPRT_BPTCH(_i) (0x00300A04 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_BPTCH_MAX_INDEX 3
+#define I40E_GLPRT_BPTCH_UPRCH_SHIFT 0
+#define I40E_GLPRT_BPTCH_UPRCH_MASK (0xFFFF << I40E_GLPRT_BPTCH_UPRCH_SHIFT)
+#define I40E_GLPRT_BPTCL(_i) (0x00300A00 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_BPTCL_MAX_INDEX 3
+#define I40E_GLPRT_BPTCL_UPRCH_SHIFT 0
+#define I40E_GLPRT_BPTCL_UPRCH_MASK (0xFFFFFFFF << I40E_GLPRT_BPTCL_UPRCH_SHIFT)
+#define I40E_GLPRT_CRCERRS(_i) (0x00300080 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_CRCERRS_MAX_INDEX 3
+#define I40E_GLPRT_CRCERRS_CRCERRS_SHIFT 0
+#define I40E_GLPRT_CRCERRS_CRCERRS_MASK (0xFFFFFFFF << I40E_GLPRT_CRCERRS_CRCERRS_SHIFT)
+#define I40E_GLPRT_GORCH(_i) (0x00300004 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_GORCH_MAX_INDEX 3
+#define I40E_GLPRT_GORCH_GORCH_SHIFT 0
+#define I40E_GLPRT_GORCH_GORCH_MASK (0xFFFF << I40E_GLPRT_GORCH_GORCH_SHIFT)
+#define I40E_GLPRT_GORCL(_i) (0x00300000 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_GORCL_MAX_INDEX 3
+#define I40E_GLPRT_GORCL_GORCL_SHIFT 0
+#define I40E_GLPRT_GORCL_GORCL_MASK (0xFFFFFFFF << I40E_GLPRT_GORCL_GORCL_SHIFT)
+#define I40E_GLPRT_GOTCH(_i) (0x00300684 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_GOTCH_MAX_INDEX 3
+#define I40E_GLPRT_GOTCH_GOTCH_SHIFT 0
+#define I40E_GLPRT_GOTCH_GOTCH_MASK (0xFFFF << I40E_GLPRT_GOTCH_GOTCH_SHIFT)
+#define I40E_GLPRT_GOTCL(_i) (0x00300680 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_GOTCL_MAX_INDEX 3
+#define I40E_GLPRT_GOTCL_GOTCL_SHIFT 0
+#define I40E_GLPRT_GOTCL_GOTCL_MASK (0xFFFFFFFF << I40E_GLPRT_GOTCL_GOTCL_SHIFT)
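/*
 * Illustrative sketch (assumed usage, not taken from this patch): the
 * GLPRT *CL/*CH pairs above are split counters, 32 low bits plus a
 * 16-bit high half.  Combining them into one 48-bit value looks like
 * this; the helper name and MMIO base are placeholders.
 */
#include <linux/io.h>
#include <linux/types.h>

static u64 example_read_stat48(void __iomem *regs, u32 lo_reg, u32 hi_reg)
{
	u64 lo = readl(regs + lo_reg);
	u64 hi = readl(regs + hi_reg) & 0xFFFF;	/* high half is 16 bits */

	return (hi << 32) | lo;
}

/*
 * e.g. good octets received on port 0:
 *	example_read_stat48(regs, I40E_GLPRT_GORCL(0), I40E_GLPRT_GORCH(0));
 */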
+#define I40E_GLPRT_ILLERRC(_i) (0x003000E0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_ILLERRC_MAX_INDEX 3
+#define I40E_GLPRT_ILLERRC_ILLERRC_SHIFT 0
+#define I40E_GLPRT_ILLERRC_ILLERRC_MASK (0xFFFFFFFF << I40E_GLPRT_ILLERRC_ILLERRC_SHIFT)
+#define I40E_GLPRT_LDPC(_i) (0x00300620 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_LDPC_MAX_INDEX 3
+#define I40E_GLPRT_LDPC_LDPC_SHIFT 0
+#define I40E_GLPRT_LDPC_LDPC_MASK (0xFFFFFFFF << I40E_GLPRT_LDPC_LDPC_SHIFT)
+#define I40E_GLPRT_LXOFFRXC(_i) (0x00300160 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_LXOFFRXC_MAX_INDEX 3
+#define I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_SHIFT 0
+#define I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_MASK (0xFFFFFFFF << I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_SHIFT)
+#define I40E_GLPRT_LXOFFTXC(_i) (0x003009A0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_LXOFFTXC_MAX_INDEX 3
+#define I40E_GLPRT_LXOFFTXC_LXOFFTXC_SHIFT 0
+#define I40E_GLPRT_LXOFFTXC_LXOFFTXC_MASK (0xFFFFFFFF << I40E_GLPRT_LXOFFTXC_LXOFFTXC_SHIFT)
+#define I40E_GLPRT_LXONRXC(_i) (0x00300140 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_LXONRXC_MAX_INDEX 3
+#define I40E_GLPRT_LXONRXC_LXONRXCNT_SHIFT 0
+#define I40E_GLPRT_LXONRXC_LXONRXCNT_MASK (0xFFFFFFFF << I40E_GLPRT_LXONRXC_LXONRXCNT_SHIFT)
+#define I40E_GLPRT_LXONTXC(_i) (0x00300980 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_LXONTXC_MAX_INDEX 3
+#define I40E_GLPRT_LXONTXC_LXONTXC_SHIFT 0
+#define I40E_GLPRT_LXONTXC_LXONTXC_MASK (0xFFFFFFFF << I40E_GLPRT_LXONTXC_LXONTXC_SHIFT)
+#define I40E_GLPRT_MLFC(_i) (0x00300020 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_MLFC_MAX_INDEX 3
+#define I40E_GLPRT_MLFC_MLFC_SHIFT 0
+#define I40E_GLPRT_MLFC_MLFC_MASK (0xFFFFFFFF << I40E_GLPRT_MLFC_MLFC_SHIFT)
+#define I40E_GLPRT_MPRCH(_i) (0x003005C4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_MPRCH_MAX_INDEX 3
+#define I40E_GLPRT_MPRCH_MPRCH_SHIFT 0
+#define I40E_GLPRT_MPRCH_MPRCH_MASK (0xFFFF << I40E_GLPRT_MPRCH_MPRCH_SHIFT)
+#define I40E_GLPRT_MPRCL(_i) (0x003005C0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_MPRCL_MAX_INDEX 3
+#define I40E_GLPRT_MPRCL_MPRCL_SHIFT 0
+#define I40E_GLPRT_MPRCL_MPRCL_MASK (0xFFFFFFFF << I40E_GLPRT_MPRCL_MPRCL_SHIFT)
+#define I40E_GLPRT_MPTCH(_i) (0x003009E4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_MPTCH_MAX_INDEX 3
+#define I40E_GLPRT_MPTCH_MPTCH_SHIFT 0
+#define I40E_GLPRT_MPTCH_MPTCH_MASK (0xFFFF << I40E_GLPRT_MPTCH_MPTCH_SHIFT)
+#define I40E_GLPRT_MPTCL(_i) (0x003009E0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_MPTCL_MAX_INDEX 3
+#define I40E_GLPRT_MPTCL_MPTCL_SHIFT 0
+#define I40E_GLPRT_MPTCL_MPTCL_MASK (0xFFFFFFFF << I40E_GLPRT_MPTCL_MPTCL_SHIFT)
+#define I40E_GLPRT_MRFC(_i) (0x00300040 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_MRFC_MAX_INDEX 3
+#define I40E_GLPRT_MRFC_MRFC_SHIFT 0
+#define I40E_GLPRT_MRFC_MRFC_MASK (0xFFFFFFFF << I40E_GLPRT_MRFC_MRFC_SHIFT)
+#define I40E_GLPRT_PRC1023H(_i) (0x00300504 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC1023H_MAX_INDEX 3
+#define I40E_GLPRT_PRC1023H_PRC1023H_SHIFT 0
+#define I40E_GLPRT_PRC1023H_PRC1023H_MASK (0xFFFF << I40E_GLPRT_PRC1023H_PRC1023H_SHIFT)
+#define I40E_GLPRT_PRC1023L(_i) (0x00300500 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC1023L_MAX_INDEX 3
+#define I40E_GLPRT_PRC1023L_PRC1023L_SHIFT 0
+#define I40E_GLPRT_PRC1023L_PRC1023L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC1023L_PRC1023L_SHIFT)
+#define I40E_GLPRT_PRC127H(_i) (0x003004A4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC127H_MAX_INDEX 3
+#define I40E_GLPRT_PRC127H_PRC127H_SHIFT 0
+#define I40E_GLPRT_PRC127H_PRC127H_MASK (0xFFFF << I40E_GLPRT_PRC127H_PRC127H_SHIFT)
+#define I40E_GLPRT_PRC127L(_i) (0x003004A0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC127L_MAX_INDEX 3
+#define I40E_GLPRT_PRC127L_PRC127L_SHIFT 0
+#define I40E_GLPRT_PRC127L_PRC127L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC127L_PRC127L_SHIFT)
+#define I40E_GLPRT_PRC1522H(_i) (0x00300524 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC1522H_MAX_INDEX 3
+#define I40E_GLPRT_PRC1522H_PRC1522H_SHIFT 0
+#define I40E_GLPRT_PRC1522H_PRC1522H_MASK (0xFFFF << I40E_GLPRT_PRC1522H_PRC1522H_SHIFT)
+#define I40E_GLPRT_PRC1522L(_i) (0x00300520 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC1522L_MAX_INDEX 3
+#define I40E_GLPRT_PRC1522L_PRC1522L_SHIFT 0
+#define I40E_GLPRT_PRC1522L_PRC1522L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC1522L_PRC1522L_SHIFT)
+#define I40E_GLPRT_PRC255H(_i) (0x003004C4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC255H_MAX_INDEX 3
+#define I40E_GLPRT_PRC255H_PRTPRC255H_SHIFT 0
+#define I40E_GLPRT_PRC255H_PRTPRC255H_MASK (0xFFFF << I40E_GLPRT_PRC255H_PRTPRC255H_SHIFT)
+#define I40E_GLPRT_PRC255L(_i) (0x003004C0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC255L_MAX_INDEX 3
+#define I40E_GLPRT_PRC255L_PRC255L_SHIFT 0
+#define I40E_GLPRT_PRC255L_PRC255L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC255L_PRC255L_SHIFT)
+#define I40E_GLPRT_PRC511H(_i) (0x003004E4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC511H_MAX_INDEX 3
+#define I40E_GLPRT_PRC511H_PRC511H_SHIFT 0
+#define I40E_GLPRT_PRC511H_PRC511H_MASK (0xFFFF << I40E_GLPRT_PRC511H_PRC511H_SHIFT)
+#define I40E_GLPRT_PRC511L(_i) (0x003004E0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC511L_MAX_INDEX 3
+#define I40E_GLPRT_PRC511L_PRC511L_SHIFT 0
+#define I40E_GLPRT_PRC511L_PRC511L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC511L_PRC511L_SHIFT)
+#define I40E_GLPRT_PRC64H(_i) (0x00300484 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC64H_MAX_INDEX 3
+#define I40E_GLPRT_PRC64H_PRC64H_SHIFT 0
+#define I40E_GLPRT_PRC64H_PRC64H_MASK (0xFFFF << I40E_GLPRT_PRC64H_PRC64H_SHIFT)
+#define I40E_GLPRT_PRC64L(_i) (0x00300480 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC64L_MAX_INDEX 3
+#define I40E_GLPRT_PRC64L_PRC64L_SHIFT 0
+#define I40E_GLPRT_PRC64L_PRC64L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC64L_PRC64L_SHIFT)
+#define I40E_GLPRT_PRC9522H(_i) (0x00300544 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC9522H_MAX_INDEX 3
+#define I40E_GLPRT_PRC9522H_PRC1522H_SHIFT 0
+#define I40E_GLPRT_PRC9522H_PRC1522H_MASK (0xFFFF << I40E_GLPRT_PRC9522H_PRC1522H_SHIFT)
+#define I40E_GLPRT_PRC9522L(_i) (0x00300540 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC9522L_MAX_INDEX 3
+#define I40E_GLPRT_PRC9522L_PRC1522L_SHIFT 0
+#define I40E_GLPRT_PRC9522L_PRC1522L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC9522L_PRC1522L_SHIFT)
+#define I40E_GLPRT_PTC1023H(_i) (0x00300724 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC1023H_MAX_INDEX 3
+#define I40E_GLPRT_PTC1023H_PTC1023H_SHIFT 0
+#define I40E_GLPRT_PTC1023H_PTC1023H_MASK (0xFFFF << I40E_GLPRT_PTC1023H_PTC1023H_SHIFT)
+#define I40E_GLPRT_PTC1023L(_i) (0x00300720 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC1023L_MAX_INDEX 3
+#define I40E_GLPRT_PTC1023L_PTC1023L_SHIFT 0
+#define I40E_GLPRT_PTC1023L_PTC1023L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC1023L_PTC1023L_SHIFT)
+#define I40E_GLPRT_PTC127H(_i) (0x003006C4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC127H_MAX_INDEX 3
+#define I40E_GLPRT_PTC127H_PTC127H_SHIFT 0
+#define I40E_GLPRT_PTC127H_PTC127H_MASK (0xFFFF << I40E_GLPRT_PTC127H_PTC127H_SHIFT)
+#define I40E_GLPRT_PTC127L(_i) (0x003006C0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC127L_MAX_INDEX 3
+#define I40E_GLPRT_PTC127L_PTC127L_SHIFT 0
+#define I40E_GLPRT_PTC127L_PTC127L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC127L_PTC127L_SHIFT)
+#define I40E_GLPRT_PTC1522H(_i) (0x00300744 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC1522H_MAX_INDEX 3
+#define I40E_GLPRT_PTC1522H_PTC1522H_SHIFT 0
+#define I40E_GLPRT_PTC1522H_PTC1522H_MASK (0xFFFF << I40E_GLPRT_PTC1522H_PTC1522H_SHIFT)
+#define I40E_GLPRT_PTC1522L(_i) (0x00300740 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC1522L_MAX_INDEX 3
+#define I40E_GLPRT_PTC1522L_PTC1522L_SHIFT 0
+#define I40E_GLPRT_PTC1522L_PTC1522L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC1522L_PTC1522L_SHIFT)
+#define I40E_GLPRT_PTC255H(_i) (0x003006E4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC255H_MAX_INDEX 3
+#define I40E_GLPRT_PTC255H_PTC255H_SHIFT 0
+#define I40E_GLPRT_PTC255H_PTC255H_MASK (0xFFFF << I40E_GLPRT_PTC255H_PTC255H_SHIFT)
+#define I40E_GLPRT_PTC255L(_i) (0x003006E0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC255L_MAX_INDEX 3
+#define I40E_GLPRT_PTC255L_PTC255L_SHIFT 0
+#define I40E_GLPRT_PTC255L_PTC255L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC255L_PTC255L_SHIFT)
+#define I40E_GLPRT_PTC511H(_i) (0x00300704 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC511H_MAX_INDEX 3
+#define I40E_GLPRT_PTC511H_PTC511H_SHIFT 0
+#define I40E_GLPRT_PTC511H_PTC511H_MASK (0xFFFF << I40E_GLPRT_PTC511H_PTC511H_SHIFT)
+#define I40E_GLPRT_PTC511L(_i) (0x00300700 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC511L_MAX_INDEX 3
+#define I40E_GLPRT_PTC511L_PTC511L_SHIFT 0
+#define I40E_GLPRT_PTC511L_PTC511L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC511L_PTC511L_SHIFT)
+#define I40E_GLPRT_PTC64H(_i) (0x003006A4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC64H_MAX_INDEX 3
+#define I40E_GLPRT_PTC64H_PTC64H_SHIFT 0
+#define I40E_GLPRT_PTC64H_PTC64H_MASK (0xFFFF << I40E_GLPRT_PTC64H_PTC64H_SHIFT)
+#define I40E_GLPRT_PTC64L(_i) (0x003006A0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC64L_MAX_INDEX 3
+#define I40E_GLPRT_PTC64L_PTC64L_SHIFT 0
+#define I40E_GLPRT_PTC64L_PTC64L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC64L_PTC64L_SHIFT)
+#define I40E_GLPRT_PTC9522H(_i) (0x00300764 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC9522H_MAX_INDEX 3
+#define I40E_GLPRT_PTC9522H_PTC9522H_SHIFT 0
+#define I40E_GLPRT_PTC9522H_PTC9522H_MASK (0xFFFF << I40E_GLPRT_PTC9522H_PTC9522H_SHIFT)
+#define I40E_GLPRT_PTC9522L(_i) (0x00300760 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC9522L_MAX_INDEX 3
+#define I40E_GLPRT_PTC9522L_PTC9522L_SHIFT 0
+#define I40E_GLPRT_PTC9522L_PTC9522L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC9522L_PTC9522L_SHIFT)
+#define I40E_GLPRT_PXOFFRXC(_i, _j) (0x00300280 + ((_i) * 8 + (_j) * 32))
+#define I40E_GLPRT_PXOFFRXC_MAX_INDEX 3
+#define I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_SHIFT 0
+#define I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_MASK (0xFFFFFFFF << I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_SHIFT)
+#define I40E_GLPRT_PXOFFTXC(_i, _j) (0x00300880 + ((_i) * 8 + (_j) * 32))
+#define I40E_GLPRT_PXOFFTXC_MAX_INDEX 3
+#define I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_SHIFT 0
+#define I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_MASK (0xFFFFFFFF << I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_SHIFT)
+#define I40E_GLPRT_PXONRXC(_i, _j) (0x00300180 + ((_i) * 8 + (_j) * 32))
+#define I40E_GLPRT_PXONRXC_MAX_INDEX 3
+#define I40E_GLPRT_PXONRXC_PRPXONRXCNT_SHIFT 0
+#define I40E_GLPRT_PXONRXC_PRPXONRXCNT_MASK (0xFFFFFFFF << I40E_GLPRT_PXONRXC_PRPXONRXCNT_SHIFT)
+#define I40E_GLPRT_PXONTXC(_i, _j) (0x00300780 + ((_i) * 8 + (_j) * 32))
+#define I40E_GLPRT_PXONTXC_MAX_INDEX 3
+#define I40E_GLPRT_PXONTXC_PRPXONTXC_SHIFT 0
+#define I40E_GLPRT_PXONTXC_PRPXONTXC_MASK (0xFFFFFFFF << I40E_GLPRT_PXONTXC_PRPXONTXC_SHIFT)
+#define I40E_GLPRT_RDPC(_i) (0x00300600 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_RDPC_MAX_INDEX 3
+#define I40E_GLPRT_RDPC_RDPC_SHIFT 0
+#define I40E_GLPRT_RDPC_RDPC_MASK (0xFFFFFFFF << I40E_GLPRT_RDPC_RDPC_SHIFT)
+#define I40E_GLPRT_RFC(_i) (0x00300560 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_RFC_MAX_INDEX 3
+#define I40E_GLPRT_RFC_RFC_SHIFT 0
+#define I40E_GLPRT_RFC_RFC_MASK (0xFFFFFFFF << I40E_GLPRT_RFC_RFC_SHIFT)
+#define I40E_GLPRT_RJC(_i) (0x00300580 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_RJC_MAX_INDEX 3
+#define I40E_GLPRT_RJC_RJC_SHIFT 0
+#define I40E_GLPRT_RJC_RJC_MASK (0xFFFFFFFF << I40E_GLPRT_RJC_RJC_SHIFT)
+#define I40E_GLPRT_RLEC(_i) (0x003000A0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_RLEC_MAX_INDEX 3
+#define I40E_GLPRT_RLEC_RLEC_SHIFT 0
+#define I40E_GLPRT_RLEC_RLEC_MASK (0xFFFFFFFF << I40E_GLPRT_RLEC_RLEC_SHIFT)
+#define I40E_GLPRT_ROC(_i) (0x00300120 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_ROC_MAX_INDEX 3
+#define I40E_GLPRT_ROC_ROC_SHIFT 0
+#define I40E_GLPRT_ROC_ROC_MASK (0xFFFFFFFF << I40E_GLPRT_ROC_ROC_SHIFT)
+#define I40E_GLPRT_RUC(_i) (0x00300100 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_RUC_MAX_INDEX 3
+#define I40E_GLPRT_RUC_RUC_SHIFT 0
+#define I40E_GLPRT_RUC_RUC_MASK (0xFFFFFFFF << I40E_GLPRT_RUC_RUC_SHIFT)
+#define I40E_GLPRT_RUPP(_i) (0x00300660 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_RUPP_MAX_INDEX 3
+#define I40E_GLPRT_RUPP_RUPP_SHIFT 0
+#define I40E_GLPRT_RUPP_RUPP_MASK (0xFFFFFFFF << I40E_GLPRT_RUPP_RUPP_SHIFT)
+#define I40E_GLPRT_RXON2OFFCNT(_i, _j) (0x00300380 + ((_i) * 8 + (_j) * 32))
+#define I40E_GLPRT_RXON2OFFCNT_MAX_INDEX 3
+#define I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_SHIFT 0
+#define I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_MASK (0xFFFFFFFF << I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_SHIFT)
+#define I40E_GLPRT_STDC(_i) (0x00300640 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_STDC_MAX_INDEX 3
+#define I40E_GLPRT_STDC_STDC_SHIFT 0
+#define I40E_GLPRT_STDC_STDC_MASK (0xFFFFFFFF << I40E_GLPRT_STDC_STDC_SHIFT)
+#define I40E_GLPRT_TDOLD(_i) (0x00300A20 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_TDOLD_MAX_INDEX 3
+#define I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT 0
+#define I40E_GLPRT_TDOLD_GLPRT_TDOLD_MASK (0xFFFFFFFF << I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT)
+#define I40E_GLPRT_TDPC(_i) (0x00375400 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_TDPC_MAX_INDEX 3
+#define I40E_GLPRT_TDPC_TDPC_SHIFT 0
+#define I40E_GLPRT_TDPC_TDPC_MASK (0xFFFFFFFF << I40E_GLPRT_TDPC_TDPC_SHIFT)
+#define I40E_GLPRT_UPRCH(_i) (0x003005A4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_UPRCH_MAX_INDEX 3
+#define I40E_GLPRT_UPRCH_UPRCH_SHIFT 0
+#define I40E_GLPRT_UPRCH_UPRCH_MASK (0xFFFF << I40E_GLPRT_UPRCH_UPRCH_SHIFT)
+#define I40E_GLPRT_UPRCL(_i) (0x003005A0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_UPRCL_MAX_INDEX 3
+#define I40E_GLPRT_UPRCL_UPRCL_SHIFT 0
+#define I40E_GLPRT_UPRCL_UPRCL_MASK (0xFFFFFFFF << I40E_GLPRT_UPRCL_UPRCL_SHIFT)
+#define I40E_GLPRT_UPTCH(_i) (0x003009C4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_UPTCH_MAX_INDEX 3
+#define I40E_GLPRT_UPTCH_UPTCH_SHIFT 0
+#define I40E_GLPRT_UPTCH_UPTCH_MASK (0xFFFF << I40E_GLPRT_UPTCH_UPTCH_SHIFT)
+#define I40E_GLPRT_UPTCL(_i) (0x003009C0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_UPTCL_MAX_INDEX 3
+#define I40E_GLPRT_UPTCL_VUPTCH_SHIFT 0
+#define I40E_GLPRT_UPTCL_VUPTCH_MASK (0xFFFFFFFF << I40E_GLPRT_UPTCL_VUPTCH_SHIFT)
+#define I40E_GLSW_BPRCH(_i) (0x00370104 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_BPRCH_MAX_INDEX 15
+#define I40E_GLSW_BPRCH_BPRCH_SHIFT 0
+#define I40E_GLSW_BPRCH_BPRCH_MASK (0xFFFF << I40E_GLSW_BPRCH_BPRCH_SHIFT)
+#define I40E_GLSW_BPRCL(_i) (0x00370100 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_BPRCL_MAX_INDEX 15
+#define I40E_GLSW_BPRCL_BPRCL_SHIFT 0
+#define I40E_GLSW_BPRCL_BPRCL_MASK (0xFFFFFFFF << I40E_GLSW_BPRCL_BPRCL_SHIFT)
+#define I40E_GLSW_BPTCH(_i) (0x00340104 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_BPTCH_MAX_INDEX 15
+#define I40E_GLSW_BPTCH_BPTCH_SHIFT 0
+#define I40E_GLSW_BPTCH_BPTCH_MASK (0xFFFF << I40E_GLSW_BPTCH_BPTCH_SHIFT)
+#define I40E_GLSW_BPTCL(_i) (0x00340100 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_BPTCL_MAX_INDEX 15
+#define I40E_GLSW_BPTCL_BPTCL_SHIFT 0
+#define I40E_GLSW_BPTCL_BPTCL_MASK (0xFFFFFFFF << I40E_GLSW_BPTCL_BPTCL_SHIFT)
+#define I40E_GLSW_GORCH(_i) (0x0035C004 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_GORCH_MAX_INDEX 15
+#define I40E_GLSW_GORCH_GORCH_SHIFT 0
+#define I40E_GLSW_GORCH_GORCH_MASK (0xFFFF << I40E_GLSW_GORCH_GORCH_SHIFT)
+#define I40E_GLSW_GORCL(_i) (0x0035c000 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_GORCL_MAX_INDEX 15
+#define I40E_GLSW_GORCL_GORCL_SHIFT 0
+#define I40E_GLSW_GORCL_GORCL_MASK (0xFFFFFFFF << I40E_GLSW_GORCL_GORCL_SHIFT)
+#define I40E_GLSW_GOTCH(_i) (0x0032C004 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_GOTCH_MAX_INDEX 15
+#define I40E_GLSW_GOTCH_GOTCH_SHIFT 0
+#define I40E_GLSW_GOTCH_GOTCH_MASK (0xFFFF << I40E_GLSW_GOTCH_GOTCH_SHIFT)
+#define I40E_GLSW_GOTCL(_i) (0x0032c000 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_GOTCL_MAX_INDEX 15
+#define I40E_GLSW_GOTCL_GOTCL_SHIFT 0
+#define I40E_GLSW_GOTCL_GOTCL_MASK (0xFFFFFFFF << I40E_GLSW_GOTCL_GOTCL_SHIFT)
+#define I40E_GLSW_MPRCH(_i) (0x00370084 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_MPRCH_MAX_INDEX 15
+#define I40E_GLSW_MPRCH_MPRCH_SHIFT 0
+#define I40E_GLSW_MPRCH_MPRCH_MASK (0xFFFF << I40E_GLSW_MPRCH_MPRCH_SHIFT)
+#define I40E_GLSW_MPRCL(_i) (0x00370080 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_MPRCL_MAX_INDEX 15
+#define I40E_GLSW_MPRCL_MPRCL_SHIFT 0
+#define I40E_GLSW_MPRCL_MPRCL_MASK (0xFFFFFFFF << I40E_GLSW_MPRCL_MPRCL_SHIFT)
+#define I40E_GLSW_MPTCH(_i) (0x00340084 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_MPTCH_MAX_INDEX 15
+#define I40E_GLSW_MPTCH_MPTCH_SHIFT 0
+#define I40E_GLSW_MPTCH_MPTCH_MASK (0xFFFF << I40E_GLSW_MPTCH_MPTCH_SHIFT)
+#define I40E_GLSW_MPTCL(_i) (0x00340080 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_MPTCL_MAX_INDEX 15
+#define I40E_GLSW_MPTCL_MPTCL_SHIFT 0
+#define I40E_GLSW_MPTCL_MPTCL_MASK (0xFFFFFFFF << I40E_GLSW_MPTCL_MPTCL_SHIFT)
+#define I40E_GLSW_RUPP(_i) (0x00370180 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_RUPP_MAX_INDEX 15
+#define I40E_GLSW_RUPP_RUPP_SHIFT 0
+#define I40E_GLSW_RUPP_RUPP_MASK (0xFFFFFFFF << I40E_GLSW_RUPP_RUPP_SHIFT)
+#define I40E_GLSW_TDPC(_i) (0x00348000 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_TDPC_MAX_INDEX 15
+#define I40E_GLSW_TDPC_TDPC_SHIFT 0
+#define I40E_GLSW_TDPC_TDPC_MASK (0xFFFFFFFF << I40E_GLSW_TDPC_TDPC_SHIFT)
+#define I40E_GLSW_UPRCH(_i) (0x00370004 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_UPRCH_MAX_INDEX 15
+#define I40E_GLSW_UPRCH_UPRCH_SHIFT 0
+#define I40E_GLSW_UPRCH_UPRCH_MASK (0xFFFF << I40E_GLSW_UPRCH_UPRCH_SHIFT)
+#define I40E_GLSW_UPRCL(_i) (0x00370000 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_UPRCL_MAX_INDEX 15
+#define I40E_GLSW_UPRCL_UPRCL_SHIFT 0
+#define I40E_GLSW_UPRCL_UPRCL_MASK (0xFFFFFFFF << I40E_GLSW_UPRCL_UPRCL_SHIFT)
+#define I40E_GLSW_UPTCH(_i) (0x00340004 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_UPTCH_MAX_INDEX 15
+#define I40E_GLSW_UPTCH_UPTCH_SHIFT 0
+#define I40E_GLSW_UPTCH_UPTCH_MASK (0xFFFF << I40E_GLSW_UPTCH_UPTCH_SHIFT)
+#define I40E_GLSW_UPTCL(_i) (0x00340000 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_UPTCL_MAX_INDEX 15
+#define I40E_GLSW_UPTCL_UPTCL_SHIFT 0
+#define I40E_GLSW_UPTCL_UPTCL_MASK (0xFFFFFFFF << I40E_GLSW_UPTCL_UPTCL_SHIFT)
+#define I40E_GLV_BPRCH(_i) (0x0036D804 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_BPRCH_MAX_INDEX 383
+#define I40E_GLV_BPRCH_BPRCH_SHIFT 0
+#define I40E_GLV_BPRCH_BPRCH_MASK (0xFFFF << I40E_GLV_BPRCH_BPRCH_SHIFT)
+#define I40E_GLV_BPRCL(_i) (0x0036d800 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_BPRCL_MAX_INDEX 383
+#define I40E_GLV_BPRCL_BPRCL_SHIFT 0
+#define I40E_GLV_BPRCL_BPRCL_MASK (0xFFFFFFFF << I40E_GLV_BPRCL_BPRCL_SHIFT)
+#define I40E_GLV_BPTCH(_i) (0x0033D804 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_BPTCH_MAX_INDEX 383
+#define I40E_GLV_BPTCH_BPTCH_SHIFT 0
+#define I40E_GLV_BPTCH_BPTCH_MASK (0xFFFF << I40E_GLV_BPTCH_BPTCH_SHIFT)
+#define I40E_GLV_BPTCL(_i) (0x0033d800 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_BPTCL_MAX_INDEX 383
+#define I40E_GLV_BPTCL_BPTCL_SHIFT 0
+#define I40E_GLV_BPTCL_BPTCL_MASK (0xFFFFFFFF << I40E_GLV_BPTCL_BPTCL_SHIFT)
+#define I40E_GLV_GORCH(_i) (0x00358004 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_GORCH_MAX_INDEX 383
+#define I40E_GLV_GORCH_GORCH_SHIFT 0
+#define I40E_GLV_GORCH_GORCH_MASK (0xFFFF << I40E_GLV_GORCH_GORCH_SHIFT)
+#define I40E_GLV_GORCL(_i) (0x00358000 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_GORCL_MAX_INDEX 383
+#define I40E_GLV_GORCL_GORCL_SHIFT 0
+#define I40E_GLV_GORCL_GORCL_MASK (0xFFFFFFFF << I40E_GLV_GORCL_GORCL_SHIFT)
+#define I40E_GLV_GOTCH(_i) (0x00328004 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_GOTCH_MAX_INDEX 383
+#define I40E_GLV_GOTCH_GOTCH_SHIFT 0
+#define I40E_GLV_GOTCH_GOTCH_MASK (0xFFFF << I40E_GLV_GOTCH_GOTCH_SHIFT)
+#define I40E_GLV_GOTCL(_i) (0x00328000 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_GOTCL_MAX_INDEX 383
+#define I40E_GLV_GOTCL_GOTCL_SHIFT 0
+#define I40E_GLV_GOTCL_GOTCL_MASK (0xFFFFFFFF << I40E_GLV_GOTCL_GOTCL_SHIFT)
+#define I40E_GLV_MPRCH(_i) (0x0036CC04 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_MPRCH_MAX_INDEX 383
+#define I40E_GLV_MPRCH_MPRCH_SHIFT 0
+#define I40E_GLV_MPRCH_MPRCH_MASK (0xFFFF << I40E_GLV_MPRCH_MPRCH_SHIFT)
+#define I40E_GLV_MPRCL(_i) (0x0036cc00 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_MPRCL_MAX_INDEX 383
+#define I40E_GLV_MPRCL_MPRCL_SHIFT 0
+#define I40E_GLV_MPRCL_MPRCL_MASK (0xFFFFFFFF << I40E_GLV_MPRCL_MPRCL_SHIFT)
+#define I40E_GLV_MPTCH(_i) (0x0033CC04 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_MPTCH_MAX_INDEX 383
+#define I40E_GLV_MPTCH_MPTCH_SHIFT 0
+#define I40E_GLV_MPTCH_MPTCH_MASK (0xFFFF << I40E_GLV_MPTCH_MPTCH_SHIFT)
+#define I40E_GLV_MPTCL(_i) (0x0033cc00 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_MPTCL_MAX_INDEX 383
+#define I40E_GLV_MPTCL_MPTCL_SHIFT 0
+#define I40E_GLV_MPTCL_MPTCL_MASK (0xFFFFFFFF << I40E_GLV_MPTCL_MPTCL_SHIFT)
+#define I40E_GLV_RDPC(_i) (0x00310000 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_RDPC_MAX_INDEX 383
+#define I40E_GLV_RDPC_RDPC_SHIFT 0
+#define I40E_GLV_RDPC_RDPC_MASK (0xFFFFFFFF << I40E_GLV_RDPC_RDPC_SHIFT)
+#define I40E_GLV_RUPP(_i) (0x0036E400 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_RUPP_MAX_INDEX 383
+#define I40E_GLV_RUPP_RUPP_SHIFT 0
+#define I40E_GLV_RUPP_RUPP_MASK (0xFFFFFFFF << I40E_GLV_RUPP_RUPP_SHIFT)
+#define I40E_GLV_TEPC(_VSI) (0x00344000 + ((_VSI) * 8)) /* _VSI=0...383 */
+#define I40E_GLV_TEPC_MAX_INDEX 383
+#define I40E_GLV_TEPC_TEPC_SHIFT 0
+#define I40E_GLV_TEPC_TEPC_MASK (0xFFFFFFFF << I40E_GLV_TEPC_TEPC_SHIFT)
+#define I40E_GLV_UPRCH(_i) (0x0036C004 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_UPRCH_MAX_INDEX 383
+#define I40E_GLV_UPRCH_UPRCH_SHIFT 0
+#define I40E_GLV_UPRCH_UPRCH_MASK (0xFFFF << I40E_GLV_UPRCH_UPRCH_SHIFT)
+#define I40E_GLV_UPRCL(_i) (0x0036c000 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_UPRCL_MAX_INDEX 383
+#define I40E_GLV_UPRCL_UPRCL_SHIFT 0
+#define I40E_GLV_UPRCL_UPRCL_MASK (0xFFFFFFFF << I40E_GLV_UPRCL_UPRCL_SHIFT)
+#define I40E_GLV_UPTCH(_i) (0x0033C004 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_UPTCH_MAX_INDEX 383
+#define I40E_GLV_UPTCH_GLVUPTCH_SHIFT 0
+#define I40E_GLV_UPTCH_GLVUPTCH_MASK (0xFFFF << I40E_GLV_UPTCH_GLVUPTCH_SHIFT)
+#define I40E_GLV_UPTCL(_i) (0x0033c000 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_UPTCL_MAX_INDEX 383
+#define I40E_GLV_UPTCL_UPTCL_SHIFT 0
+#define I40E_GLV_UPTCL_UPTCL_MASK (0xFFFFFFFF << I40E_GLV_UPTCL_UPTCL_SHIFT)
+#define I40E_GLVEBTC_RBCH(_i, _j) (0x00364004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLVEBTC_RBCH_MAX_INDEX 7
+#define I40E_GLVEBTC_RBCH_TCBCH_SHIFT 0
+#define I40E_GLVEBTC_RBCH_TCBCH_MASK (0xFFFF << I40E_GLVEBTC_RBCH_TCBCH_SHIFT)
+#define I40E_GLVEBTC_RBCL(_i, _j) (0x00364000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLVEBTC_RBCL_MAX_INDEX 7
+#define I40E_GLVEBTC_RBCL_TCBCL_SHIFT 0
+#define I40E_GLVEBTC_RBCL_TCBCL_MASK (0xFFFFFFFF << I40E_GLVEBTC_RBCL_TCBCL_SHIFT)
+#define I40E_GLVEBTC_RPCH(_i, _j) (0x00368004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLVEBTC_RPCH_MAX_INDEX 7
+#define I40E_GLVEBTC_RPCH_TCPCH_SHIFT 0
+#define I40E_GLVEBTC_RPCH_TCPCH_MASK (0xFFFF << I40E_GLVEBTC_RPCH_TCPCH_SHIFT)
+#define I40E_GLVEBTC_RPCL(_i, _j) (0x00368000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLVEBTC_RPCL_MAX_INDEX 7
+#define I40E_GLVEBTC_RPCL_TCPCL_SHIFT 0
+#define I40E_GLVEBTC_RPCL_TCPCL_MASK (0xFFFFFFFF << I40E_GLVEBTC_RPCL_TCPCL_SHIFT)
+#define I40E_GLVEBTC_TBCH(_i, _j) (0x00334004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLVEBTC_TBCH_MAX_INDEX 7
+#define I40E_GLVEBTC_TBCH_TCBCH_SHIFT 0
+#define I40E_GLVEBTC_TBCH_TCBCH_MASK (0xFFFF << I40E_GLVEBTC_TBCH_TCBCH_SHIFT)
+#define I40E_GLVEBTC_TBCL(_i, _j) (0x00334000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLVEBTC_TBCL_MAX_INDEX 7
+#define I40E_GLVEBTC_TBCL_TCBCL_SHIFT 0
+#define I40E_GLVEBTC_TBCL_TCBCL_MASK (0xFFFFFFFF << I40E_GLVEBTC_TBCL_TCBCL_SHIFT)
+#define I40E_GLVEBTC_TPCH(_i, _j) (0x00338004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLVEBTC_TPCH_MAX_INDEX 7
+#define I40E_GLVEBTC_TPCH_TCPCH_SHIFT 0
+#define I40E_GLVEBTC_TPCH_TCPCH_MASK (0xFFFF << I40E_GLVEBTC_TPCH_TCPCH_SHIFT)
+#define I40E_GLVEBTC_TPCL(_i, _j) (0x00338000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLVEBTC_TPCL_MAX_INDEX 7
+#define I40E_GLVEBTC_TPCL_TCPCL_SHIFT 0
+#define I40E_GLVEBTC_TPCL_TCPCL_MASK (0xFFFFFFFF << I40E_GLVEBTC_TPCL_TCPCL_SHIFT)
+#define I40E_GLVEBVL_BPCH(_i) (0x00374804 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_BPCH_MAX_INDEX 127
+#define I40E_GLVEBVL_BPCH_VLBPCH_SHIFT 0
+#define I40E_GLVEBVL_BPCH_VLBPCH_MASK (0xFFFF << I40E_GLVEBVL_BPCH_VLBPCH_SHIFT)
+#define I40E_GLVEBVL_BPCL(_i) (0x00374800 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_BPCL_MAX_INDEX 127
+#define I40E_GLVEBVL_BPCL_VLBPCL_SHIFT 0
+#define I40E_GLVEBVL_BPCL_VLBPCL_MASK (0xFFFFFFFF << I40E_GLVEBVL_BPCL_VLBPCL_SHIFT)
+#define I40E_GLVEBVL_GORCH(_i) (0x00360004 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_GORCH_MAX_INDEX 127
+#define I40E_GLVEBVL_GORCH_VLBCH_SHIFT 0
+#define I40E_GLVEBVL_GORCH_VLBCH_MASK (0xFFFF << I40E_GLVEBVL_GORCH_VLBCH_SHIFT)
+#define I40E_GLVEBVL_GORCL(_i) (0x00360000 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_GORCL_MAX_INDEX 127
+#define I40E_GLVEBVL_GORCL_VLBCL_SHIFT 0
+#define I40E_GLVEBVL_GORCL_VLBCL_MASK (0xFFFFFFFF << I40E_GLVEBVL_GORCL_VLBCL_SHIFT)
+#define I40E_GLVEBVL_GOTCH(_i) (0x00330004 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_GOTCH_MAX_INDEX 127
+#define I40E_GLVEBVL_GOTCH_VLBCH_SHIFT 0
+#define I40E_GLVEBVL_GOTCH_VLBCH_MASK (0xFFFF << I40E_GLVEBVL_GOTCH_VLBCH_SHIFT)
+#define I40E_GLVEBVL_GOTCL(_i) (0x00330000 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_GOTCL_MAX_INDEX 127
+#define I40E_GLVEBVL_GOTCL_VLBCL_SHIFT 0
+#define I40E_GLVEBVL_GOTCL_VLBCL_MASK (0xFFFFFFFF << I40E_GLVEBVL_GOTCL_VLBCL_SHIFT)
+#define I40E_GLVEBVL_MPCH(_i) (0x00374404 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_MPCH_MAX_INDEX 127
+#define I40E_GLVEBVL_MPCH_VLMPCH_SHIFT 0
+#define I40E_GLVEBVL_MPCH_VLMPCH_MASK (0xFFFF << I40E_GLVEBVL_MPCH_VLMPCH_SHIFT)
+#define I40E_GLVEBVL_MPCL(_i) (0x00374400 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_MPCL_MAX_INDEX 127
+#define I40E_GLVEBVL_MPCL_VLMPCL_SHIFT 0
+#define I40E_GLVEBVL_MPCL_VLMPCL_MASK (0xFFFFFFFF << I40E_GLVEBVL_MPCL_VLMPCL_SHIFT)
+#define I40E_GLVEBVL_UPCH(_i) (0x00374004 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_UPCH_MAX_INDEX 127
+#define I40E_GLVEBVL_UPCH_VLUPCH_SHIFT 0
+#define I40E_GLVEBVL_UPCH_VLUPCH_MASK (0xFFFF << I40E_GLVEBVL_UPCH_VLUPCH_SHIFT)
+#define I40E_GLVEBVL_UPCL(_i) (0x00374000 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_UPCL_MAX_INDEX 127
+#define I40E_GLVEBVL_UPCL_VLUPCL_SHIFT 0
+#define I40E_GLVEBVL_UPCL_VLUPCL_MASK (0xFFFFFFFF << I40E_GLVEBVL_UPCL_VLUPCL_SHIFT)
+#define I40E_GL_MTG_FLU_MSK_H 0x00269F4C
+#define I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_SHIFT 0
+#define I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_MASK (0xFFFF << I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_SHIFT)
+#define I40E_GL_MTG_FLU_MSK_L 0x00269F44
+#define I40E_GL_MTG_FLU_MSK_L_MASK_LOW_SHIFT 0
+#define I40E_GL_MTG_FLU_MSK_L_MASK_LOW_MASK (0xFFFFFFFF << I40E_GL_MTG_FLU_MSK_L_MASK_LOW_SHIFT)
+#define I40E_GL_SWR_DEF_ACT(_i) (0x0026CF00 + ((_i) * 4)) /* _i=0...25 */
+#define I40E_GL_SWR_DEF_ACT_MAX_INDEX 25
+#define I40E_GL_SWR_DEF_ACT_DEF_ACTION_SHIFT 0
+#define I40E_GL_SWR_DEF_ACT_DEF_ACTION_MASK (0xFFFFFFFF << I40E_GL_SWR_DEF_ACT_DEF_ACTION_SHIFT)
+#define I40E_GL_SWR_DEF_ACT_EN 0x0026CF84
+#define I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_SHIFT 0
+#define I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_MASK (0xFFFFFFFF << I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_SHIFT)
+#define I40E_PRT_MSCCNT 0x00256BA0
+#define I40E_PRT_MSCCNT_CCOUNT_SHIFT 0
+#define I40E_PRT_MSCCNT_CCOUNT_MASK (0x1FFFFFF << I40E_PRT_MSCCNT_CCOUNT_SHIFT)
+#define I40E_PRT_SCSTS 0x00256C20
+#define I40E_PRT_SCSTS_BSCA_SHIFT 0
+#define I40E_PRT_SCSTS_BSCA_MASK (0x1 << I40E_PRT_SCSTS_BSCA_SHIFT)
+#define I40E_PRT_SCSTS_BSCAP_SHIFT 1
+#define I40E_PRT_SCSTS_BSCAP_MASK (0x1 << I40E_PRT_SCSTS_BSCAP_SHIFT)
+#define I40E_PRT_SCSTS_MSCA_SHIFT 2
+#define I40E_PRT_SCSTS_MSCA_MASK (0x1 << I40E_PRT_SCSTS_MSCA_SHIFT)
+#define I40E_PRT_SCSTS_MSCAP_SHIFT 3
+#define I40E_PRT_SCSTS_MSCAP_MASK (0x1 << I40E_PRT_SCSTS_MSCAP_SHIFT)
+#define I40E_PRT_SWT_BSCCNT 0x00256C60
+#define I40E_PRT_SWT_BSCCNT_CCOUNT_SHIFT 0
+#define I40E_PRT_SWT_BSCCNT_CCOUNT_MASK (0x1FFFFFF << I40E_PRT_SWT_BSCCNT_CCOUNT_SHIFT)
+#define I40E_PRTTSYN_ADJ 0x001E4280
+#define I40E_PRTTSYN_ADJ_TSYNADJ_SHIFT 0
+#define I40E_PRTTSYN_ADJ_TSYNADJ_MASK (0x7FFFFFFF << I40E_PRTTSYN_ADJ_TSYNADJ_SHIFT)
+#define I40E_PRTTSYN_ADJ_SIGN_SHIFT 31
+#define I40E_PRTTSYN_ADJ_SIGN_MASK (0x1 << I40E_PRTTSYN_ADJ_SIGN_SHIFT)
+#define I40E_PRTTSYN_AUX_0(_i) (0x001E42A0 + ((_i) * 32)) /* _i=0...1 */
+#define I40E_PRTTSYN_AUX_0_MAX_INDEX 1
+#define I40E_PRTTSYN_AUX_0_OUT_ENA_SHIFT 0
+#define I40E_PRTTSYN_AUX_0_OUT_ENA_MASK (0x1 << I40E_PRTTSYN_AUX_0_OUT_ENA_SHIFT)
+#define I40E_PRTTSYN_AUX_0_OUTMOD_SHIFT 1
+#define I40E_PRTTSYN_AUX_0_OUTMOD_MASK (0x3 << I40E_PRTTSYN_AUX_0_OUTMOD_SHIFT)
+#define I40E_PRTTSYN_AUX_0_OUTLVL_SHIFT 3
+#define I40E_PRTTSYN_AUX_0_OUTLVL_MASK (0x1 << I40E_PRTTSYN_AUX_0_OUTLVL_SHIFT)
+#define I40E_PRTTSYN_AUX_0_PULSEW_SHIFT 8
+#define I40E_PRTTSYN_AUX_0_PULSEW_MASK (0xF << I40E_PRTTSYN_AUX_0_PULSEW_SHIFT)
+#define I40E_PRTTSYN_AUX_0_EVNTLVL_SHIFT 16
+#define I40E_PRTTSYN_AUX_0_EVNTLVL_MASK (0x3 << I40E_PRTTSYN_AUX_0_EVNTLVL_SHIFT)
+#define I40E_PRTTSYN_AUX_1(_i) (0x001E42E0 + ((_i) * 32)) /* _i=0...1 */
+#define I40E_PRTTSYN_AUX_1_MAX_INDEX 1
+#define I40E_PRTTSYN_AUX_1_INSTNT_SHIFT 0
+#define I40E_PRTTSYN_AUX_1_INSTNT_MASK (0x1 << I40E_PRTTSYN_AUX_1_INSTNT_SHIFT)
+#define I40E_PRTTSYN_AUX_1_SAMPLE_TIME_SHIFT 1
+#define I40E_PRTTSYN_AUX_1_SAMPLE_TIME_MASK (0x1 << I40E_PRTTSYN_AUX_1_SAMPLE_TIME_SHIFT)
+#define I40E_PRTTSYN_CLKO(_i) (0x001E4240 + ((_i) * 32)) /* _i=0...1 */
+#define I40E_PRTTSYN_CLKO_MAX_INDEX 1
+#define I40E_PRTTSYN_CLKO_TSYNCLKO_SHIFT 0
+#define I40E_PRTTSYN_CLKO_TSYNCLKO_MASK (0xFFFFFFFF << I40E_PRTTSYN_CLKO_TSYNCLKO_SHIFT)
+#define I40E_PRTTSYN_CTL0 0x001E4200
+#define I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_SHIFT 0
+#define I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_MASK (0x1 << I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_SHIFT)
+#define I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_SHIFT 1
+#define I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_MASK (0x1 << I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_SHIFT)
+#define I40E_PRTTSYN_CTL0_EVENT_INT_ENA_SHIFT 2
+#define I40E_PRTTSYN_CTL0_EVENT_INT_ENA_MASK (0x1 << I40E_PRTTSYN_CTL0_EVENT_INT_ENA_SHIFT)
+#define I40E_PRTTSYN_CTL0_TGT_INT_ENA_SHIFT 3
+#define I40E_PRTTSYN_CTL0_TGT_INT_ENA_MASK (0x1 << I40E_PRTTSYN_CTL0_TGT_INT_ENA_SHIFT)
+#define I40E_PRTTSYN_CTL0_PF_ID_SHIFT 8
+#define I40E_PRTTSYN_CTL0_PF_ID_MASK (0xF << I40E_PRTTSYN_CTL0_PF_ID_SHIFT)
+#define I40E_PRTTSYN_CTL0_TSYNACT_SHIFT 12
+#define I40E_PRTTSYN_CTL0_TSYNACT_MASK (0x3 << I40E_PRTTSYN_CTL0_TSYNACT_SHIFT)
+#define I40E_PRTTSYN_CTL0_TSYNENA_SHIFT 31
+#define I40E_PRTTSYN_CTL0_TSYNENA_MASK (0x1 << I40E_PRTTSYN_CTL0_TSYNENA_SHIFT)
+#define I40E_PRTTSYN_CTL1 0x00085020
+#define I40E_PRTTSYN_CTL1_V1MESSTYPE0_SHIFT 0
+#define I40E_PRTTSYN_CTL1_V1MESSTYPE0_MASK (0xFF << I40E_PRTTSYN_CTL1_V1MESSTYPE0_SHIFT)
+#define I40E_PRTTSYN_CTL1_V1MESSTYPE1_SHIFT 8
+#define I40E_PRTTSYN_CTL1_V1MESSTYPE1_MASK (0xFF << I40E_PRTTSYN_CTL1_V1MESSTYPE1_SHIFT)
+#define I40E_PRTTSYN_CTL1_V2MESSTYPE0_SHIFT 16
+#define I40E_PRTTSYN_CTL1_V2MESSTYPE0_MASK (0xF << I40E_PRTTSYN_CTL1_V2MESSTYPE0_SHIFT)
+#define I40E_PRTTSYN_CTL1_V2MESSTYPE1_SHIFT 20
+#define I40E_PRTTSYN_CTL1_V2MESSTYPE1_MASK (0xF << I40E_PRTTSYN_CTL1_V2MESSTYPE1_SHIFT)
+#define I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT 24
+#define I40E_PRTTSYN_CTL1_TSYNTYPE_MASK (0x3 << I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
+#define I40E_PRTTSYN_CTL1_UDP_ENA_SHIFT 26
+#define I40E_PRTTSYN_CTL1_UDP_ENA_MASK (0x3 << I40E_PRTTSYN_CTL1_UDP_ENA_SHIFT)
+#define I40E_PRTTSYN_CTL1_TSYNENA_SHIFT 31
+#define I40E_PRTTSYN_CTL1_TSYNENA_MASK (0x1 << I40E_PRTTSYN_CTL1_TSYNENA_SHIFT)
+#define I40E_PRTTSYN_EVNT_H(_i) (0x001E40C0 + ((_i) * 32)) /* _i=0...1 */
+#define I40E_PRTTSYN_EVNT_H_MAX_INDEX 1
+#define I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_SHIFT 0
+#define I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_MASK (0xFFFFFFFF << I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_SHIFT)
+#define I40E_PRTTSYN_EVNT_L(_i) (0x001E4080 + ((_i) * 32)) /* _i=0...1 */
+#define I40E_PRTTSYN_EVNT_L_MAX_INDEX 1
+#define I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_SHIFT 0
+#define I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_MASK (0xFFFFFFFF << I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_SHIFT)
+#define I40E_PRTTSYN_INC_H 0x001E4060
+#define I40E_PRTTSYN_INC_H_TSYNINC_H_SHIFT 0
+#define I40E_PRTTSYN_INC_H_TSYNINC_H_MASK (0x3F << I40E_PRTTSYN_INC_H_TSYNINC_H_SHIFT)
+#define I40E_PRTTSYN_INC_L 0x001E4040
+#define I40E_PRTTSYN_INC_L_TSYNINC_L_SHIFT 0
+#define I40E_PRTTSYN_INC_L_TSYNINC_L_MASK (0xFFFFFFFF << I40E_PRTTSYN_INC_L_TSYNINC_L_SHIFT)
+#define I40E_PRTTSYN_RXTIME_H(_i) (0x00085040 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRTTSYN_RXTIME_H_MAX_INDEX 3
+#define I40E_PRTTSYN_RXTIME_H_RXTIEM_H_SHIFT 0
+#define I40E_PRTTSYN_RXTIME_H_RXTIEM_H_MASK (0xFFFFFFFF << I40E_PRTTSYN_RXTIME_H_RXTIEM_H_SHIFT)
+#define I40E_PRTTSYN_RXTIME_L(_i) (0x000850C0 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRTTSYN_RXTIME_L_MAX_INDEX 3
+#define I40E_PRTTSYN_RXTIME_L_RXTIEM_L_SHIFT 0
+#define I40E_PRTTSYN_RXTIME_L_RXTIEM_L_MASK (0xFFFFFFFF << I40E_PRTTSYN_RXTIME_L_RXTIEM_L_SHIFT)
+#define I40E_PRTTSYN_STAT_0 0x001E4220
+#define I40E_PRTTSYN_STAT_0_EVENT0_SHIFT 0
+#define I40E_PRTTSYN_STAT_0_EVENT0_MASK (0x1 << I40E_PRTTSYN_STAT_0_EVENT0_SHIFT)
+#define I40E_PRTTSYN_STAT_0_EVENT1_SHIFT 1
+#define I40E_PRTTSYN_STAT_0_EVENT1_MASK (0x1 << I40E_PRTTSYN_STAT_0_EVENT1_SHIFT)
+#define I40E_PRTTSYN_STAT_0_TGT0_SHIFT 2
+#define I40E_PRTTSYN_STAT_0_TGT0_MASK (0x1 << I40E_PRTTSYN_STAT_0_TGT0_SHIFT)
+#define I40E_PRTTSYN_STAT_0_TGT1_SHIFT 3
+#define I40E_PRTTSYN_STAT_0_TGT1_MASK (0x1 << I40E_PRTTSYN_STAT_0_TGT1_SHIFT)
+#define I40E_PRTTSYN_STAT_0_TXTIME_SHIFT 4
+#define I40E_PRTTSYN_STAT_0_TXTIME_MASK (0x1 << I40E_PRTTSYN_STAT_0_TXTIME_SHIFT)
+#define I40E_PRTTSYN_STAT_1 0x00085140
+#define I40E_PRTTSYN_STAT_1_RXT0_SHIFT 0
+#define I40E_PRTTSYN_STAT_1_RXT0_MASK (0x1 << I40E_PRTTSYN_STAT_1_RXT0_SHIFT)
+#define I40E_PRTTSYN_STAT_1_RXT1_SHIFT 1
+#define I40E_PRTTSYN_STAT_1_RXT1_MASK (0x1 << I40E_PRTTSYN_STAT_1_RXT1_SHIFT)
+#define I40E_PRTTSYN_STAT_1_RXT2_SHIFT 2
+#define I40E_PRTTSYN_STAT_1_RXT2_MASK (0x1 << I40E_PRTTSYN_STAT_1_RXT2_SHIFT)
+#define I40E_PRTTSYN_STAT_1_RXT3_SHIFT 3
+#define I40E_PRTTSYN_STAT_1_RXT3_MASK (0x1 << I40E_PRTTSYN_STAT_1_RXT3_SHIFT)
+#define I40E_PRTTSYN_TGT_H(_i) (0x001E4180 + ((_i) * 32)) /* _i=0...1 */
+#define I40E_PRTTSYN_TGT_H_MAX_INDEX 1
+#define I40E_PRTTSYN_TGT_H_TSYNTGTT_H_SHIFT 0
+#define I40E_PRTTSYN_TGT_H_TSYNTGTT_H_MASK (0xFFFFFFFF << I40E_PRTTSYN_TGT_H_TSYNTGTT_H_SHIFT)
+#define I40E_PRTTSYN_TGT_L(_i) (0x001E4140 + ((_i) * 32)) /* _i=0...1 */
+#define I40E_PRTTSYN_TGT_L_MAX_INDEX 1
+#define I40E_PRTTSYN_TGT_L_TSYNTGTT_L_SHIFT 0
+#define I40E_PRTTSYN_TGT_L_TSYNTGTT_L_MASK (0xFFFFFFFF << I40E_PRTTSYN_TGT_L_TSYNTGTT_L_SHIFT)
+#define I40E_PRTTSYN_TIME_H 0x001E4120
+#define I40E_PRTTSYN_TIME_H_TSYNTIME_H_SHIFT 0
+#define I40E_PRTTSYN_TIME_H_TSYNTIME_H_MASK (0xFFFFFFFF << I40E_PRTTSYN_TIME_H_TSYNTIME_H_SHIFT)
+#define I40E_PRTTSYN_TIME_L 0x001E4100
+#define I40E_PRTTSYN_TIME_L_TSYNTIME_L_SHIFT 0
+#define I40E_PRTTSYN_TIME_L_TSYNTIME_L_MASK (0xFFFFFFFF << I40E_PRTTSYN_TIME_L_TSYNTIME_L_SHIFT)
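/*
 * Illustrative sketch (assumed usage, not taken from this patch): the
 * PRTTSYN_TIME_L/H pair above exposes the 1588 timer as two 32-bit
 * halves.  This assumes the usual scheme where the low half is read
 * first; the helper name and MMIO base are placeholders.
 */
#include <linux/io.h>
#include <linux/types.h>

static u64 example_read_tsyn_time(void __iomem *regs)
{
	u64 lo = readl(regs + I40E_PRTTSYN_TIME_L);
	u64 hi = readl(regs + I40E_PRTTSYN_TIME_H);

	return (hi << 32) | lo;
}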
+#define I40E_PRTTSYN_TXTIME_H 0x001E41E0
+#define I40E_PRTTSYN_TXTIME_H_TXTIEM_H_SHIFT 0
+#define I40E_PRTTSYN_TXTIME_H_TXTIEM_H_MASK (0xFFFFFFFF << I40E_PRTTSYN_TXTIME_H_TXTIEM_H_SHIFT)
+#define I40E_PRTTSYN_TXTIME_L 0x001E41C0
+#define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT 0
+#define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_MASK (0xFFFFFFFF << I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT)
+#define I40E_GLSCD_QUANTA 0x000B2080
+#define I40E_GLSCD_QUANTA_TSCDQUANTA_SHIFT 0
+#define I40E_GLSCD_QUANTA_TSCDQUANTA_MASK (0x7 << I40E_GLSCD_QUANTA_TSCDQUANTA_SHIFT)
+#define I40E_GL_MDET_RX 0x0012A510
+#define I40E_GL_MDET_RX_FUNCTION_SHIFT 0
+#define I40E_GL_MDET_RX_FUNCTION_MASK (0xFF << I40E_GL_MDET_RX_FUNCTION_SHIFT)
+#define I40E_GL_MDET_RX_EVENT_SHIFT 8
+#define I40E_GL_MDET_RX_EVENT_MASK (0x1FF << I40E_GL_MDET_RX_EVENT_SHIFT)
+#define I40E_GL_MDET_RX_QUEUE_SHIFT 17
+#define I40E_GL_MDET_RX_QUEUE_MASK (0x3FFF << I40E_GL_MDET_RX_QUEUE_SHIFT)
+#define I40E_GL_MDET_RX_VALID_SHIFT 31
+#define I40E_GL_MDET_RX_VALID_MASK (0x1 << I40E_GL_MDET_RX_VALID_SHIFT)
+#define I40E_GL_MDET_TX 0x000E6480
+#define I40E_GL_MDET_TX_FUNCTION_SHIFT 0
+#define I40E_GL_MDET_TX_FUNCTION_MASK (0xFF << I40E_GL_MDET_TX_FUNCTION_SHIFT)
+#define I40E_GL_MDET_TX_EVENT_SHIFT 8
+#define I40E_GL_MDET_TX_EVENT_MASK (0x1FF << I40E_GL_MDET_TX_EVENT_SHIFT)
+#define I40E_GL_MDET_TX_QUEUE_SHIFT 17
+#define I40E_GL_MDET_TX_QUEUE_MASK (0x3FFF << I40E_GL_MDET_TX_QUEUE_SHIFT)
+#define I40E_GL_MDET_TX_VALID_SHIFT 31
+#define I40E_GL_MDET_TX_VALID_MASK (0x1 << I40E_GL_MDET_TX_VALID_SHIFT)
+#define I40E_PF_MDET_RX 0x0012A400
+#define I40E_PF_MDET_RX_VALID_SHIFT 0
+#define I40E_PF_MDET_RX_VALID_MASK (0x1 << I40E_PF_MDET_RX_VALID_SHIFT)
+#define I40E_PF_MDET_TX 0x000E6400
+#define I40E_PF_MDET_TX_VALID_SHIFT 0
+#define I40E_PF_MDET_TX_VALID_MASK (0x1 << I40E_PF_MDET_TX_VALID_SHIFT)
+#define I40E_PF_VT_PFALLOC 0x001C0500
+#define I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT 0
+#define I40E_PF_VT_PFALLOC_FIRSTVF_MASK (0xFF << I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT)
+#define I40E_PF_VT_PFALLOC_LASTVF_SHIFT 8
+#define I40E_PF_VT_PFALLOC_LASTVF_MASK (0xFF << I40E_PF_VT_PFALLOC_LASTVF_SHIFT)
+#define I40E_PF_VT_PFALLOC_VALID_SHIFT 31
+#define I40E_PF_VT_PFALLOC_VALID_MASK (0x1 << I40E_PF_VT_PFALLOC_VALID_SHIFT)
+#define I40E_VP_MDET_RX(_VF) (0x0012A000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VP_MDET_RX_MAX_INDEX 127
+#define I40E_VP_MDET_RX_VALID_SHIFT 0
+#define I40E_VP_MDET_RX_VALID_MASK (0x1 << I40E_VP_MDET_RX_VALID_SHIFT)
+#define I40E_VP_MDET_TX(_VF) (0x000E6000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VP_MDET_TX_MAX_INDEX 127
+#define I40E_VP_MDET_TX_VALID_SHIFT 0
+#define I40E_VP_MDET_TX_VALID_MASK (0x1 << I40E_VP_MDET_TX_VALID_SHIFT)
+#define I40E_GLPM_WUMC 0x0006C800
+#define I40E_GLPM_WUMC_NOTCO_SHIFT 0
+#define I40E_GLPM_WUMC_NOTCO_MASK (0x1 << I40E_GLPM_WUMC_NOTCO_SHIFT)
+#define I40E_GLPM_WUMC_SRST_PIN_VAL_SHIFT 1
+#define I40E_GLPM_WUMC_SRST_PIN_VAL_MASK (0x1 << I40E_GLPM_WUMC_SRST_PIN_VAL_SHIFT)
+#define I40E_GLPM_WUMC_ROL_MODE_SHIFT 2
+#define I40E_GLPM_WUMC_ROL_MODE_MASK (0x1 << I40E_GLPM_WUMC_ROL_MODE_SHIFT)
+#define I40E_GLPM_WUMC_RESERVED_4_SHIFT 3
+#define I40E_GLPM_WUMC_RESERVED_4_MASK (0x1FFF << I40E_GLPM_WUMC_RESERVED_4_SHIFT)
+#define I40E_GLPM_WUMC_MNG_WU_PF_SHIFT 16
+#define I40E_GLPM_WUMC_MNG_WU_PF_MASK (0xFFFF << I40E_GLPM_WUMC_MNG_WU_PF_SHIFT)
+#define I40E_PFPM_APM 0x000B8080
+#define I40E_PFPM_APM_APME_SHIFT 0
+#define I40E_PFPM_APM_APME_MASK (0x1 << I40E_PFPM_APM_APME_SHIFT)
+#define I40E_PFPM_FHFT_LENGTH(_i) (0x0006A000 + ((_i) * 128)) /* _i=0...7 */
+#define I40E_PFPM_FHFT_LENGTH_MAX_INDEX 7
+#define I40E_PFPM_FHFT_LENGTH_LENGTH_SHIFT 0
+#define I40E_PFPM_FHFT_LENGTH_LENGTH_MASK (0xFF << I40E_PFPM_FHFT_LENGTH_LENGTH_SHIFT)
+#define I40E_PFPM_WUC 0x0006B200
+#define I40E_PFPM_WUC_EN_APM_D0_SHIFT 5
+#define I40E_PFPM_WUC_EN_APM_D0_MASK (0x1 << I40E_PFPM_WUC_EN_APM_D0_SHIFT)
+#define I40E_PFPM_WUFC 0x0006B400
+#define I40E_PFPM_WUFC_LNKC_SHIFT 0
+#define I40E_PFPM_WUFC_LNKC_MASK (0x1 << I40E_PFPM_WUFC_LNKC_SHIFT)
+#define I40E_PFPM_WUFC_MAG_SHIFT 1
+#define I40E_PFPM_WUFC_MAG_MASK (0x1 << I40E_PFPM_WUFC_MAG_SHIFT)
+#define I40E_PFPM_WUFC_MNG_SHIFT 3
+#define I40E_PFPM_WUFC_MNG_MASK (0x1 << I40E_PFPM_WUFC_MNG_SHIFT)
+#define I40E_PFPM_WUFC_FLX0_ACT_SHIFT 4
+#define I40E_PFPM_WUFC_FLX0_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX0_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX1_ACT_SHIFT 5
+#define I40E_PFPM_WUFC_FLX1_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX1_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX2_ACT_SHIFT 6
+#define I40E_PFPM_WUFC_FLX2_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX2_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX3_ACT_SHIFT 7
+#define I40E_PFPM_WUFC_FLX3_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX3_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX4_ACT_SHIFT 8
+#define I40E_PFPM_WUFC_FLX4_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX4_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX5_ACT_SHIFT 9
+#define I40E_PFPM_WUFC_FLX5_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX5_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX6_ACT_SHIFT 10
+#define I40E_PFPM_WUFC_FLX6_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX6_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX7_ACT_SHIFT 11
+#define I40E_PFPM_WUFC_FLX7_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX7_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX0_SHIFT 16
+#define I40E_PFPM_WUFC_FLX0_MASK (0x1 << I40E_PFPM_WUFC_FLX0_SHIFT)
+#define I40E_PFPM_WUFC_FLX1_SHIFT 17
+#define I40E_PFPM_WUFC_FLX1_MASK (0x1 << I40E_PFPM_WUFC_FLX1_SHIFT)
+#define I40E_PFPM_WUFC_FLX2_SHIFT 18
+#define I40E_PFPM_WUFC_FLX2_MASK (0x1 << I40E_PFPM_WUFC_FLX2_SHIFT)
+#define I40E_PFPM_WUFC_FLX3_SHIFT 19
+#define I40E_PFPM_WUFC_FLX3_MASK (0x1 << I40E_PFPM_WUFC_FLX3_SHIFT)
+#define I40E_PFPM_WUFC_FLX4_SHIFT 20
+#define I40E_PFPM_WUFC_FLX4_MASK (0x1 << I40E_PFPM_WUFC_FLX4_SHIFT)
+#define I40E_PFPM_WUFC_FLX5_SHIFT 21
+#define I40E_PFPM_WUFC_FLX5_MASK (0x1 << I40E_PFPM_WUFC_FLX5_SHIFT)
+#define I40E_PFPM_WUFC_FLX6_SHIFT 22
+#define I40E_PFPM_WUFC_FLX6_MASK (0x1 << I40E_PFPM_WUFC_FLX6_SHIFT)
+#define I40E_PFPM_WUFC_FLX7_SHIFT 23
+#define I40E_PFPM_WUFC_FLX7_MASK (0x1 << I40E_PFPM_WUFC_FLX7_SHIFT)
+#define I40E_PFPM_WUFC_FW_RST_WK_SHIFT 31
+#define I40E_PFPM_WUFC_FW_RST_WK_MASK (0x1 << I40E_PFPM_WUFC_FW_RST_WK_SHIFT)
+#define I40E_PFPM_WUS 0x0006B600
+#define I40E_PFPM_WUS_LNKC_SHIFT 0
+#define I40E_PFPM_WUS_LNKC_MASK (0x1 << I40E_PFPM_WUS_LNKC_SHIFT)
+#define I40E_PFPM_WUS_MAG_SHIFT 1
+#define I40E_PFPM_WUS_MAG_MASK (0x1 << I40E_PFPM_WUS_MAG_SHIFT)
+#define I40E_PFPM_WUS_PME_STATUS_SHIFT 2
+#define I40E_PFPM_WUS_PME_STATUS_MASK (0x1 << I40E_PFPM_WUS_PME_STATUS_SHIFT)
+#define I40E_PFPM_WUS_MNG_SHIFT 3
+#define I40E_PFPM_WUS_MNG_MASK (0x1 << I40E_PFPM_WUS_MNG_SHIFT)
+#define I40E_PFPM_WUS_FLX0_SHIFT 16
+#define I40E_PFPM_WUS_FLX0_MASK (0x1 << I40E_PFPM_WUS_FLX0_SHIFT)
+#define I40E_PFPM_WUS_FLX1_SHIFT 17
+#define I40E_PFPM_WUS_FLX1_MASK (0x1 << I40E_PFPM_WUS_FLX1_SHIFT)
+#define I40E_PFPM_WUS_FLX2_SHIFT 18
+#define I40E_PFPM_WUS_FLX2_MASK (0x1 << I40E_PFPM_WUS_FLX2_SHIFT)
+#define I40E_PFPM_WUS_FLX3_SHIFT 19
+#define I40E_PFPM_WUS_FLX3_MASK (0x1 << I40E_PFPM_WUS_FLX3_SHIFT)
+#define I40E_PFPM_WUS_FLX4_SHIFT 20
+#define I40E_PFPM_WUS_FLX4_MASK (0x1 << I40E_PFPM_WUS_FLX4_SHIFT)
+#define I40E_PFPM_WUS_FLX5_SHIFT 21
+#define I40E_PFPM_WUS_FLX5_MASK (0x1 << I40E_PFPM_WUS_FLX5_SHIFT)
+#define I40E_PFPM_WUS_FLX6_SHIFT 22
+#define I40E_PFPM_WUS_FLX6_MASK (0x1 << I40E_PFPM_WUS_FLX6_SHIFT)
+#define I40E_PFPM_WUS_FLX7_SHIFT 23
+#define I40E_PFPM_WUS_FLX7_MASK (0x1 << I40E_PFPM_WUS_FLX7_SHIFT)
+#define I40E_PFPM_WUS_FW_RST_WK_SHIFT 31
+#define I40E_PFPM_WUS_FW_RST_WK_MASK (0x1 << I40E_PFPM_WUS_FW_RST_WK_SHIFT)
+#define I40E_PRTPM_FHFHR 0x0006C000
+#define I40E_PRTPM_FHFHR_UNICAST_SHIFT 0
+#define I40E_PRTPM_FHFHR_UNICAST_MASK (0x1 << I40E_PRTPM_FHFHR_UNICAST_SHIFT)
+#define I40E_PRTPM_FHFHR_MULTICAST_SHIFT 1
+#define I40E_PRTPM_FHFHR_MULTICAST_MASK (0x1 << I40E_PRTPM_FHFHR_MULTICAST_SHIFT)
+#define I40E_PRTPM_SAH(_i) (0x001E44C0 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRTPM_SAH_MAX_INDEX 3
+#define I40E_PRTPM_SAH_PFPM_SAH_SHIFT 0
+#define I40E_PRTPM_SAH_PFPM_SAH_MASK (0xFFFF << I40E_PRTPM_SAH_PFPM_SAH_SHIFT)
+#define I40E_PRTPM_SAH_PF_NUM_SHIFT 26
+#define I40E_PRTPM_SAH_PF_NUM_MASK (0xF << I40E_PRTPM_SAH_PF_NUM_SHIFT)
+#define I40E_PRTPM_SAH_MC_MAG_EN_SHIFT 30
+#define I40E_PRTPM_SAH_MC_MAG_EN_MASK (0x1 << I40E_PRTPM_SAH_MC_MAG_EN_SHIFT)
+#define I40E_PRTPM_SAH_AV_SHIFT 31
+#define I40E_PRTPM_SAH_AV_MASK (0x1 << I40E_PRTPM_SAH_AV_SHIFT)
+#define I40E_PRTPM_SAL(_i) (0x001E4440 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRTPM_SAL_MAX_INDEX 3
+#define I40E_PRTPM_SAL_PFPM_SAL_SHIFT 0
+#define I40E_PRTPM_SAL_PFPM_SAL_MASK (0xFFFFFFFF << I40E_PRTPM_SAL_PFPM_SAL_SHIFT)
+#define I40E_VF_ARQBAH1 0x00006000
+#define I40E_VF_ARQBAH1_ARQBAH_SHIFT 0
+#define I40E_VF_ARQBAH1_ARQBAH_MASK (0xFFFFFFFF << I40E_VF_ARQBAH1_ARQBAH_SHIFT)
+#define I40E_VF_ARQBAL1 0x00006C00
+#define I40E_VF_ARQBAL1_ARQBAL_SHIFT 0
+#define I40E_VF_ARQBAL1_ARQBAL_MASK (0xFFFFFFFF << I40E_VF_ARQBAL1_ARQBAL_SHIFT)
+#define I40E_VF_ARQH1 0x00007400
+#define I40E_VF_ARQH1_ARQH_SHIFT 0
+#define I40E_VF_ARQH1_ARQH_MASK (0x3FF << I40E_VF_ARQH1_ARQH_SHIFT)
+#define I40E_VF_ARQLEN1 0x00008000
+#define I40E_VF_ARQLEN1_ARQLEN_SHIFT 0
+#define I40E_VF_ARQLEN1_ARQLEN_MASK (0x3FF << I40E_VF_ARQLEN1_ARQLEN_SHIFT)
+#define I40E_VF_ARQLEN1_ARQVFE_SHIFT 28
+#define I40E_VF_ARQLEN1_ARQVFE_MASK (0x1 << I40E_VF_ARQLEN1_ARQVFE_SHIFT)
+#define I40E_VF_ARQLEN1_ARQOVFL_SHIFT 29
+#define I40E_VF_ARQLEN1_ARQOVFL_MASK (0x1 << I40E_VF_ARQLEN1_ARQOVFL_SHIFT)
+#define I40E_VF_ARQLEN1_ARQCRIT_SHIFT 30
+#define I40E_VF_ARQLEN1_ARQCRIT_MASK (0x1 << I40E_VF_ARQLEN1_ARQCRIT_SHIFT)
+#define I40E_VF_ARQLEN1_ARQENABLE_SHIFT 31
+#define I40E_VF_ARQLEN1_ARQENABLE_MASK (0x1 << I40E_VF_ARQLEN1_ARQENABLE_SHIFT)
+#define I40E_VF_ARQT1 0x00007000
+#define I40E_VF_ARQT1_ARQT_SHIFT 0
+#define I40E_VF_ARQT1_ARQT_MASK (0x3FF << I40E_VF_ARQT1_ARQT_SHIFT)
+#define I40E_VF_ATQBAH1 0x00007800
+#define I40E_VF_ATQBAH1_ATQBAH_SHIFT 0
+#define I40E_VF_ATQBAH1_ATQBAH_MASK (0xFFFFFFFF << I40E_VF_ATQBAH1_ATQBAH_SHIFT)
+#define I40E_VF_ATQBAL1 0x00007C00
+#define I40E_VF_ATQBAL1_ATQBAL_SHIFT 0
+#define I40E_VF_ATQBAL1_ATQBAL_MASK (0xFFFFFFFF << I40E_VF_ATQBAL1_ATQBAL_SHIFT)
+#define I40E_VF_ATQH1 0x00006400
+#define I40E_VF_ATQH1_ATQH_SHIFT 0
+#define I40E_VF_ATQH1_ATQH_MASK (0x3FF << I40E_VF_ATQH1_ATQH_SHIFT)
+#define I40E_VF_ATQLEN1 0x00006800
+#define I40E_VF_ATQLEN1_ATQLEN_SHIFT 0
+#define I40E_VF_ATQLEN1_ATQLEN_MASK (0x3FF << I40E_VF_ATQLEN1_ATQLEN_SHIFT)
+#define I40E_VF_ATQLEN1_ATQVFE_SHIFT 28
+#define I40E_VF_ATQLEN1_ATQVFE_MASK (0x1 << I40E_VF_ATQLEN1_ATQVFE_SHIFT)
+#define I40E_VF_ATQLEN1_ATQOVFL_SHIFT 29
+#define I40E_VF_ATQLEN1_ATQOVFL_MASK (0x1 << I40E_VF_ATQLEN1_ATQOVFL_SHIFT)
+#define I40E_VF_ATQLEN1_ATQCRIT_SHIFT 30
+#define I40E_VF_ATQLEN1_ATQCRIT_MASK (0x1 << I40E_VF_ATQLEN1_ATQCRIT_SHIFT)
+#define I40E_VF_ATQLEN1_ATQENABLE_SHIFT 31
+#define I40E_VF_ATQLEN1_ATQENABLE_MASK (0x1 << I40E_VF_ATQLEN1_ATQENABLE_SHIFT)
+#define I40E_VF_ATQT1 0x00008400
+#define I40E_VF_ATQT1_ATQT_SHIFT 0
+#define I40E_VF_ATQT1_ATQT_MASK (0x3FF << I40E_VF_ATQT1_ATQT_SHIFT)
+#define I40E_VFGEN_RSTAT 0x00008800
+#define I40E_VFGEN_RSTAT_VFR_STATE_SHIFT 0
+#define I40E_VFGEN_RSTAT_VFR_STATE_MASK (0x3 << I40E_VFGEN_RSTAT_VFR_STATE_SHIFT)
+#define I40E_VFINT_DYN_CTL01 0x00005C00
+#define I40E_VFINT_DYN_CTL01_INTENA_SHIFT 0
+#define I40E_VFINT_DYN_CTL01_INTENA_MASK (0x1 << I40E_VFINT_DYN_CTL01_INTENA_SHIFT)
+#define I40E_VFINT_DYN_CTL01_CLEARPBA_SHIFT 1
+#define I40E_VFINT_DYN_CTL01_CLEARPBA_MASK (0x1 << I40E_VFINT_DYN_CTL01_CLEARPBA_SHIFT)
+#define I40E_VFINT_DYN_CTL01_SWINT_TRIG_SHIFT 2
+#define I40E_VFINT_DYN_CTL01_SWINT_TRIG_MASK (0x1 << I40E_VFINT_DYN_CTL01_SWINT_TRIG_SHIFT)
+#define I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT 3
+#define I40E_VFINT_DYN_CTL01_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTL01_INTERVAL_SHIFT 5
+#define I40E_VFINT_DYN_CTL01_INTERVAL_MASK (0xFFF << I40E_VFINT_DYN_CTL01_INTERVAL_SHIFT)
+#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_SHIFT 24
+#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_MASK (0x1 << I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_SHIFT 25
+#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTL01_SW_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTL01_INTENA_MSK_SHIFT 31
+#define I40E_VFINT_DYN_CTL01_INTENA_MSK_MASK (0x1 << I40E_VFINT_DYN_CTL01_INTENA_MSK_SHIFT)
+#define I40E_VFINT_DYN_CTLN1(_INTVF) (0x00003800 + ((_INTVF) * 4))
+#define I40E_VFINT_DYN_CTLN1_MAX_INDEX 15
+#define I40E_VFINT_DYN_CTLN1_INTENA_SHIFT 0
+#define I40E_VFINT_DYN_CTLN1_INTENA_MASK (0x1 << I40E_VFINT_DYN_CTLN1_INTENA_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_CLEARPBA_SHIFT 1
+#define I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK (0x1 << I40E_VFINT_DYN_CTLN1_CLEARPBA_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT 2
+#define I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK (0x1 << I40E_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT 3
+#define I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT 5
+#define I40E_VFINT_DYN_CTLN1_INTERVAL_MASK (0xFFF << I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT 24
+#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK (0x1 << I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_SHIFT 25
+#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_INTENA_MSK_SHIFT 31
+#define I40E_VFINT_DYN_CTLN1_INTENA_MSK_MASK (0x1 << I40E_VFINT_DYN_CTLN1_INTENA_MSK_SHIFT)
+#define I40E_VFINT_ICR0_ENA1 0x00005000
+#define I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_SHIFT 25
+#define I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_MASK (0x1 << I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_SHIFT)
+#define I40E_VFINT_ICR0_ENA1_ADMINQ_SHIFT 30
+#define I40E_VFINT_ICR0_ENA1_ADMINQ_MASK (0x1 << I40E_VFINT_ICR0_ENA1_ADMINQ_SHIFT)
+#define I40E_VFINT_ICR0_ENA1_RSVD_SHIFT 31
+#define I40E_VFINT_ICR0_ENA1_RSVD_MASK (0x1 << I40E_VFINT_ICR0_ENA1_RSVD_SHIFT)
+#define I40E_VFINT_ICR01 0x00004800
+#define I40E_VFINT_ICR01_INTEVENT_SHIFT 0
+#define I40E_VFINT_ICR01_INTEVENT_MASK (0x1 << I40E_VFINT_ICR01_INTEVENT_SHIFT)
+#define I40E_VFINT_ICR01_QUEUE_0_SHIFT 1
+#define I40E_VFINT_ICR01_QUEUE_0_MASK (0x1 << I40E_VFINT_ICR01_QUEUE_0_SHIFT)
+#define I40E_VFINT_ICR01_QUEUE_1_SHIFT 2
+#define I40E_VFINT_ICR01_QUEUE_1_MASK (0x1 << I40E_VFINT_ICR01_QUEUE_1_SHIFT)
+#define I40E_VFINT_ICR01_QUEUE_2_SHIFT 3
+#define I40E_VFINT_ICR01_QUEUE_2_MASK (0x1 << I40E_VFINT_ICR01_QUEUE_2_SHIFT)
+#define I40E_VFINT_ICR01_QUEUE_3_SHIFT 4
+#define I40E_VFINT_ICR01_QUEUE_3_MASK (0x1 << I40E_VFINT_ICR01_QUEUE_3_SHIFT)
+#define I40E_VFINT_ICR01_LINK_STAT_CHANGE_SHIFT 25
+#define I40E_VFINT_ICR01_LINK_STAT_CHANGE_MASK (0x1 << I40E_VFINT_ICR01_LINK_STAT_CHANGE_SHIFT)
+#define I40E_VFINT_ICR01_ADMINQ_SHIFT 30
+#define I40E_VFINT_ICR01_ADMINQ_MASK (0x1 << I40E_VFINT_ICR01_ADMINQ_SHIFT)
+#define I40E_VFINT_ICR01_SWINT_SHIFT 31
+#define I40E_VFINT_ICR01_SWINT_MASK (0x1 << I40E_VFINT_ICR01_SWINT_SHIFT)
+#define I40E_VFINT_ITR01(_i) (0x00004C00 + ((_i) * 4)) /* _i=0...2 */
+#define I40E_VFINT_ITR01_MAX_INDEX 2
+#define I40E_VFINT_ITR01_INTERVAL_SHIFT 0
+#define I40E_VFINT_ITR01_INTERVAL_MASK (0xFFF << I40E_VFINT_ITR01_INTERVAL_SHIFT)
+#define I40E_VFINT_ITRN1(_i, _INTVF) (0x00002800 + ((_i) * 64 + (_INTVF) * 4))
+#define I40E_VFINT_ITRN1_MAX_INDEX 2
+#define I40E_VFINT_ITRN1_INTERVAL_SHIFT 0
+#define I40E_VFINT_ITRN1_INTERVAL_MASK (0xFFF << I40E_VFINT_ITRN1_INTERVAL_SHIFT)
+#define I40E_VFINT_STAT_CTL01 0x00005400
+#define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT 2
+#define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_MASK (0x3 << I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT)
+#define I40E_QRX_TAIL1(_Q) (0x00002000 + ((_Q) * 4)) /* _i=0...15 */
+#define I40E_QRX_TAIL1_MAX_INDEX 15
+#define I40E_QRX_TAIL1_TAIL_SHIFT 0
+#define I40E_QRX_TAIL1_TAIL_MASK (0x1FFF << I40E_QRX_TAIL1_TAIL_SHIFT)
+#define I40E_QTX_TAIL1(_Q) (0x00000000 + ((_Q) * 4)) /* _i=0...15 */
+#define I40E_QTX_TAIL1_MAX_INDEX 15
+#define I40E_QTX_TAIL1_TAIL_SHIFT 0
+#define I40E_QTX_TAIL1_TAIL_MASK (0x1FFF << I40E_QTX_TAIL1_TAIL_SHIFT)
+#define I40E_VFMSIX_PBA 0x00002000
+#define I40E_VFMSIX_PBA_PENBIT_SHIFT 0
+#define I40E_VFMSIX_PBA_PENBIT_MASK (0xFFFFFFFF << I40E_VFMSIX_PBA_PENBIT_SHIFT)
+#define I40E_VFMSIX_TADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...16 */
+#define I40E_VFMSIX_TADD_MAX_INDEX 16
+#define I40E_VFMSIX_TADD_MSIXTADD10_SHIFT 0
+#define I40E_VFMSIX_TADD_MSIXTADD10_MASK (0x3 << I40E_VFMSIX_TADD_MSIXTADD10_SHIFT)
+#define I40E_VFMSIX_TADD_MSIXTADD_SHIFT 2
+#define I40E_VFMSIX_TADD_MSIXTADD_MASK (0x3FFFFFFF << I40E_VFMSIX_TADD_MSIXTADD_SHIFT)
+#define I40E_VFMSIX_TMSG(_i) (0x00000008 + ((_i) * 16)) /* _i=0...16 */
+#define I40E_VFMSIX_TMSG_MAX_INDEX 16
+#define I40E_VFMSIX_TMSG_MSIXTMSG_SHIFT 0
+#define I40E_VFMSIX_TMSG_MSIXTMSG_MASK (0xFFFFFFFF << I40E_VFMSIX_TMSG_MSIXTMSG_SHIFT)
+#define I40E_VFMSIX_TUADD(_i) (0x00000004 + ((_i) * 16)) /* _i=0...16 */
+#define I40E_VFMSIX_TUADD_MAX_INDEX 16
+#define I40E_VFMSIX_TUADD_MSIXTUADD_SHIFT 0
+#define I40E_VFMSIX_TUADD_MSIXTUADD_MASK (0xFFFFFFFF << I40E_VFMSIX_TUADD_MSIXTUADD_SHIFT)
+#define I40E_VFMSIX_TVCTRL(_i) (0x0000000C + ((_i) * 16)) /* _i=0...16 */
+#define I40E_VFMSIX_TVCTRL_MAX_INDEX 16
+#define I40E_VFMSIX_TVCTRL_MASK_SHIFT 0
+#define I40E_VFMSIX_TVCTRL_MASK_MASK (0x1 << I40E_VFMSIX_TVCTRL_MASK_SHIFT)
+#define I40E_VFCM_PE_ERRDATA 0x0000DC00
+#define I40E_VFCM_PE_ERRDATA_ERROR_CODE_SHIFT 0
+#define I40E_VFCM_PE_ERRDATA_ERROR_CODE_MASK (0xF << I40E_VFCM_PE_ERRDATA_ERROR_CODE_SHIFT)
+#define I40E_VFCM_PE_ERRDATA_Q_TYPE_SHIFT 4
+#define I40E_VFCM_PE_ERRDATA_Q_TYPE_MASK (0x7 << I40E_VFCM_PE_ERRDATA_Q_TYPE_SHIFT)
+#define I40E_VFCM_PE_ERRDATA_Q_NUM_SHIFT 8
+#define I40E_VFCM_PE_ERRDATA_Q_NUM_MASK (0x3FFFF << I40E_VFCM_PE_ERRDATA_Q_NUM_SHIFT)
+#define I40E_VFCM_PE_ERRINFO 0x0000D800
+#define I40E_VFCM_PE_ERRINFO_ERROR_VALID_SHIFT 0
+#define I40E_VFCM_PE_ERRINFO_ERROR_VALID_MASK (0x1 << I40E_VFCM_PE_ERRINFO_ERROR_VALID_SHIFT)
+#define I40E_VFCM_PE_ERRINFO_ERROR_INST_SHIFT 4
+#define I40E_VFCM_PE_ERRINFO_ERROR_INST_MASK (0x7 << I40E_VFCM_PE_ERRINFO_ERROR_INST_SHIFT)
+#define I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT 8
+#define I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_MASK (0xFF << I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT)
+#define I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT 16
+#define I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_MASK (0xFF << I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT)
+#define I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT 24
+#define I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_MASK (0xFF << I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT)
+#define I40E_VFPE_AEQALLOC1 0x0000A400
+#define I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT 0
+#define I40E_VFPE_AEQALLOC1_AECOUNT_MASK (0xFFFFFFFF << I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT)
+#define I40E_VFPE_CCQPHIGH1 0x00009800
+#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT 0
+#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_MASK (0xFFFFFFFF << I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT)
+#define I40E_VFPE_CCQPLOW1 0x0000AC00
+#define I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT 0
+#define I40E_VFPE_CCQPLOW1_PECCQPLOW_MASK (0xFFFFFFFF << I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1 0x0000B800
+#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT 0
+#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_MASK (0x1 << I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT 31
+#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_MASK (0x1 << I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT)
+#define I40E_VFPE_CQACK1 0x0000B000
+#define I40E_VFPE_CQACK1_PECQID_SHIFT 0
+#define I40E_VFPE_CQACK1_PECQID_MASK (0x1FFFF << I40E_VFPE_CQACK1_PECQID_SHIFT)
+#define I40E_VFPE_CQARM1 0x0000B400
+#define I40E_VFPE_CQARM1_PECQID_SHIFT 0
+#define I40E_VFPE_CQARM1_PECQID_MASK (0x1FFFF << I40E_VFPE_CQARM1_PECQID_SHIFT)
+#define I40E_VFPE_CQPDB1 0x0000BC00
+#define I40E_VFPE_CQPDB1_WQHEAD_SHIFT 0
+#define I40E_VFPE_CQPDB1_WQHEAD_MASK (0x7FF << I40E_VFPE_CQPDB1_WQHEAD_SHIFT)
+#define I40E_VFPE_CQPERRCODES1 0x00009C00
+#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT 0
+#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_MASK (0xFFFF << I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT)
+#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT 16
+#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_MASK (0xFFFF << I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT)
+#define I40E_VFPE_CQPTAIL1 0x0000A000
+#define I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT 0
+#define I40E_VFPE_CQPTAIL1_WQTAIL_MASK (0x7FF << I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT)
+#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT 31
+#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_MASK (0x1 << I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT)
+#define I40E_VFPE_IPCONFIG01 0x00008C00
+#define I40E_VFPE_IPCONFIG01_PEIPID_SHIFT 0
+#define I40E_VFPE_IPCONFIG01_PEIPID_MASK (0xFFFF << I40E_VFPE_IPCONFIG01_PEIPID_SHIFT)
+#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT 16
+#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_MASK (0x1 << I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT)
+#define I40E_VFPE_MRTEIDXMASK1 0x00009000
+#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT 0
+#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_MASK (0x1F << I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT)
+#define I40E_VFPE_RCVUNEXPECTEDERROR1 0x00009400
+#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT 0
+#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_MASK (0xFFFFFF << I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT)
+#define I40E_VFPE_TCPNOWTIMER1 0x0000A800
+#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT 0
+#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_MASK (0xFFFFFFFF << I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT)
+#define I40E_VFPE_WQEALLOC1 0x0000C000
+#define I40E_VFPE_WQEALLOC1_PEQPID_SHIFT 0
+#define I40E_VFPE_WQEALLOC1_PEQPID_MASK (0x3FFFF << I40E_VFPE_WQEALLOC1_PEQPID_SHIFT)
+#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT 20
+#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_MASK (0xFFF << I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT)
+#define I40E_VFQF_HENA(_i) (0x0000C400 + ((_i) * 4)) /* _i=0...1 */
+#define I40E_VFQF_HENA_MAX_INDEX 1
+#define I40E_VFQF_HENA_PTYPE_ENA_SHIFT 0
+#define I40E_VFQF_HENA_PTYPE_ENA_MASK (0xFFFFFFFF << I40E_VFQF_HENA_PTYPE_ENA_SHIFT)
+#define I40E_VFQF_HKEY(_i) (0x0000CC00 + ((_i) * 4)) /* _i=0...12 */
+#define I40E_VFQF_HKEY_MAX_INDEX 12
+#define I40E_VFQF_HKEY_KEY_0_SHIFT 0
+#define I40E_VFQF_HKEY_KEY_0_MASK (0xFF << I40E_VFQF_HKEY_KEY_0_SHIFT)
+#define I40E_VFQF_HKEY_KEY_1_SHIFT 8
+#define I40E_VFQF_HKEY_KEY_1_MASK (0xFF << I40E_VFQF_HKEY_KEY_1_SHIFT)
+#define I40E_VFQF_HKEY_KEY_2_SHIFT 16
+#define I40E_VFQF_HKEY_KEY_2_MASK (0xFF << I40E_VFQF_HKEY_KEY_2_SHIFT)
+#define I40E_VFQF_HKEY_KEY_3_SHIFT 24
+#define I40E_VFQF_HKEY_KEY_3_MASK (0xFF << I40E_VFQF_HKEY_KEY_3_SHIFT)
+#define I40E_VFQF_HLUT(_i) (0x0000D000 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_VFQF_HLUT_MAX_INDEX 15
+#define I40E_VFQF_HLUT_LUT0_SHIFT 0
+#define I40E_VFQF_HLUT_LUT0_MASK (0xF << I40E_VFQF_HLUT_LUT0_SHIFT)
+#define I40E_VFQF_HLUT_LUT1_SHIFT 8
+#define I40E_VFQF_HLUT_LUT1_MASK (0xF << I40E_VFQF_HLUT_LUT1_SHIFT)
+#define I40E_VFQF_HLUT_LUT2_SHIFT 16
+#define I40E_VFQF_HLUT_LUT2_MASK (0xF << I40E_VFQF_HLUT_LUT2_SHIFT)
+#define I40E_VFQF_HLUT_LUT3_SHIFT 24
+#define I40E_VFQF_HLUT_LUT3_MASK (0xF << I40E_VFQF_HLUT_LUT3_SHIFT)
+#define I40E_VFQF_HREGION(_i) (0x0000D400 + ((_i) * 4)) /* _i=0...7 */
+#define I40E_VFQF_HREGION_MAX_INDEX 7
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_0_SHIFT 0
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_0_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_0_SHIFT)
+#define I40E_VFQF_HREGION_REGION_0_SHIFT 1
+#define I40E_VFQF_HREGION_REGION_0_MASK (0x7 << I40E_VFQF_HREGION_REGION_0_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_1_SHIFT 4
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_1_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_1_SHIFT)
+#define I40E_VFQF_HREGION_REGION_1_SHIFT 5
+#define I40E_VFQF_HREGION_REGION_1_MASK (0x7 << I40E_VFQF_HREGION_REGION_1_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_2_SHIFT 8
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_2_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_2_SHIFT)
+#define I40E_VFQF_HREGION_REGION_2_SHIFT 9
+#define I40E_VFQF_HREGION_REGION_2_MASK (0x7 << I40E_VFQF_HREGION_REGION_2_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_3_SHIFT 12
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_3_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_3_SHIFT)
+#define I40E_VFQF_HREGION_REGION_3_SHIFT 13
+#define I40E_VFQF_HREGION_REGION_3_MASK (0x7 << I40E_VFQF_HREGION_REGION_3_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_4_SHIFT 16
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_4_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_4_SHIFT)
+#define I40E_VFQF_HREGION_REGION_4_SHIFT 17
+#define I40E_VFQF_HREGION_REGION_4_MASK (0x7 << I40E_VFQF_HREGION_REGION_4_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_5_SHIFT 20
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_5_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_5_SHIFT)
+#define I40E_VFQF_HREGION_REGION_5_SHIFT 21
+#define I40E_VFQF_HREGION_REGION_5_MASK (0x7 << I40E_VFQF_HREGION_REGION_5_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_6_SHIFT 24
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_6_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_6_SHIFT)
+#define I40E_VFQF_HREGION_REGION_6_SHIFT 25
+#define I40E_VFQF_HREGION_REGION_6_MASK (0x7 << I40E_VFQF_HREGION_REGION_6_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT 28
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_7_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT)
+#define I40E_VFQF_HREGION_REGION_7_SHIFT 29
+#define I40E_VFQF_HREGION_REGION_7_MASK (0x7 << I40E_VFQF_HREGION_REGION_7_SHIFT)
+#define I40E_RCU_PST_FOC_ACCESS_STATUS 0x00270110
+#define I40E_RCU_PST_FOC_ACCESS_STATUS_WR_ACCESS_CNT_SHIFT 0
+#define I40E_RCU_PST_FOC_ACCESS_STATUS_WR_ACCESS_CNT_MASK (0xFF << I40E_RCU_PST_FOC_ACCESS_STATUS_WR_ACCESS_CNT_SHIFT)
+#define I40E_RCU_PST_FOC_ACCESS_STATUS_RD_ACCESS_CNT_SHIFT 8
+#define I40E_RCU_PST_FOC_ACCESS_STATUS_RD_ACCESS_CNT_MASK (0xFF << I40E_RCU_PST_FOC_ACCESS_STATUS_RD_ACCESS_CNT_SHIFT)
+#define I40E_RCU_PST_FOC_ACCESS_STATUS_ERR_CNT_SHIFT 16
+#define I40E_RCU_PST_FOC_ACCESS_STATUS_ERR_CNT_MASK (0xFF << I40E_RCU_PST_FOC_ACCESS_STATUS_ERR_CNT_SHIFT)
+#define I40E_RCU_PST_FOC_ACCESS_STATUS_LAST_ERR_CODE_SHIFT 24
+#define I40E_RCU_PST_FOC_ACCESS_STATUS_LAST_ERR_CODE_MASK (0x7 << I40E_RCU_PST_FOC_ACCESS_STATUS_LAST_ERR_CODE_SHIFT)
+#endif
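
The *_SHIFT/*_MASK pairs above are meant to be used together when decoding a 32-bit register read: mask the field first, then shift it down to bit 0. A minimal sketch, using the Tx malicious-driver-detect register as an example (rd32() is assumed to be the driver's register-read accessor; the helper itself is illustrative only, not part of this header):

	static void example_report_mdet_tx(struct i40e_hw *hw)
	{
		u32 val = rd32(hw, I40E_GL_MDET_TX);

		/* nothing latched in the register, so nothing to report */
		if (!(val & I40E_GL_MDET_TX_VALID_MASK))
			return;

		pr_info("MDD TX event 0x%x on function %u, queue %u\n",
			(val & I40E_GL_MDET_TX_EVENT_MASK) >> I40E_GL_MDET_TX_EVENT_SHIFT,
			(val & I40E_GL_MDET_TX_FUNCTION_MASK) >> I40E_GL_MDET_TX_FUNCTION_SHIFT,
			(val & I40E_GL_MDET_TX_QUEUE_MASK) >> I40E_GL_MDET_TX_QUEUE_SHIFT);
	}
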
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_status.h b/drivers/net/ethernet/intel/i40evf/i40e_status.h
new file mode 100644
index 000000000000..7c08cc2e339b
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40e_status.h
@@ -0,0 +1,97 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_STATUS_H_
+#define _I40E_STATUS_H_
+
+/* Error Codes */
+enum i40e_status_code {
+	I40E_SUCCESS				= 0,
+	I40E_ERR_NVM				= -1,
+	I40E_ERR_NVM_CHECKSUM			= -2,
+	I40E_ERR_PHY				= -3,
+	I40E_ERR_CONFIG				= -4,
+	I40E_ERR_PARAM				= -5,
+	I40E_ERR_MAC_TYPE			= -6,
+	I40E_ERR_UNKNOWN_PHY			= -7,
+	I40E_ERR_LINK_SETUP			= -8,
+	I40E_ERR_ADAPTER_STOPPED		= -9,
+	I40E_ERR_INVALID_MAC_ADDR		= -10,
+	I40E_ERR_DEVICE_NOT_SUPPORTED		= -11,
+	I40E_ERR_MASTER_REQUESTS_PENDING	= -12,
+	I40E_ERR_INVALID_LINK_SETTINGS		= -13,
+	I40E_ERR_AUTONEG_NOT_COMPLETE		= -14,
+	I40E_ERR_RESET_FAILED			= -15,
+	I40E_ERR_SWFW_SYNC			= -16,
+	I40E_ERR_NO_AVAILABLE_VSI		= -17,
+	I40E_ERR_NO_MEMORY			= -18,
+	I40E_ERR_BAD_PTR			= -19,
+	I40E_ERR_RING_FULL			= -20,
+	I40E_ERR_INVALID_PD_ID			= -21,
+	I40E_ERR_INVALID_QP_ID			= -22,
+	I40E_ERR_INVALID_CQ_ID			= -23,
+	I40E_ERR_INVALID_CEQ_ID			= -24,
+	I40E_ERR_INVALID_AEQ_ID			= -25,
+	I40E_ERR_INVALID_SIZE			= -26,
+	I40E_ERR_INVALID_ARP_INDEX		= -27,
+	I40E_ERR_INVALID_FPM_FUNC_ID		= -28,
+	I40E_ERR_QP_INVALID_MSG_SIZE		= -29,
+	I40E_ERR_QP_TOOMANY_WRS_POSTED		= -30,
+	I40E_ERR_INVALID_FRAG_COUNT		= -31,
+	I40E_ERR_QUEUE_EMPTY			= -32,
+	I40E_ERR_INVALID_ALIGNMENT		= -33,
+	I40E_ERR_FLUSHED_QUEUE			= -34,
+	I40E_ERR_INVALID_PUSH_PAGE_INDEX	= -35,
+	I40E_ERR_INVALID_IMM_DATA_SIZE		= -36,
+	I40E_ERR_TIMEOUT			= -37,
+	I40E_ERR_OPCODE_MISMATCH		= -38,
+	I40E_ERR_CQP_COMPL_ERROR		= -39,
+	I40E_ERR_INVALID_VF_ID			= -40,
+	I40E_ERR_INVALID_HMCFN_ID		= -41,
+	I40E_ERR_BACKING_PAGE_ERROR		= -42,
+	I40E_ERR_NO_PBLCHUNKS_AVAILABLE		= -43,
+	I40E_ERR_INVALID_PBLE_INDEX		= -44,
+	I40E_ERR_INVALID_SD_INDEX		= -45,
+	I40E_ERR_INVALID_PAGE_DESC_INDEX	= -46,
+	I40E_ERR_INVALID_SD_TYPE		= -47,
+	I40E_ERR_MEMCPY_FAILED			= -48,
+	I40E_ERR_INVALID_HMC_OBJ_INDEX		= -49,
+	I40E_ERR_INVALID_HMC_OBJ_COUNT		= -50,
+	I40E_ERR_INVALID_SRQ_ARM_LIMIT		= -51,
+	I40E_ERR_SRQ_ENABLED			= -52,
+	I40E_ERR_ADMIN_QUEUE_ERROR		= -53,
+	I40E_ERR_ADMIN_QUEUE_TIMEOUT		= -54,
+	I40E_ERR_BUF_TOO_SHORT			= -55,
+	I40E_ERR_ADMIN_QUEUE_FULL		= -56,
+	I40E_ERR_ADMIN_QUEUE_NO_WORK		= -57,
+	I40E_ERR_BAD_IWARP_CQE			= -58,
+	I40E_ERR_NVM_BLANK_MODE			= -59,
+	I40E_ERR_NOT_IMPLEMENTED		= -60,
+	I40E_ERR_PE_DOORBELL_NOT_ENABLED	= -61,
+	I40E_ERR_DIAG_TEST_FAILED		= -62,
+	I40E_ERR_NOT_READY			= -63,
+	I40E_NOT_SUPPORTED			= -64,
+	I40E_ERR_FIRMWARE_API_VERSION		= -65,
+};
+
+#endif /* _I40E_STATUS_H_ */
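
All codes other than I40E_SUCCESS are negative, so a caller can test for success with a plain zero check. A minimal sketch of how such device status codes might be translated into kernel errnos (this helper is an illustration only, not part of the driver):

	static int example_status_to_errno(enum i40e_status_code err)
	{
		switch (err) {
		case I40E_SUCCESS:
			return 0;
		case I40E_ERR_NO_MEMORY:
			return -ENOMEM;
		case I40E_ERR_PARAM:
		case I40E_ERR_INVALID_SIZE:
			return -EINVAL;
		case I40E_ERR_TIMEOUT:
		case I40E_ERR_ADMIN_QUEUE_TIMEOUT:
			return -ETIMEDOUT;
		default:
			return -EIO;	/* anything else becomes a generic I/O error */
		}
	}
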
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
new file mode 100644
index 000000000000..ffdb01d853db
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -0,0 +1,1575 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#include <linux/prefetch.h>
+
+#include "i40evf.h"
+
+static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
+				u32 td_tag)
+{
+	return cpu_to_le64(I40E_TX_DESC_DTYPE_DATA |
+			   ((u64)td_cmd  << I40E_TXD_QW1_CMD_SHIFT) |
+			   ((u64)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
+			   ((u64)size  << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
+			   ((u64)td_tag  << I40E_TXD_QW1_L2TAG1_SHIFT));
+}
+
+#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
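+
+/* For example, build_ctob(I40E_TXD_CMD, 0, 256, 0) yields a descriptor
+ * qword with DTYPE_DATA plus the EOP and RS command bits, a 256 byte
+ * buffer size and no L2 tag, each argument shifted into place by the
+ * corresponding I40E_TXD_QW1_*_SHIFT (illustrative values only).
+ */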
+
+/**
+ * i40e_unmap_and_free_tx_resource - Release a Tx buffer
+ * @ring:      the ring that owns the buffer
+ * @tx_buffer: the buffer to free
+ **/
+static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
+					    struct i40e_tx_buffer *tx_buffer)
+{
+	if (tx_buffer->skb) {
+		dev_kfree_skb_any(tx_buffer->skb);
+		if (dma_unmap_len(tx_buffer, len))
+			dma_unmap_single(ring->dev,
+					 dma_unmap_addr(tx_buffer, dma),
+					 dma_unmap_len(tx_buffer, len),
+					 DMA_TO_DEVICE);
+	} else if (dma_unmap_len(tx_buffer, len)) {
+		dma_unmap_page(ring->dev,
+			       dma_unmap_addr(tx_buffer, dma),
+			       dma_unmap_len(tx_buffer, len),
+			       DMA_TO_DEVICE);
+	}
+	tx_buffer->next_to_watch = NULL;
+	tx_buffer->skb = NULL;
+	dma_unmap_len_set(tx_buffer, len, 0);
+	/* tx_buffer must be completely set up in the transmit path */
+}
+
+/**
+ * i40evf_clean_tx_ring - Free all Tx buffers in a ring
+ * @tx_ring: ring to be cleaned
+ **/
+void i40evf_clean_tx_ring(struct i40e_ring *tx_ring)
+{
+	unsigned long bi_size;
+	u16 i;
+
+	/* ring already cleared, nothing to do */
+	if (!tx_ring->tx_bi)
+		return;
+
+	/* Free all the Tx ring sk_buffs */
+	for (i = 0; i < tx_ring->count; i++)
+		i40e_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]);
+
+	bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
+	memset(tx_ring->tx_bi, 0, bi_size);
+
+	/* Zero out the descriptor ring */
+	memset(tx_ring->desc, 0, tx_ring->size);
+
+	tx_ring->next_to_use = 0;
+	tx_ring->next_to_clean = 0;
+
+	if (!tx_ring->netdev)
+		return;
+
+	/* cleanup Tx queue statistics */
+	netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
+						  tx_ring->queue_index));
+}
+
+/**
+ * i40evf_free_tx_resources - Free Tx resources per queue
+ * @tx_ring: Tx descriptor ring for a specific queue
+ *
+ * Free all transmit software resources
+ **/
+void i40evf_free_tx_resources(struct i40e_ring *tx_ring)
+{
+	i40evf_clean_tx_ring(tx_ring);
+	kfree(tx_ring->tx_bi);
+	tx_ring->tx_bi = NULL;
+
+	if (tx_ring->desc) {
+		dma_free_coherent(tx_ring->dev, tx_ring->size,
+				  tx_ring->desc, tx_ring->dma);
+		tx_ring->desc = NULL;
+	}
+}
+
+/**
+ * i40e_get_tx_pending - how many Tx descriptors are not yet processed
+ * @ring: the ring of descriptors
+ *
+ * Since there is no access to the ring head register
+ * in XL710, we need to use our local copies
+ **/
+static u32 i40e_get_tx_pending(struct i40e_ring *ring)
+{
+	u32 ntu = ((ring->next_to_clean <= ring->next_to_use)
+			? ring->next_to_use
+			: ring->next_to_use + ring->count);
+	return ntu - ring->next_to_clean;
+}
+
+/**
+ * i40e_check_tx_hang - Is there a hang in the Tx queue
+ * @tx_ring: the ring of descriptors
+ **/
+static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
+{
+	u32 tx_pending = i40e_get_tx_pending(tx_ring);
+	bool ret = false;
+
+	clear_check_for_tx_hang(tx_ring);
+
+	/* Check for a hung queue, but be thorough. This verifies
+	 * that a transmit has been completed since the previous
+	 * check AND there is at least one packet pending. The
+	 * ARMED bit is set to indicate a potential hang. The
+	 * bit is cleared if a pause frame is received to remove
+	 * false hang detection due to PFC or 802.3x frames. By
+	 * requiring this to fail twice we avoid races with
+	 * PFC clearing the ARMED bit and conditions where we
+	 * run the check_tx_hang logic with a transmit completion
+	 * pending but without time to complete it yet.
+	 */
+	if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) &&
+	    tx_pending) {
+		/* make sure it is true for two checks in a row */
+		ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED,
+				       &tx_ring->state);
+	} else {
+		/* update completed stats and disarm the hang check */
+		tx_ring->tx_stats.tx_done_old = tx_ring->stats.packets;
+		clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state);
+	}
+
+	return ret;
+}
+
+/**
+ * i40e_clean_tx_irq - Reclaim resources after transmit completes
+ * @tx_ring:  tx ring to clean
+ * @budget:   how many cleans we're allowed
+ *
+ * Returns true if there's any budget left (i.e. the clean is finished)
+ **/
+static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
+{
+	u16 i = tx_ring->next_to_clean;
+	struct i40e_tx_buffer *tx_buf;
+	struct i40e_tx_desc *tx_desc;
+	unsigned int total_packets = 0;
+	unsigned int total_bytes = 0;
+
+	tx_buf = &tx_ring->tx_bi[i];
+	tx_desc = I40E_TX_DESC(tx_ring, i);
+	i -= tx_ring->count;
+
+	do {
+		struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
+
+		/* if next_to_watch is not set then there is no work pending */
+		if (!eop_desc)
+			break;
+
+		/* prevent any other reads prior to eop_desc */
+		read_barrier_depends();
+
+		/* if the descriptor isn't done, no work yet to do */
+		if (!(eop_desc->cmd_type_offset_bsz &
+		      cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
+			break;
+
+		/* clear next_to_watch to prevent false hangs */
+		tx_buf->next_to_watch = NULL;
+
+		/* update the statistics for this packet */
+		total_bytes += tx_buf->bytecount;
+		total_packets += tx_buf->gso_segs;
+
+		/* free the skb */
+		dev_kfree_skb_any(tx_buf->skb);
+
+		/* unmap skb header data */
+		dma_unmap_single(tx_ring->dev,
+				 dma_unmap_addr(tx_buf, dma),
+				 dma_unmap_len(tx_buf, len),
+				 DMA_TO_DEVICE);
+
+		/* clear tx_buffer data */
+		tx_buf->skb = NULL;
+		dma_unmap_len_set(tx_buf, len, 0);
+
+		/* unmap remaining buffers */
+		while (tx_desc != eop_desc) {
+
+			tx_buf++;
+			tx_desc++;
+			i++;
+			if (unlikely(!i)) {
+				i -= tx_ring->count;
+				tx_buf = tx_ring->tx_bi;
+				tx_desc = I40E_TX_DESC(tx_ring, 0);
+			}
+
+			/* unmap any remaining paged data */
+			if (dma_unmap_len(tx_buf, len)) {
+				dma_unmap_page(tx_ring->dev,
+					       dma_unmap_addr(tx_buf, dma),
+					       dma_unmap_len(tx_buf, len),
+					       DMA_TO_DEVICE);
+				dma_unmap_len_set(tx_buf, len, 0);
+			}
+		}
+
+		/* move us one more past the eop_desc for start of next pkt */
+		tx_buf++;
+		tx_desc++;
+		i++;
+		if (unlikely(!i)) {
+			i -= tx_ring->count;
+			tx_buf = tx_ring->tx_bi;
+			tx_desc = I40E_TX_DESC(tx_ring, 0);
+		}
+
+		/* update budget accounting */
+		budget--;
+	} while (likely(budget));
+
+	i += tx_ring->count;
+	tx_ring->next_to_clean = i;
+	u64_stats_update_begin(&tx_ring->syncp);
+	tx_ring->stats.bytes += total_bytes;
+	tx_ring->stats.packets += total_packets;
+	u64_stats_update_end(&tx_ring->syncp);
+	tx_ring->q_vector->tx.total_bytes += total_bytes;
+	tx_ring->q_vector->tx.total_packets += total_packets;
+
+	if (check_for_tx_hang(tx_ring) && i40e_check_tx_hang(tx_ring)) {
+		/* schedule immediate reset if we believe we hung */
+		dev_info(tx_ring->dev, "Detected Tx Unit Hang\n"
+			 "  VSI                  <%d>\n"
+			 "  Tx Queue             <%d>\n"
+			 "  next_to_use          <%x>\n"
+			 "  next_to_clean        <%x>\n",
+			 tx_ring->vsi->seid,
+			 tx_ring->queue_index,
+			 tx_ring->next_to_use, i);
+		dev_info(tx_ring->dev, "tx_bi[next_to_clean]\n"
+			 "  time_stamp           <%lx>\n"
+			 "  jiffies              <%lx>\n",
+			 tx_ring->tx_bi[i].time_stamp, jiffies);
+
+		netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
+
+		dev_info(tx_ring->dev,
+			 "tx hang detected on queue %d, resetting adapter\n",
+			 tx_ring->queue_index);
+
+		tx_ring->netdev->netdev_ops->ndo_tx_timeout(tx_ring->netdev);
+
+		/* the adapter is about to reset, no point in enabling stuff */
+		return true;
+	}
+
+	netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev,
+						      tx_ring->queue_index),
+				  total_packets, total_bytes);
+
+#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
+	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
+		     (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
+		/* Make sure that anybody stopping the queue after this
+		 * sees the new next_to_clean.
+		 */
+		smp_mb();
+		if (__netif_subqueue_stopped(tx_ring->netdev,
+					     tx_ring->queue_index) &&
+		   !test_bit(__I40E_DOWN, &tx_ring->vsi->state)) {
+			netif_wake_subqueue(tx_ring->netdev,
+					    tx_ring->queue_index);
+			++tx_ring->tx_stats.restart_queue;
+		}
+	}
+
+	return budget > 0;
+}
+
+/**
+ * i40e_set_new_dynamic_itr - Find new ITR level
+ * @rc: structure containing ring performance data
+ *
+ * Stores a new ITR value based on packets and byte counts during
+ * the last interrupt.  The advantage of per interrupt computation
+ * is faster updates and more accurate ITR for the current traffic
+ * pattern.  Constants in this function were computed based on
+ * theoretical maximum wire speed and thresholds were set based on
+ * testing data as well as attempting to minimize response time
+ * while increasing bulk throughput.
+ **/
+static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
+{
+	enum i40e_latency_range new_latency_range = rc->latency_range;
+	u32 new_itr = rc->itr;
+	int bytes_per_int;
+
+	if (rc->total_packets == 0 || !rc->itr)
+		return;
+
+	/* simple throttle rate management
+	 *   0-10MB/s   lowest (100000 ints/s)
+	 *  10-20MB/s   low    (20000 ints/s)
+	 *  20-1249MB/s bulk   (8000 ints/s)
+	 */
+	bytes_per_int = rc->total_bytes / rc->itr;
+	switch (new_latency_range) {
+	case I40E_LOWEST_LATENCY:
+		if (bytes_per_int > 10)
+			new_latency_range = I40E_LOW_LATENCY;
+		break;
+	case I40E_LOW_LATENCY:
+		if (bytes_per_int > 20)
+			new_latency_range = I40E_BULK_LATENCY;
+		else if (bytes_per_int <= 10)
+			new_latency_range = I40E_LOWEST_LATENCY;
+		break;
+	case I40E_BULK_LATENCY:
+		if (bytes_per_int <= 20)
+			new_latency_range = I40E_LOW_LATENCY;
+		break;
+	}
+	rc->latency_range = new_latency_range;
+
+	switch (new_latency_range) {
+	case I40E_LOWEST_LATENCY:
+		new_itr = I40E_ITR_100K;
+		break;
+	case I40E_LOW_LATENCY:
+		new_itr = I40E_ITR_20K;
+		break;
+	case I40E_BULK_LATENCY:
+		new_itr = I40E_ITR_8K;
+		break;
+	default:
+		break;
+	}
+
+	if (new_itr != rc->itr) {
+		/* do an exponential smoothing */
+		new_itr = (10 * new_itr * rc->itr) /
+			  ((9 * new_itr) + rc->itr);
+		rc->itr = new_itr & I40E_MAX_ITR;
+	}
+
+	rc->total_bytes = 0;
+	rc->total_packets = 0;
+}
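+
+/* The smoothing above moves the ITR only part of the way toward the new
+ * target on each pass.  Purely as an illustration: with a current itr of
+ * 62 and a target of 25, (10 * 25 * 62) / ((9 * 25) + 62) = 15500 / 287,
+ * which truncates to 54, so several quiet intervals are needed before the
+ * interrupt rate actually settles at the new level.
+ */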
+
+/**
+ * i40e_update_dynamic_itr - Adjust ITR based on bytes per int
+ * @q_vector: the vector to adjust
+ **/
+static void i40e_update_dynamic_itr(struct i40e_q_vector *q_vector)
+{
+	u16 vector = q_vector->vsi->base_vector + q_vector->v_idx;
+	struct i40e_hw *hw = &q_vector->vsi->back->hw;
+	u32 reg_addr;
+	u16 old_itr;
+
+	reg_addr = I40E_VFINT_ITRN1(I40E_RX_ITR, vector - 1);
+	old_itr = q_vector->rx.itr;
+	i40e_set_new_dynamic_itr(&q_vector->rx);
+	if (old_itr != q_vector->rx.itr)
+		wr32(hw, reg_addr, q_vector->rx.itr);
+
+	reg_addr = I40E_VFINT_ITRN1(I40E_TX_ITR, vector - 1);
+	old_itr = q_vector->tx.itr;
+	i40e_set_new_dynamic_itr(&q_vector->tx);
+	if (old_itr != q_vector->tx.itr)
+		wr32(hw, reg_addr, q_vector->tx.itr);
+}
+
+/**
+ * i40evf_setup_tx_descriptors - Allocate the Tx descriptors
+ * @tx_ring: the tx ring to set up
+ *
+ * Return 0 on success, negative on error
+ **/
+int i40evf_setup_tx_descriptors(struct i40e_ring *tx_ring)
+{
+	struct device *dev = tx_ring->dev;
+	int bi_size;
+
+	if (!dev)
+		return -ENOMEM;
+
+	bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
+	tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
+	if (!tx_ring->tx_bi)
+		goto err;
+
+	/* round up to nearest 4K */
+	tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
+	tx_ring->size = ALIGN(tx_ring->size, 4096);
+	tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
+					   &tx_ring->dma, GFP_KERNEL);
+	if (!tx_ring->desc) {
+		dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
+			 tx_ring->size);
+		goto err;
+	}
+
+	tx_ring->next_to_use = 0;
+	tx_ring->next_to_clean = 0;
+	return 0;
+
+err:
+	kfree(tx_ring->tx_bi);
+	tx_ring->tx_bi = NULL;
+	return -ENOMEM;
+}
+
+/**
+ * i40evf_clean_rx_ring - Free Rx buffers
+ * @rx_ring: ring to be cleaned
+ **/
+void i40evf_clean_rx_ring(struct i40e_ring *rx_ring)
+{
+	struct device *dev = rx_ring->dev;
+	struct i40e_rx_buffer *rx_bi;
+	unsigned long bi_size;
+	u16 i;
+
+	/* ring already cleared, nothing to do */
+	if (!rx_ring->rx_bi)
+		return;
+
+	/* Free all the Rx ring sk_buffs */
+	for (i = 0; i < rx_ring->count; i++) {
+		rx_bi = &rx_ring->rx_bi[i];
+		if (rx_bi->dma) {
+			dma_unmap_single(dev,
+					 rx_bi->dma,
+					 rx_ring->rx_buf_len,
+					 DMA_FROM_DEVICE);
+			rx_bi->dma = 0;
+		}
+		if (rx_bi->skb) {
+			dev_kfree_skb(rx_bi->skb);
+			rx_bi->skb = NULL;
+		}
+		if (rx_bi->page) {
+			if (rx_bi->page_dma) {
+				dma_unmap_page(dev,
+					       rx_bi->page_dma,
+					       PAGE_SIZE / 2,
+					       DMA_FROM_DEVICE);
+				rx_bi->page_dma = 0;
+			}
+			__free_page(rx_bi->page);
+			rx_bi->page = NULL;
+			rx_bi->page_offset = 0;
+		}
+	}
+
+	bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
+	memset(rx_ring->rx_bi, 0, bi_size);
+
+	/* Zero out the descriptor ring */
+	memset(rx_ring->desc, 0, rx_ring->size);
+
+	rx_ring->next_to_clean = 0;
+	rx_ring->next_to_use = 0;
+}
+
+/**
+ * i40evf_free_rx_resources - Free Rx resources
+ * @rx_ring: ring to clean the resources from
+ *
+ * Free all receive software resources
+ **/
+void i40evf_free_rx_resources(struct i40e_ring *rx_ring)
+{
+	i40evf_clean_rx_ring(rx_ring);
+	kfree(rx_ring->rx_bi);
+	rx_ring->rx_bi = NULL;
+
+	if (rx_ring->desc) {
+		dma_free_coherent(rx_ring->dev, rx_ring->size,
+				  rx_ring->desc, rx_ring->dma);
+		rx_ring->desc = NULL;
+	}
+}
+
+/**
+ * i40evf_setup_rx_descriptors - Allocate Rx descriptors
+ * @rx_ring: Rx descriptor ring (for a specific queue) to set up
+ *
+ * Returns 0 on success, negative on failure
+ **/
+int i40evf_setup_rx_descriptors(struct i40e_ring *rx_ring)
+{
+	struct device *dev = rx_ring->dev;
+	int bi_size;
+
+	bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
+	rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL);
+	if (!rx_ring->rx_bi)
+		goto err;
+
+	/* Round up to nearest 4K */
+	rx_ring->size = ring_is_16byte_desc_enabled(rx_ring)
+		? rx_ring->count * sizeof(union i40e_16byte_rx_desc)
+		: rx_ring->count * sizeof(union i40e_32byte_rx_desc);
+	rx_ring->size = ALIGN(rx_ring->size, 4096);
+	rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
+					   &rx_ring->dma, GFP_KERNEL);
+
+	if (!rx_ring->desc) {
+		dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
+			 rx_ring->size);
+		goto err;
+	}
+
+	rx_ring->next_to_clean = 0;
+	rx_ring->next_to_use = 0;
+
+	return 0;
+err:
+	kfree(rx_ring->rx_bi);
+	rx_ring->rx_bi = NULL;
+	return -ENOMEM;
+}
+
+/**
+ * i40e_release_rx_desc - Store the new tail and head values
+ * @rx_ring: ring to bump
+ * @val: new head index
+ **/
+static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
+{
+	rx_ring->next_to_use = val;
+	/* Force memory writes to complete before letting h/w
+	 * know there are new descriptors to fetch.  (Only
+	 * applicable for weak-ordered memory model archs,
+	 * such as IA-64).
+	 */
+	wmb();
+	writel(val, rx_ring->tail);
+}
+
+/**
+ * i40evf_alloc_rx_buffers - Replace used receive buffers; packet split
+ * @rx_ring: ring to place buffers on
+ * @cleaned_count: number of buffers to replace
+ **/
+void i40evf_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
+{
+	u16 i = rx_ring->next_to_use;
+	union i40e_rx_desc *rx_desc;
+	struct i40e_rx_buffer *bi;
+	struct sk_buff *skb;
+
+	/* do nothing if no valid netdev defined */
+	if (!rx_ring->netdev || !cleaned_count)
+		return;
+
+	while (cleaned_count--) {
+		rx_desc = I40E_RX_DESC(rx_ring, i);
+		bi = &rx_ring->rx_bi[i];
+		skb = bi->skb;
+
+		if (!skb) {
+			skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
+							rx_ring->rx_buf_len);
+			if (!skb) {
+				rx_ring->rx_stats.alloc_buff_failed++;
+				goto no_buffers;
+			}
+			/* initialize queue mapping */
+			skb_record_rx_queue(skb, rx_ring->queue_index);
+			bi->skb = skb;
+		}
+
+		if (!bi->dma) {
+			bi->dma = dma_map_single(rx_ring->dev,
+						 skb->data,
+						 rx_ring->rx_buf_len,
+						 DMA_FROM_DEVICE);
+			if (dma_mapping_error(rx_ring->dev, bi->dma)) {
+				rx_ring->rx_stats.alloc_buff_failed++;
+				bi->dma = 0;
+				goto no_buffers;
+			}
+		}
+
+		if (ring_is_ps_enabled(rx_ring)) {
+			if (!bi->page) {
+				bi->page = alloc_page(GFP_ATOMIC);
+				if (!bi->page) {
+					rx_ring->rx_stats.alloc_page_failed++;
+					goto no_buffers;
+				}
+			}
+
+			if (!bi->page_dma) {
+				/* use a half page if we're re-using */
+				bi->page_offset ^= PAGE_SIZE / 2;
+				bi->page_dma = dma_map_page(rx_ring->dev,
+							    bi->page,
+							    bi->page_offset,
+							    PAGE_SIZE / 2,
+							    DMA_FROM_DEVICE);
+				if (dma_mapping_error(rx_ring->dev,
+						      bi->page_dma)) {
+					rx_ring->rx_stats.alloc_page_failed++;
+					bi->page_dma = 0;
+					goto no_buffers;
+				}
+			}
+
+			/* Refresh the desc even if buffer_addrs didn't change
+			 * because each write-back erases this info.
+			 */
+			rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
+			rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
+		} else {
+			rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
+			rx_desc->read.hdr_addr = 0;
+		}
+		i++;
+		if (i == rx_ring->count)
+			i = 0;
+	}
+
+no_buffers:
+	if (rx_ring->next_to_use != i)
+		i40e_release_rx_desc(rx_ring, i);
+}
+
+/**
+ * i40e_receive_skb - Send a completed packet up the stack
+ * @rx_ring:  rx ring in play
+ * @skb: packet to send up
+ * @vlan_tag: vlan tag for packet
+ **/
+static void i40e_receive_skb(struct i40e_ring *rx_ring,
+			     struct sk_buff *skb, u16 vlan_tag)
+{
+	struct i40e_q_vector *q_vector = rx_ring->q_vector;
+	struct i40e_vsi *vsi = rx_ring->vsi;
+	u64 flags = vsi->back->flags;
+
+	if (vlan_tag & VLAN_VID_MASK)
+		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
+
+	if (flags & I40E_FLAG_IN_NETPOLL)
+		netif_rx(skb);
+	else
+		napi_gro_receive(&q_vector->napi, skb);
+}
+
+/**
+ * i40e_rx_checksum - Indicate in skb if hw indicated a good cksum
+ * @vsi: the VSI we care about
+ * @skb: skb currently being received and modified
+ * @rx_status: status value of last descriptor in packet
+ * @rx_error: error value of last descriptor in packet
+ * @rx_ptype: ptype value of last descriptor in packet
+ **/
+static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
+				    struct sk_buff *skb,
+				    u32 rx_status,
+				    u32 rx_error,
+				    u16 rx_ptype)
+{
+	bool ipv4_tunnel, ipv6_tunnel;
+	__wsum rx_udp_csum;
+	__sum16 csum;
+	struct iphdr *iph;
+
+	ipv4_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT4_MAC_PAY3) &&
+		      (rx_ptype < I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4);
+	ipv6_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT6_MAC_PAY3) &&
+		      (rx_ptype < I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4);
+
+	skb->encapsulation = ipv4_tunnel || ipv6_tunnel;
+	skb->ip_summed = CHECKSUM_NONE;
+
+	/* Rx csum enabled and ip headers found? */
+	if (!(vsi->netdev->features & NETIF_F_RXCSUM &&
+	      rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
+		return;
+
+	/* likely incorrect csum if alternate IP extension headers found */
+	if (rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
+		return;
+
+	/* IP or L4 or outmost IP checksum error */
+	if (rx_error & ((1 << I40E_RX_DESC_ERROR_IPE_SHIFT) |
+			(1 << I40E_RX_DESC_ERROR_L4E_SHIFT) |
+			(1 << I40E_RX_DESC_ERROR_EIPE_SHIFT))) {
+		vsi->back->hw_csum_rx_error++;
+		return;
+	}
+
+	if (ipv4_tunnel &&
+	    !(rx_status & (1 << I40E_RX_DESC_STATUS_UDP_0_SHIFT))) {
+		/* If VXLAN traffic has an outer UDPv4 checksum we need to check
+		 * it in the driver; the hardware does not do it for us.
+		 * Since L3L4P bit was set we assume a valid IHL value (>=5)
+		 * so the total length of IPv4 header is IHL*4 bytes
+		 */
+		skb->transport_header = skb->mac_header +
+					sizeof(struct ethhdr) +
+					(ip_hdr(skb)->ihl * 4);
+
+		/* Add 4 bytes for VLAN tagged packets */
+		skb->transport_header += (skb->protocol == htons(ETH_P_8021Q) ||
+					  skb->protocol == htons(ETH_P_8021AD))
+					  ? VLAN_HLEN : 0;
+
+		rx_udp_csum = udp_csum(skb);
+		iph = ip_hdr(skb);
+		csum = csum_tcpudp_magic(
+				iph->saddr, iph->daddr,
+				(skb->len - skb_transport_offset(skb)),
+				IPPROTO_UDP, rx_udp_csum);
+
+		if (udp_hdr(skb)->check != csum) {
+			vsi->back->hw_csum_rx_error++;
+			return;
+		}
+	}
+
+	skb->ip_summed = CHECKSUM_UNNECESSARY;
+}
+
+/**
+ * i40e_rx_hash - returns the hash value from the Rx descriptor
+ * @ring: descriptor ring
+ * @rx_desc: specific descriptor
+ **/
+static inline u32 i40e_rx_hash(struct i40e_ring *ring,
+			       union i40e_rx_desc *rx_desc)
+{
+	const __le64 rss_mask =
+		cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
+			    I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
+
+	if ((ring->netdev->features & NETIF_F_RXHASH) &&
+	    (rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask)
+		return le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
+	else
+		return 0;
+}
+
+/**
+ * i40e_clean_rx_irq - Reclaim resources after receive completes
+ * @rx_ring:  rx ring to clean
+ * @budget:   how many cleans we're allowed
+ *
+ * Returns true if there's any budget left (i.e. the clean is finished)
+ **/
+static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
+{
+	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+	u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo;
+	u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
+	const int current_node = numa_node_id();
+	struct i40e_vsi *vsi = rx_ring->vsi;
+	u16 i = rx_ring->next_to_clean;
+	union i40e_rx_desc *rx_desc;
+	u32 rx_error, rx_status;
+	u64 qword;
+	u16 rx_ptype;
+
+	rx_desc = I40E_RX_DESC(rx_ring, i);
+	qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+	rx_status = (qword & I40E_RXD_QW1_STATUS_MASK)
+				>> I40E_RXD_QW1_STATUS_SHIFT;
+
+	while (rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) {
+		union i40e_rx_desc *next_rxd;
+		struct i40e_rx_buffer *rx_bi;
+		struct sk_buff *skb;
+		u16 vlan_tag;
+		rx_bi = &rx_ring->rx_bi[i];
+		skb = rx_bi->skb;
+		prefetch(skb->data);
+
+		rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
+				I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
+		rx_header_len = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK) >>
+				I40E_RXD_QW1_LENGTH_HBUF_SHIFT;
+		rx_sph = (qword & I40E_RXD_QW1_LENGTH_SPH_MASK) >>
+			 I40E_RXD_QW1_LENGTH_SPH_SHIFT;
+
+		rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
+			   I40E_RXD_QW1_ERROR_SHIFT;
+		rx_hbo = rx_error & (1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
+		rx_error &= ~(1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
+
+		rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
+			   I40E_RXD_QW1_PTYPE_SHIFT;
+		rx_bi->skb = NULL;
+
+		/* This memory barrier is needed to keep us from reading
+		 * any other fields out of the rx_desc until we know the
+		 * STATUS_DD bit is set
+		 */
+		rmb();
+
+		/* Get the header and possibly the whole packet
+		 * If this is an skb from a previous receive, dma will be 0
+		 */
+		if (rx_bi->dma) {
+			u16 len;
+
+			if (rx_hbo)
+				len = I40E_RX_HDR_SIZE;
+			else if (rx_sph)
+				len = rx_header_len;
+			else if (rx_packet_len)
+				len = rx_packet_len;   /* 1buf/no split found */
+			else
+				len = rx_header_len;   /* split always mode */
+
+			skb_put(skb, len);
+			dma_unmap_single(rx_ring->dev,
+					 rx_bi->dma,
+					 rx_ring->rx_buf_len,
+					 DMA_FROM_DEVICE);
+			rx_bi->dma = 0;
+		}
+
+		/* Get the rest of the data if this was a header split */
+		if (ring_is_ps_enabled(rx_ring) && rx_packet_len) {
+
+			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
+					   rx_bi->page,
+					   rx_bi->page_offset,
+					   rx_packet_len);
+
+			skb->len += rx_packet_len;
+			skb->data_len += rx_packet_len;
+			skb->truesize += rx_packet_len;
+
+			if ((page_count(rx_bi->page) == 1) &&
+			    (page_to_nid(rx_bi->page) == current_node))
+				get_page(rx_bi->page);
+			else
+				rx_bi->page = NULL;
+
+			dma_unmap_page(rx_ring->dev,
+				       rx_bi->page_dma,
+				       PAGE_SIZE / 2,
+				       DMA_FROM_DEVICE);
+			rx_bi->page_dma = 0;
+		}
+		I40E_RX_NEXT_DESC_PREFETCH(rx_ring, i, next_rxd);
+
+		if (unlikely(
+		    !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
+			struct i40e_rx_buffer *next_buffer;
+
+			next_buffer = &rx_ring->rx_bi[i];
+
+			if (ring_is_ps_enabled(rx_ring)) {
+				rx_bi->skb = next_buffer->skb;
+				rx_bi->dma = next_buffer->dma;
+				next_buffer->skb = skb;
+				next_buffer->dma = 0;
+			}
+			rx_ring->rx_stats.non_eop_descs++;
+			goto next_desc;
+		}
+
+		/* ERR_MASK will only have valid bits if EOP set */
+		if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
+			dev_kfree_skb_any(skb);
+			goto next_desc;
+		}
+
+		skb->rxhash = i40e_rx_hash(rx_ring, rx_desc);
+		/* probably a little skewed due to removing CRC */
+		total_rx_bytes += skb->len;
+		total_rx_packets++;
+
+		skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+
+		i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
+
+		vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
+			 ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
+			 : 0;
+		i40e_receive_skb(rx_ring, skb, vlan_tag);
+
+		rx_ring->netdev->last_rx = jiffies;
+		budget--;
+next_desc:
+		rx_desc->wb.qword1.status_error_len = 0;
+		if (!budget)
+			break;
+
+		cleaned_count++;
+		/* return some buffers to hardware, one at a time is too slow */
+		if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
+			i40evf_alloc_rx_buffers(rx_ring, cleaned_count);
+			cleaned_count = 0;
+		}
+
+		/* use prefetched values */
+		rx_desc = next_rxd;
+		qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+		rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
+			    I40E_RXD_QW1_STATUS_SHIFT;
+	}
+
+	rx_ring->next_to_clean = i;
+	u64_stats_update_begin(&rx_ring->syncp);
+	rx_ring->stats.packets += total_rx_packets;
+	rx_ring->stats.bytes += total_rx_bytes;
+	u64_stats_update_end(&rx_ring->syncp);
+	rx_ring->q_vector->rx.total_packets += total_rx_packets;
+	rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
+
+	if (cleaned_count)
+		i40evf_alloc_rx_buffers(rx_ring, cleaned_count);
+
+	return budget > 0;
+}
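
The clean loop above repeatedly unpacks status, error, packet type and the two length fields from qword 1 of the write-back descriptor. A minimal standalone sketch of that decode step, using the I40E_RXD_QW1_* masks and shifts declared in i40e_type.h further down; the struct and helper names here are illustrative, not part of the driver:

/* Sketch: unpack the fields the Rx clean loop reads from a descriptor's
 * write-back qword 1.
 */
struct rxd_qw1_fields {
	u32 status;	/* DD, EOF, L2TAG1P, ... */
	u32 error;	/* RXE, HBO, L3L4E, ... */
	u32 ptype;	/* packet type, feeds hash/checksum decode */
	u16 pkt_len;	/* packet buffer length */
	u16 hdr_len;	/* header buffer length */
	bool sph;	/* header was split by the hardware */
};

static void rxd_decode_qw1(u64 qword, struct rxd_qw1_fields *f)
{
	f->status  = (qword & I40E_RXD_QW1_STATUS_MASK) >>
		     I40E_RXD_QW1_STATUS_SHIFT;
	f->error   = (qword & I40E_RXD_QW1_ERROR_MASK) >>
		     I40E_RXD_QW1_ERROR_SHIFT;
	f->ptype   = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
		     I40E_RXD_QW1_PTYPE_SHIFT;
	f->pkt_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
		     I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
	f->hdr_len = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK) >>
		     I40E_RXD_QW1_LENGTH_HBUF_SHIFT;
	f->sph     = !!(qword & I40E_RXD_QW1_LENGTH_SPH_MASK);
}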
+
+/**
+ * i40evf_napi_poll - NAPI polling Rx/Tx cleanup routine
+ * @napi: napi struct with our devices info in it
+ * @budget: amount of work driver is allowed to do this pass, in packets
+ *
+ * This function will clean all queues associated with a q_vector.
+ *
+ * Returns the amount of work done
+ **/
+int i40evf_napi_poll(struct napi_struct *napi, int budget)
+{
+	struct i40e_q_vector *q_vector =
+			       container_of(napi, struct i40e_q_vector, napi);
+	struct i40e_vsi *vsi = q_vector->vsi;
+	struct i40e_ring *ring;
+	bool clean_complete = true;
+	int budget_per_ring;
+
+	if (test_bit(__I40E_DOWN, &vsi->state)) {
+		napi_complete(napi);
+		return 0;
+	}
+
+	/* Since the actual Tx work is minimal, we can give the Tx a larger
+	 * budget and be more aggressive about cleaning up the Tx descriptors.
+	 */
+	i40e_for_each_ring(ring, q_vector->tx)
+		clean_complete &= i40e_clean_tx_irq(ring, vsi->work_limit);
+
+	/* We attempt to distribute budget to each Rx queue fairly, but don't
+	 * allow the budget to go below 1 because that would exit polling early.
+	 */
+	budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
+
+	i40e_for_each_ring(ring, q_vector->rx)
+		clean_complete &= i40e_clean_rx_irq(ring, budget_per_ring);
+
+	/* If work is not complete, return the full budget so polling continues */
+	if (!clean_complete)
+		return budget;
+
+	/* Work is done so exit the polling mode and re-enable the interrupt */
+	napi_complete(napi);
+	if (ITR_IS_DYNAMIC(vsi->rx_itr_setting) ||
+	    ITR_IS_DYNAMIC(vsi->tx_itr_setting))
+		i40e_update_dynamic_itr(q_vector);
+
+	if (!test_bit(__I40E_DOWN, &vsi->state))
+		i40evf_irq_enable_queues(vsi->back, 1 << q_vector->v_idx);
+
+	return 0;
+}
+
+/**
+ * i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
+ * @skb:     send buffer
+ * @tx_ring: ring to send buffer on
+ * @flags:   the tx flags to be set
+ *
+ * Checks the skb and sets up the generic transmit flags related to VLAN
+ * tagging for the HW, such as VLAN and DCB.
+ *
+ * Returns an error code to indicate the frame should be dropped, otherwise
+ * returns 0 to indicate the flags have been set properly.
+ **/
+static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
+				      struct i40e_ring *tx_ring,
+				      u32 *flags)
+{
+	__be16 protocol = skb->protocol;
+	u32  tx_flags = 0;
+
+	/* if we have a HW VLAN tag being added, default to the HW one */
+	if (vlan_tx_tag_present(skb)) {
+		tx_flags |= vlan_tx_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
+		tx_flags |= I40E_TX_FLAGS_HW_VLAN;
+	/* else if it is a SW VLAN, check the next protocol and store the tag */
+	} else if (protocol == htons(ETH_P_8021Q)) {
+		struct vlan_hdr *vhdr, _vhdr;
+		vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
+		if (!vhdr)
+			return -EINVAL;
+
+		protocol = vhdr->h_vlan_encapsulated_proto;
+		tx_flags |= ntohs(vhdr->h_vlan_TCI) << I40E_TX_FLAGS_VLAN_SHIFT;
+		tx_flags |= I40E_TX_FLAGS_SW_VLAN;
+	}
+
+	*flags = tx_flags;
+	return 0;
+}
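
Either way the 16-bit tag lands in the upper half of tx_flags (I40E_TX_FLAGS_VLAN_SHIFT is 16 in i40e_txrx.h below), and i40e_tx_map() later shifts it back down into the descriptor's L2TAG1 field. A small illustrative sketch of that round trip; the helper names are made up and the values are chosen only for the example:

/* Illustration: VLAN ID 100, priority 3 gives a TCI of 0x6064. */
static u32 example_pack_hw_vlan(u16 tci)
{
	return ((u32)tci << I40E_TX_FLAGS_VLAN_SHIFT) | I40E_TX_FLAGS_HW_VLAN;
}

static u16 example_unpack_vlan_tag(u32 tx_flags)
{
	return (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >> I40E_TX_FLAGS_VLAN_SHIFT;
}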
+
+/**
+ * i40e_tso - set up the tso context descriptor
+ * @tx_ring:  ptr to the ring to send
+ * @skb:      ptr to the skb we're sending
+ * @tx_flags: the collected send information
+ * @protocol: the send protocol
+ * @hdr_len:  ptr to the size of the packet header
+ * @cd_type_cmd_tso_mss: Quad Word 1 of the context descriptor
+ * @cd_tunneling: ptr to context descriptor bits
+ *
+ * Returns 0 if no TSO is needed, 1 if TSO is set up, or a negative error code
+ **/
+static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
+		    u32 tx_flags, __be16 protocol, u8 *hdr_len,
+		    u64 *cd_type_cmd_tso_mss, u32 *cd_tunneling)
+{
+	u32 cd_cmd, cd_tso_len, cd_mss;
+	struct tcphdr *tcph;
+	struct iphdr *iph;
+	u32 l4len;
+	int err;
+	struct ipv6hdr *ipv6h;
+
+	if (!skb_is_gso(skb))
+		return 0;
+
+	if (skb_header_cloned(skb)) {
+		err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+		if (err)
+			return err;
+	}
+
+	if (protocol == htons(ETH_P_IP)) {
+		iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
+		tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
+		iph->tot_len = 0;
+		iph->check = 0;
+		tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
+						 0, IPPROTO_TCP, 0);
+	} else if (skb_is_gso_v6(skb)) {
+
+		ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb)
+					   : ipv6_hdr(skb);
+		tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
+		ipv6h->payload_len = 0;
+		tcph->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
+					       0, IPPROTO_TCP, 0);
+	}
+
+	l4len = skb->encapsulation ? inner_tcp_hdrlen(skb) : tcp_hdrlen(skb);
+	*hdr_len = (skb->encapsulation
+		    ? (skb_inner_transport_header(skb) - skb->data)
+		    : skb_transport_offset(skb)) + l4len;
+
+	/* find the field values */
+	cd_cmd = I40E_TX_CTX_DESC_TSO;
+	cd_tso_len = skb->len - *hdr_len;
+	cd_mss = skb_shinfo(skb)->gso_size;
+	*cd_type_cmd_tso_mss |= ((u64)cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
+				((u64)cd_tso_len <<
+				 I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
+				((u64)cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
+	return 1;
+}
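
The three values found here are packed into the 64-bit context quad word with the I40E_TXD_CTX_QW1_* shifts from i40e_type.h below. A hedged worked example with invented numbers: a GSO send of 65590 bytes carrying 54 bytes of headers and an MSS of 1448 comes out roughly as:

/* Sketch of the context qword built for skb->len = 65590, hdr_len = 54,
 * gso_size = 1448; in the real path the DTYPE bit is set by the caller.
 */
static u64 example_tso_ctx_qword(void)
{
	u32 tso_len = 65590 - 54;	/* TSO'd payload length */
	u32 mss = 1448;

	return I40E_TX_DESC_DTYPE_CONTEXT |
	       ((u64)I40E_TX_CTX_DESC_TSO << I40E_TXD_CTX_QW1_CMD_SHIFT) |
	       ((u64)tso_len << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
	       ((u64)mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
}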
+
+/**
+ * i40e_tx_enable_csum - Enable Tx checksum offloads
+ * @skb: send buffer
+ * @tx_flags: Tx flags currently set
+ * @td_cmd: Tx descriptor command bits to set
+ * @td_offset: Tx descriptor header offsets to set
+ * @tx_ring:   Tx descriptor ring
+ * @cd_tunneling: ptr to context desc bits
+ **/
+static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
+				u32 *td_cmd, u32 *td_offset,
+				struct i40e_ring *tx_ring,
+				u32 *cd_tunneling)
+{
+	struct ipv6hdr *this_ipv6_hdr;
+	unsigned int this_tcp_hdrlen;
+	struct iphdr *this_ip_hdr;
+	u32 network_hdr_len;
+	u8 l4_hdr = 0;
+
+	if (skb->encapsulation) {
+		network_hdr_len = skb_inner_network_header_len(skb);
+		this_ip_hdr = inner_ip_hdr(skb);
+		this_ipv6_hdr = inner_ipv6_hdr(skb);
+		this_tcp_hdrlen = inner_tcp_hdrlen(skb);
+
+		if (tx_flags & I40E_TX_FLAGS_IPV4) {
+
+			if (tx_flags & I40E_TX_FLAGS_TSO) {
+				*cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4;
+				ip_hdr(skb)->check = 0;
+			} else {
+				*cd_tunneling |=
+					 I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
+			}
+		} else if (tx_flags & I40E_TX_FLAGS_IPV6) {
+			/* the outer IPv6 header has no checksum field, so
+			 * there is nothing to zero here; just tell the HW
+			 * the outer header type.
+			 */
+			*cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
+		}
+
+		/* Now set the ctx descriptor fields */
+		*cd_tunneling |= (skb_network_header_len(skb) >> 2) <<
+					I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT |
+				   I40E_TXD_CTX_UDP_TUNNELING            |
+				   ((skb_inner_network_offset(skb) -
+					skb_transport_offset(skb)) >> 1) <<
+				   I40E_TXD_CTX_QW0_NATLEN_SHIFT;
+
+	} else {
+		network_hdr_len = skb_network_header_len(skb);
+		this_ip_hdr = ip_hdr(skb);
+		this_ipv6_hdr = ipv6_hdr(skb);
+		this_tcp_hdrlen = tcp_hdrlen(skb);
+	}
+
+	/* Enable IP checksum offloads */
+	if (tx_flags & I40E_TX_FLAGS_IPV4) {
+		l4_hdr = this_ip_hdr->protocol;
+		/* the stack computes the IP header checksum already, the only
+		 * time we need the hardware to recompute it is in the case of
+		 * TSO.
+		 */
+		if (tx_flags & I40E_TX_FLAGS_TSO) {
+			*td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM;
+			this_ip_hdr->check = 0;
+		} else {
+			*td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4;
+		}
+		/* Now set the td_offset for IP header length */
+		*td_offset = (network_hdr_len >> 2) <<
+			      I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
+	} else if (tx_flags & I40E_TX_FLAGS_IPV6) {
+		l4_hdr = this_ipv6_hdr->nexthdr;
+		*td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
+		/* Now set the td_offset for IP header length */
+		*td_offset = (network_hdr_len >> 2) <<
+			      I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
+	}
+	/* words in MACLEN + dwords in IPLEN + dwords in L4Len */
+	*td_offset |= (skb_network_offset(skb) >> 1) <<
+		       I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
+
+	/* Enable L4 checksum offloads */
+	switch (l4_hdr) {
+	case IPPROTO_TCP:
+		/* enable checksum offloads */
+		*td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
+		*td_offset |= (this_tcp_hdrlen >> 2) <<
+			       I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+		break;
+	case IPPROTO_SCTP:
+		/* enable SCTP checksum offload */
+		*td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
+		*td_offset |= (sizeof(struct sctphdr) >> 2) <<
+			       I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+		break;
+	case IPPROTO_UDP:
+		/* enable UDP checksum offload */
+		*td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
+		*td_offset |= (sizeof(struct udphdr) >> 2) <<
+			       I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+		break;
+	default:
+		break;
+	}
+}
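
The td_offset fields above use the units the data descriptor expects: MACLEN in 2-byte words, IPLEN and L4LEN in 4-byte dwords. As a hedged worked example for a plain IPv4/TCP frame with no VLAN and no IP or TCP options, the offsets reduce to:

/* Sketch: td_offset for a 14-byte Ethernet header, 20-byte IPv4 header
 * and 20-byte TCP header: 7 words + 5 dwords + 5 dwords.
 */
static u32 example_td_offset(void)
{
	return (7 << I40E_TX_DESC_LENGTH_MACLEN_SHIFT) |	/* 14 >> 1 */
	       (5 << I40E_TX_DESC_LENGTH_IPLEN_SHIFT) |		/* 20 >> 2 */
	       (5 << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT);	/* 20 >> 2 */
}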
+
+/**
+ * i40e_create_tx_ctx - Build the Tx context descriptor
+ * @tx_ring:  ring to create the descriptor on
+ * @cd_type_cmd_tso_mss: Quad Word 1
+ * @cd_tunneling: Quad Word 0 - bits 0-31
+ * @cd_l2tag2: Quad Word 0 - bits 32-63
+ **/
+static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
+			       const u64 cd_type_cmd_tso_mss,
+			       const u32 cd_tunneling, const u32 cd_l2tag2)
+{
+	struct i40e_tx_context_desc *context_desc;
+	int i = tx_ring->next_to_use;
+
+	if (!cd_type_cmd_tso_mss && !cd_tunneling && !cd_l2tag2)
+		return;
+
+	/* grab the next descriptor */
+	context_desc = I40E_TX_CTXTDESC(tx_ring, i);
+
+	i++;
+	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
+
+	/* cpu_to_le32 and assign to struct fields */
+	context_desc->tunneling_params = cpu_to_le32(cd_tunneling);
+	context_desc->l2tag2 = cpu_to_le16(cd_l2tag2);
+	context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
+}
+
+/**
+ * i40e_tx_map - Build the Tx descriptor
+ * @tx_ring:  ring to send buffer on
+ * @skb:      send buffer
+ * @first:    first i40e_tx_buffer to use for this packet
+ * @tx_flags: collected send information
+ * @hdr_len:  size of the packet header
+ * @td_cmd:   the command field in the descriptor
+ * @td_offset: offset for checksum or crc
+ **/
+static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
+			struct i40e_tx_buffer *first, u32 tx_flags,
+			const u8 hdr_len, u32 td_cmd, u32 td_offset)
+{
+	unsigned int data_len = skb->data_len;
+	unsigned int size = skb_headlen(skb);
+	struct skb_frag_struct *frag;
+	struct i40e_tx_buffer *tx_bi;
+	struct i40e_tx_desc *tx_desc;
+	u16 i = tx_ring->next_to_use;
+	u32 td_tag = 0;
+	dma_addr_t dma;
+	u16 gso_segs;
+
+	if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
+		td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
+		td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >>
+			 I40E_TX_FLAGS_VLAN_SHIFT;
+	}
+
+	if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO))
+		gso_segs = skb_shinfo(skb)->gso_segs;
+	else
+		gso_segs = 1;
+
+	/* multiply data chunks by size of headers */
+	first->bytecount = skb->len - hdr_len + (gso_segs * hdr_len);
+	first->gso_segs = gso_segs;
+	first->skb = skb;
+	first->tx_flags = tx_flags;
+
+	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
+
+	tx_desc = I40E_TX_DESC(tx_ring, i);
+	tx_bi = first;
+
+	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
+		if (dma_mapping_error(tx_ring->dev, dma))
+			goto dma_error;
+
+		/* record length, and DMA address */
+		dma_unmap_len_set(tx_bi, len, size);
+		dma_unmap_addr_set(tx_bi, dma, dma);
+
+		tx_desc->buffer_addr = cpu_to_le64(dma);
+
+		while (unlikely(size > I40E_MAX_DATA_PER_TXD)) {
+			tx_desc->cmd_type_offset_bsz =
+				build_ctob(td_cmd, td_offset,
+					   I40E_MAX_DATA_PER_TXD, td_tag);
+
+			tx_desc++;
+			i++;
+			if (i == tx_ring->count) {
+				tx_desc = I40E_TX_DESC(tx_ring, 0);
+				i = 0;
+			}
+
+			dma += I40E_MAX_DATA_PER_TXD;
+			size -= I40E_MAX_DATA_PER_TXD;
+
+			tx_desc->buffer_addr = cpu_to_le64(dma);
+		}
+
+		if (likely(!data_len))
+			break;
+
+		tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
+							  size, td_tag);
+
+		tx_desc++;
+		i++;
+		if (i == tx_ring->count) {
+			tx_desc = I40E_TX_DESC(tx_ring, 0);
+			i = 0;
+		}
+
+		size = skb_frag_size(frag);
+		data_len -= size;
+
+		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
+				       DMA_TO_DEVICE);
+
+		tx_bi = &tx_ring->tx_bi[i];
+	}
+
+	tx_desc->cmd_type_offset_bsz =
+		build_ctob(td_cmd, td_offset, size, td_tag) |
+		cpu_to_le64((u64)I40E_TXD_CMD << I40E_TXD_QW1_CMD_SHIFT);
+
+	netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev,
+						 tx_ring->queue_index),
+			     first->bytecount);
+
+	/* set the timestamp */
+	first->time_stamp = jiffies;
+
+	/* Force memory writes to complete before letting h/w
+	 * know there are new descriptors to fetch.  (Only
+	 * applicable for weak-ordered memory model archs,
+	 * such as IA-64).
+	 */
+	wmb();
+
+	/* set next_to_watch value indicating a packet is present */
+	first->next_to_watch = tx_desc;
+
+	i++;
+	if (i == tx_ring->count)
+		i = 0;
+
+	tx_ring->next_to_use = i;
+
+	/* notify HW of packet */
+	writel(i, tx_ring->tail);
+
+	return;
+
+dma_error:
+	dev_info(tx_ring->dev, "TX DMA map failed\n");
+
+	/* clear dma mappings for failed tx_bi map */
+	for (;;) {
+		tx_bi = &tx_ring->tx_bi[i];
+		i40e_unmap_and_free_tx_resource(tx_ring, tx_bi);
+		if (tx_bi == first)
+			break;
+		if (i == 0)
+			i = tx_ring->count;
+		i--;
+	}
+
+	tx_ring->next_to_use = i;
+}
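
Each data descriptor's second quad word packs the command bits, the offsets computed earlier, the buffer size and the L2 tag. The build_ctob() helper used above is defined earlier in this file and presumably composes the word along the following lines, using the I40E_TXD_QW1_* shifts from i40e_type.h; this is a sketch, not the driver's own definition:

static __le64 example_build_ctob(u32 td_cmd, u32 td_offset,
				 unsigned int size, u32 td_tag)
{
	return cpu_to_le64(I40E_TX_DESC_DTYPE_DATA |
			   ((u64)td_cmd << I40E_TXD_QW1_CMD_SHIFT) |
			   ((u64)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
			   ((u64)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
			   ((u64)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT));
}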
+
+/**
+ * __i40e_maybe_stop_tx - 2nd level check for tx stop conditions
+ * @tx_ring: the ring to be checked
+ * @size:    the number of descriptors we want to assure are available
+ *
+ * Returns -EBUSY if a stop is needed, else 0
+ **/
+static inline int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
+{
+	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
+	/* Memory barrier before checking head and tail */
+	smp_mb();
+
+	/* Check again in case another CPU has just made room available. */
+	if (likely(I40E_DESC_UNUSED(tx_ring) < size))
+		return -EBUSY;
+
+	/* A reprieve! - use start_queue because it doesn't call schedule */
+	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
+	++tx_ring->tx_stats.restart_queue;
+	return 0;
+}
+
+/**
+ * i40e_maybe_stop_tx - 1st level check for tx stop conditions
+ * @tx_ring: the ring to be checked
+ * @size:    the number of descriptors we want to assure are available
+ *
+ * Returns 0 if stop is not needed
+ **/
+static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
+{
+	if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
+		return 0;
+	return __i40e_maybe_stop_tx(tx_ring, size);
+}
+
+/**
+ * i40e_xmit_descriptor_count - calculate number of tx descriptors needed
+ * @skb:     send buffer
+ * @tx_ring: ring to send buffer on
+ *
+ * Returns the number of data descriptors needed for this skb. Returns 0 if
+ * there are not enough descriptors available in this ring, since we need at
+ * least one descriptor.
+ **/
+static int i40e_xmit_descriptor_count(struct sk_buff *skb,
+				      struct i40e_ring *tx_ring)
+{
+#if PAGE_SIZE > I40E_MAX_DATA_PER_TXD
+	unsigned int f;
+#endif
+	int count = 0;
+
+	/* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
+	 *       + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
+	 *       + 2 desc gap to keep tail from touching head,
+	 *       + 1 desc for context descriptor,
+	 * otherwise try next time
+	 */
+#if PAGE_SIZE > I40E_MAX_DATA_PER_TXD
+	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
+		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
+#else
+	count += skb_shinfo(skb)->nr_frags;
+#endif
+	count += TXD_USE_COUNT(skb_headlen(skb));
+	if (i40e_maybe_stop_tx(tx_ring, count + 3)) {
+		tx_ring->tx_stats.tx_busy++;
+		return 0;
+	}
+	return count;
+}
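
A hedged worked example of the arithmetic above, assuming 4 KiB pages so that TXD_USE_COUNT(PAGE_SIZE) is 1 and each fragment costs a single descriptor:

/* An skb with a 1514-byte linear area and 16 page-sized fragments. */
static int example_desc_count(void)
{
	int count = 16;			/* one per page-sized fragment */

	count += TXD_USE_COUNT(1514);	/* linear head -> 1 descriptor */
	return count;			/* 17; the stop check wants 17 + 3 */
}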
+
+/**
+ * i40e_xmit_frame_ring - Sends buffer on Tx ring
+ * @skb:     send buffer
+ * @tx_ring: ring to send buffer on
+ *
+ * Returns NETDEV_TX_OK if sent, else an error code
+ **/
+static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
+					struct i40e_ring *tx_ring)
+{
+	u64 cd_type_cmd_tso_mss = I40E_TX_DESC_DTYPE_CONTEXT;
+	u32 cd_tunneling = 0, cd_l2tag2 = 0;
+	struct i40e_tx_buffer *first;
+	u32 td_offset = 0;
+	u32 tx_flags = 0;
+	__be16 protocol;
+	u32 td_cmd = 0;
+	u8 hdr_len = 0;
+	int tso;
+
+	if (!i40e_xmit_descriptor_count(skb, tx_ring))
+		return NETDEV_TX_BUSY;
+
+	/* prepare the xmit flags */
+	if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
+		goto out_drop;
+
+	/* obtain protocol of skb */
+	protocol = skb->protocol;
+
+	/* record the location of the first descriptor for this packet */
+	first = &tx_ring->tx_bi[tx_ring->next_to_use];
+
+	/* setup IPv4/IPv6 offloads */
+	if (protocol == htons(ETH_P_IP))
+		tx_flags |= I40E_TX_FLAGS_IPV4;
+	else if (protocol == htons(ETH_P_IPV6))
+		tx_flags |= I40E_TX_FLAGS_IPV6;
+
+	tso = i40e_tso(tx_ring, skb, tx_flags, protocol, &hdr_len,
+		       &cd_type_cmd_tso_mss, &cd_tunneling);
+
+	if (tso < 0)
+		goto out_drop;
+	else if (tso)
+		tx_flags |= I40E_TX_FLAGS_TSO;
+
+	skb_tx_timestamp(skb);
+
+	/* always enable CRC insertion offload */
+	td_cmd |= I40E_TX_DESC_CMD_ICRC;
+
+	/* Always offload the checksum, since it's in the data descriptor */
+	if (skb->ip_summed == CHECKSUM_PARTIAL) {
+		tx_flags |= I40E_TX_FLAGS_CSUM;
+
+		i40e_tx_enable_csum(skb, tx_flags, &td_cmd, &td_offset,
+				    tx_ring, &cd_tunneling);
+	}
+
+	i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
+			   cd_tunneling, cd_l2tag2);
+
+	i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
+		    td_cmd, td_offset);
+
+	i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
+
+	return NETDEV_TX_OK;
+
+out_drop:
+	dev_kfree_skb_any(skb);
+	return NETDEV_TX_OK;
+}
+
+/**
+ * i40evf_xmit_frame - Selects the correct VSI and Tx queue to send buffer
+ * @skb:    send buffer
+ * @netdev: network interface device structure
+ *
+ * Returns NETDEV_TX_OK if sent, else an error code
+ **/
+netdev_tx_t i40evf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+{
+	struct i40evf_adapter *adapter = netdev_priv(netdev);
+	struct i40e_ring *tx_ring = adapter->tx_rings[skb->queue_mapping];
+
+	/* hardware can't handle really short frames, hardware padding works
+	 * beyond this point
+	 */
+	if (unlikely(skb->len < I40E_MIN_TX_LEN)) {
+		if (skb_pad(skb, I40E_MIN_TX_LEN - skb->len))
+			return NETDEV_TX_OK;
+		skb->len = I40E_MIN_TX_LEN;
+		skb_set_tail_pointer(skb, I40E_MIN_TX_LEN);
+	}
+
+	return i40e_xmit_frame_ring(skb, tx_ring);
+}
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
new file mode 100644
index 000000000000..10bf49e18d7f
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -0,0 +1,296 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_TXRX_H_
+#define _I40E_TXRX_H_
+
+/* Interrupt Throttling and Rate Limiting (storm control) Goodies */
+
+#define I40E_MAX_ITR               0x0FF0  /* reg uses 2 usec resolution */
+#define I40E_MIN_ITR               0x0004  /* reg uses 2 usec resolution */
+#define I40E_MAX_IRATE             0x03F
+#define I40E_MIN_IRATE             0x001
+#define I40E_IRATE_USEC_RESOLUTION 4
+#define I40E_ITR_100K              0x0005
+#define I40E_ITR_20K               0x0019
+#define I40E_ITR_8K                0x003E
+#define I40E_ITR_4K                0x007A
+#define I40E_ITR_RX_DEF            I40E_ITR_8K
+#define I40E_ITR_TX_DEF            I40E_ITR_4K
+#define I40E_ITR_DYNAMIC           0x8000  /* use top bit as a flag */
+#define I40E_MIN_INT_RATE          250     /* ~= 1000000 / (I40E_MAX_ITR * 2) */
+#define I40E_MAX_INT_RATE          500000  /* == 1000000 / (I40E_MIN_ITR * 2) */
+#define I40E_DEFAULT_IRQ_WORK      256
+#define ITR_TO_REG(setting) ((setting & ~I40E_ITR_DYNAMIC) >> 1)
+#define ITR_IS_DYNAMIC(setting) (!!(setting & I40E_ITR_DYNAMIC))
+#define ITR_REG_TO_USEC(itr_reg) (itr_reg << 1)
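
A small sanity-check sketch of how the three conversion macros above compose; the setting value is arbitrary and chosen only for illustration:

static bool example_itr_roundtrip(void)
{
	u16 setting = I40E_ITR_DYNAMIC | 0x7A;	/* dynamic, 122 usec */
	u16 reg = ITR_TO_REG(setting);		/* flag stripped, halved: 0x3D */

	return ITR_IS_DYNAMIC(setting) &&
	       ITR_REG_TO_USEC(reg) == (setting & ~I40E_ITR_DYNAMIC);
}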
+
+#define I40E_QUEUE_END_OF_LIST 0x7FF
+
+/* This enum matches hardware bits and is meant to be used by DYN_CTLN
+ * registers and QINT registers, or more generally anywhere in the manual
+ * that mentions ITR_INDX.  ITR_NONE cannot be used as an index 'n' into any
+ * register; it is a special value meaning "don't update" ITR0/1/2.
+ */
+enum i40e_dyn_idx_t {
+	I40E_IDX_ITR0 = 0,
+	I40E_IDX_ITR1 = 1,
+	I40E_IDX_ITR2 = 2,
+	I40E_ITR_NONE = 3	/* ITR_NONE must not be used as an index */
+};
+
+/* these are indexes into ITRN registers */
+#define I40E_RX_ITR    I40E_IDX_ITR0
+#define I40E_TX_ITR    I40E_IDX_ITR1
+#define I40E_PE_ITR    I40E_IDX_ITR2
+
+/* Supported RSS offloads */
+#define I40E_DEFAULT_RSS_HENA ( \
+	((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
+	((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
+	((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
+	((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
+	((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN) | \
+	((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
+	((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
+	((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) | \
+	((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
+	((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) | \
+	((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
+	((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN) | \
+	((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
+	((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
+	((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
+	((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) | \
+	((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD))
+
+/* Supported Rx Buffer Sizes */
+#define I40E_RXBUFFER_512   512    /* Used for packet split */
+#define I40E_RXBUFFER_2048  2048
+#define I40E_RXBUFFER_3072  3072   /* For FCoE MTU of 2158 */
+#define I40E_RXBUFFER_4096  4096
+#define I40E_RXBUFFER_8192  8192
+#define I40E_MAX_RXBUFFER   9728  /* largest size for single descriptor */
+
+/* NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we
+ * reserve 2 more, and skb_shared_info adds an additional 384 bytes more,
+ * this adds up to 512 bytes of extra data meaning the smallest allocation
+ * we could have is 1K.
+ * i.e. RXBUFFER_512 --> size-1024 slab
+ */
+#define I40E_RX_HDR_SIZE  I40E_RXBUFFER_512
+
+/* How many Rx Buffers do we bundle into one write to the hardware? */
+#define I40E_RX_BUFFER_WRITE	16	/* Must be power of 2 */
+#define I40E_RX_NEXT_DESC(r, i, n)		\
+	do {					\
+		(i)++;				\
+		if ((i) == (r)->count)		\
+			(i) = 0;		\
+		(n) = I40E_RX_DESC((r), (i));	\
+	} while (0)
+
+#define I40E_RX_NEXT_DESC_PREFETCH(r, i, n)		\
+	do {						\
+		I40E_RX_NEXT_DESC((r), (i), (n));	\
+		prefetch((n));				\
+	} while (0)
+
+#define i40e_rx_desc i40e_32byte_rx_desc
+
+#define I40E_MIN_TX_LEN		17
+#define I40E_MAX_DATA_PER_TXD	16383	/* aka 16kB - 1 */
+
+/* Tx Descriptors needed, worst case */
+#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), I40E_MAX_DATA_PER_TXD)
+#define DESC_NEEDED ((MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE)) + 4)
+
+#define I40E_TX_FLAGS_CSUM		(u32)(1)
+#define I40E_TX_FLAGS_HW_VLAN		(u32)(1 << 1)
+#define I40E_TX_FLAGS_SW_VLAN		(u32)(1 << 2)
+#define I40E_TX_FLAGS_TSO		(u32)(1 << 3)
+#define I40E_TX_FLAGS_IPV4		(u32)(1 << 4)
+#define I40E_TX_FLAGS_IPV6		(u32)(1 << 5)
+#define I40E_TX_FLAGS_FCCRC		(u32)(1 << 6)
+#define I40E_TX_FLAGS_FSO		(u32)(1 << 7)
+#define I40E_TX_FLAGS_VLAN_MASK		0xffff0000
+#define I40E_TX_FLAGS_VLAN_PRIO_MASK	0xe0000000
+#define I40E_TX_FLAGS_VLAN_PRIO_SHIFT	29
+#define I40E_TX_FLAGS_VLAN_SHIFT	16
+
+struct i40e_tx_buffer {
+	struct i40e_tx_desc *next_to_watch;
+	unsigned long time_stamp;
+	struct sk_buff *skb;
+	unsigned int bytecount;
+	unsigned short gso_segs;
+	DEFINE_DMA_UNMAP_ADDR(dma);
+	DEFINE_DMA_UNMAP_LEN(len);
+	u32 tx_flags;
+};
+
+struct i40e_rx_buffer {
+	struct sk_buff *skb;
+	dma_addr_t dma;
+	struct page *page;
+	dma_addr_t page_dma;
+	unsigned int page_offset;
+};
+
+struct i40e_queue_stats {
+	u64 packets;
+	u64 bytes;
+};
+
+struct i40e_tx_queue_stats {
+	u64 restart_queue;
+	u64 tx_busy;
+	u64 tx_done_old;
+};
+
+struct i40e_rx_queue_stats {
+	u64 non_eop_descs;
+	u64 alloc_page_failed;
+	u64 alloc_buff_failed;
+};
+
+enum i40e_ring_state_t {
+	__I40E_TX_FDIR_INIT_DONE,
+	__I40E_TX_XPS_INIT_DONE,
+	__I40E_TX_DETECT_HANG,
+	__I40E_HANG_CHECK_ARMED,
+	__I40E_RX_PS_ENABLED,
+	__I40E_RX_LRO_ENABLED,
+	__I40E_RX_16BYTE_DESC_ENABLED,
+};
+
+#define ring_is_ps_enabled(ring) \
+	test_bit(__I40E_RX_PS_ENABLED, &(ring)->state)
+#define set_ring_ps_enabled(ring) \
+	set_bit(__I40E_RX_PS_ENABLED, &(ring)->state)
+#define clear_ring_ps_enabled(ring) \
+	clear_bit(__I40E_RX_PS_ENABLED, &(ring)->state)
+#define check_for_tx_hang(ring) \
+	test_bit(__I40E_TX_DETECT_HANG, &(ring)->state)
+#define set_check_for_tx_hang(ring) \
+	set_bit(__I40E_TX_DETECT_HANG, &(ring)->state)
+#define clear_check_for_tx_hang(ring) \
+	clear_bit(__I40E_TX_DETECT_HANG, &(ring)->state)
+#define ring_is_lro_enabled(ring) \
+	test_bit(__I40E_RX_LRO_ENABLED, &(ring)->state)
+#define set_ring_lro_enabled(ring) \
+	set_bit(__I40E_RX_LRO_ENABLED, &(ring)->state)
+#define clear_ring_lro_enabled(ring) \
+	clear_bit(__I40E_RX_LRO_ENABLED, &(ring)->state)
+#define ring_is_16byte_desc_enabled(ring) \
+	test_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)
+#define set_ring_16byte_desc_enabled(ring) \
+	set_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)
+#define clear_ring_16byte_desc_enabled(ring) \
+	clear_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)
+
+/* struct that defines a descriptor ring, associated with a VSI */
+struct i40e_ring {
+	struct i40e_ring *next;		/* pointer to next ring in q_vector */
+	void *desc;			/* Descriptor ring memory */
+	struct device *dev;		/* Used for DMA mapping */
+	struct net_device *netdev;	/* netdev ring maps to */
+	union {
+		struct i40e_tx_buffer *tx_bi;
+		struct i40e_rx_buffer *rx_bi;
+	};
+	unsigned long state;
+	u16 queue_index;		/* Queue number of ring */
+	u8 dcb_tc;			/* Traffic class of ring */
+	u8 __iomem *tail;
+
+	u16 count;			/* Number of descriptors */
+	u16 reg_idx;			/* HW register index of the ring */
+	u16 rx_hdr_len;
+	u16 rx_buf_len;
+	u8  dtype;
+#define I40E_RX_DTYPE_NO_SPLIT      0
+#define I40E_RX_DTYPE_SPLIT_ALWAYS  1
+#define I40E_RX_DTYPE_HEADER_SPLIT  2
+	u8  hsplit;
+#define I40E_RX_SPLIT_L2      0x1
+#define I40E_RX_SPLIT_IP      0x2
+#define I40E_RX_SPLIT_TCP_UDP 0x4
+#define I40E_RX_SPLIT_SCTP    0x8
+
+	/* used in interrupt processing */
+	u16 next_to_use;
+	u16 next_to_clean;
+
+	u8 atr_sample_rate;
+	u8 atr_count;
+
+	bool ring_active;		/* is ring online or not */
+
+	/* stats structs */
+	struct i40e_queue_stats	stats;
+	struct u64_stats_sync syncp;
+	union {
+		struct i40e_tx_queue_stats tx_stats;
+		struct i40e_rx_queue_stats rx_stats;
+	};
+
+	unsigned int size;		/* length of descriptor ring in bytes */
+	dma_addr_t dma;			/* physical address of ring */
+
+	struct i40e_vsi *vsi;		/* Backreference to associated VSI */
+	struct i40e_q_vector *q_vector;	/* Backreference to associated vector */
+
+	struct rcu_head rcu;		/* to avoid race on free */
+} ____cacheline_internodealigned_in_smp;
+
+enum i40e_latency_range {
+	I40E_LOWEST_LATENCY = 0,
+	I40E_LOW_LATENCY = 1,
+	I40E_BULK_LATENCY = 2,
+};
+
+struct i40e_ring_container {
+	/* array of pointers to rings */
+	struct i40e_ring *ring;
+	unsigned int total_bytes;	/* total bytes processed this int */
+	unsigned int total_packets;	/* total packets processed this int */
+	u16 count;
+	enum i40e_latency_range latency_range;
+	u16 itr;
+};
+
+/* iterator for handling rings in ring container */
+#define i40e_for_each_ring(pos, head) \
+	for (pos = (head).ring; pos != NULL; pos = pos->next)
+
+void i40evf_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count);
+netdev_tx_t i40evf_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
+void i40evf_clean_tx_ring(struct i40e_ring *tx_ring);
+void i40evf_clean_rx_ring(struct i40e_ring *rx_ring);
+int i40evf_setup_tx_descriptors(struct i40e_ring *tx_ring);
+int i40evf_setup_rx_descriptors(struct i40e_ring *rx_ring);
+void i40evf_free_tx_resources(struct i40e_ring *tx_ring);
+void i40evf_free_rx_resources(struct i40e_ring *rx_ring);
+int i40evf_napi_poll(struct napi_struct *napi, int budget);
+#endif /* _I40E_TXRX_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h
new file mode 100644
index 000000000000..3bffac06592f
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h
@@ -0,0 +1,1152 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_TYPE_H_
+#define _I40E_TYPE_H_
+
+#include "i40e_status.h"
+#include "i40e_osdep.h"
+#include "i40e_register.h"
+#include "i40e_adminq.h"
+#include "i40e_hmc.h"
+#include "i40e_lan_hmc.h"
+
+/* Device IDs */
+#define I40E_DEV_ID_SFP_XL710	0x1572
+#define I40E_DEV_ID_SFP_X710		0x1573
+#define I40E_DEV_ID_QEMU		0x1574
+#define I40E_DEV_ID_KX_A		0x157F
+#define I40E_DEV_ID_KX_B		0x1580
+#define I40E_DEV_ID_KX_C		0x1581
+#define I40E_DEV_ID_KX_D		0x1582
+#define I40E_DEV_ID_QSFP_A		0x1583
+#define I40E_DEV_ID_QSFP_B		0x1584
+#define I40E_DEV_ID_QSFP_C		0x1585
+#define I40E_DEV_ID_VF		0x154C
+#define I40E_DEV_ID_VF_HV		0x1571
+
+#define i40e_is_40G_device(d)		((d) == I40E_DEV_ID_QSFP_A  || \
+					 (d) == I40E_DEV_ID_QSFP_B  || \
+					 (d) == I40E_DEV_ID_QSFP_C)
+
+#define I40E_MAX_VSI_QP			16
+#define I40E_MAX_VF_VSI			3
+#define I40E_MAX_CHAINED_RX_BUFFERS	5
+#define I40E_MAX_PF_UDP_OFFLOAD_PORTS	16
+
+/* Max default timeout in ms */
+#define I40E_MAX_NVM_TIMEOUT		18000
+
+/* Switch from ms to the 2usec global time (this is the GTIME resolution) */
+#define I40E_MS_TO_GTIME(time)		(((time) * 1000) / 2)
+
+/* forward declaration */
+struct i40e_hw;
+typedef void (*I40E_ADMINQ_CALLBACK)(struct i40e_hw *, struct i40e_aq_desc *);
+
+#define ETH_ALEN	6
+
+/* Data type manipulation macros. */
+
+#define I40E_DESC_UNUSED(R)	\
+	((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
+	(R)->next_to_clean - (R)->next_to_use - 1)
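
A brief worked example of the descriptor-accounting macro above; the ring indices are invented for illustration:

/* With count = 512, next_to_clean = 10 and next_to_use = 500 the ring has
 * wrapped, so unused = 512 + 10 - 500 - 1 = 21.  Without a wrap
 * (next_to_clean = 200, next_to_use = 100) it is 0 + 200 - 100 - 1 = 99.
 * The "- 1" keeps one slot permanently free so next_to_use can never
 * catch up to next_to_clean.
 */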
+
+/* bitfields for Tx queue mapping in QTX_CTL */
+#define I40E_QTX_CTL_VF_QUEUE	0x0
+#define I40E_QTX_CTL_VM_QUEUE	0x1
+#define I40E_QTX_CTL_PF_QUEUE	0x2
+
+/* debug masks - set these bits in hw->debug_mask to control output */
+enum i40e_debug_mask {
+	I40E_DEBUG_INIT			= 0x00000001,
+	I40E_DEBUG_RELEASE		= 0x00000002,
+
+	I40E_DEBUG_LINK			= 0x00000010,
+	I40E_DEBUG_PHY			= 0x00000020,
+	I40E_DEBUG_HMC			= 0x00000040,
+	I40E_DEBUG_NVM			= 0x00000080,
+	I40E_DEBUG_LAN			= 0x00000100,
+	I40E_DEBUG_FLOW			= 0x00000200,
+	I40E_DEBUG_DCB			= 0x00000400,
+	I40E_DEBUG_DIAG			= 0x00000800,
+
+	I40E_DEBUG_AQ_MESSAGE		= 0x01000000,
+	I40E_DEBUG_AQ_DESCRIPTOR	= 0x02000000,
+	I40E_DEBUG_AQ_DESC_BUFFER	= 0x04000000,
+	I40E_DEBUG_AQ_COMMAND		= 0x06000000,
+	I40E_DEBUG_AQ			= 0x0F000000,
+
+	I40E_DEBUG_USER			= 0xF0000000,
+
+	I40E_DEBUG_ALL			= 0xFFFFFFFF
+};
+
+/* PCI Bus Info */
+#define I40E_PCI_LINK_WIDTH_1		0x10
+#define I40E_PCI_LINK_WIDTH_2		0x20
+#define I40E_PCI_LINK_WIDTH_4		0x40
+#define I40E_PCI_LINK_WIDTH_8		0x80
+#define I40E_PCI_LINK_SPEED_2500	0x1
+#define I40E_PCI_LINK_SPEED_5000	0x2
+#define I40E_PCI_LINK_SPEED_8000	0x3
+
+/* These are structs for managing the hardware information and the operations.
+ * The structures of function pointers are filled out at init time when we
+ * know for sure exactly which hardware we're working with.  This gives us the
+ * flexibility of using the same main driver code but adapting to slightly
+ * different hardware needs as new parts are developed.  For this architecture,
+ * the Firmware and AdminQ are intended to insulate the driver from most of the
+ * future changes, but these structures will also do part of the job.
+ */
+enum i40e_mac_type {
+	I40E_MAC_UNKNOWN = 0,
+	I40E_MAC_X710,
+	I40E_MAC_XL710,
+	I40E_MAC_VF,
+	I40E_MAC_GENERIC,
+};
+
+enum i40e_media_type {
+	I40E_MEDIA_TYPE_UNKNOWN = 0,
+	I40E_MEDIA_TYPE_FIBER,
+	I40E_MEDIA_TYPE_BASET,
+	I40E_MEDIA_TYPE_BACKPLANE,
+	I40E_MEDIA_TYPE_CX4,
+	I40E_MEDIA_TYPE_DA,
+	I40E_MEDIA_TYPE_VIRTUAL
+};
+
+enum i40e_fc_mode {
+	I40E_FC_NONE = 0,
+	I40E_FC_RX_PAUSE,
+	I40E_FC_TX_PAUSE,
+	I40E_FC_FULL,
+	I40E_FC_PFC,
+	I40E_FC_DEFAULT
+};
+
+enum i40e_vsi_type {
+	I40E_VSI_MAIN = 0,
+	I40E_VSI_VMDQ1,
+	I40E_VSI_VMDQ2,
+	I40E_VSI_CTRL,
+	I40E_VSI_FCOE,
+	I40E_VSI_MIRROR,
+	I40E_VSI_SRIOV,
+	I40E_VSI_FDIR,
+	I40E_VSI_TYPE_UNKNOWN
+};
+
+enum i40e_queue_type {
+	I40E_QUEUE_TYPE_RX = 0,
+	I40E_QUEUE_TYPE_TX,
+	I40E_QUEUE_TYPE_PE_CEQ,
+	I40E_QUEUE_TYPE_UNKNOWN
+};
+
+struct i40e_link_status {
+	enum i40e_aq_phy_type phy_type;
+	enum i40e_aq_link_speed link_speed;
+	u8 link_info;
+	u8 an_info;
+	u8 ext_info;
+	u8 loopback;
+	/* is Link Status Event notification to SW enabled */
+	bool lse_enable;
+};
+
+struct i40e_phy_info {
+	struct i40e_link_status link_info;
+	struct i40e_link_status link_info_old;
+	u32 autoneg_advertised;
+	u32 phy_id;
+	u32 module_type;
+	bool get_link_info;
+	enum i40e_media_type media_type;
+};
+
+#define I40E_HW_CAP_MAX_GPIO			30
+/* Capabilities of a PF or a VF or the whole device */
+struct i40e_hw_capabilities {
+	u32  switch_mode;
+#define I40E_NVM_IMAGE_TYPE_EVB		0x0
+#define I40E_NVM_IMAGE_TYPE_CLOUD	0x2
+#define I40E_NVM_IMAGE_TYPE_UDP_CLOUD	0x3
+
+	u32  management_mode;
+	u32  npar_enable;
+	u32  os2bmc;
+	u32  valid_functions;
+	bool sr_iov_1_1;
+	bool vmdq;
+	bool evb_802_1_qbg; /* Edge Virtual Bridging */
+	bool evb_802_1_qbh; /* Bridge Port Extension */
+	bool dcb;
+	bool fcoe;
+	bool mfp_mode_1;
+	bool mgmt_cem;
+	bool ieee_1588;
+	bool iwarp;
+	bool fd;
+	u32 fd_filters_guaranteed;
+	u32 fd_filters_best_effort;
+	bool rss;
+	u32 rss_table_size;
+	u32 rss_table_entry_width;
+	bool led[I40E_HW_CAP_MAX_GPIO];
+	bool sdp[I40E_HW_CAP_MAX_GPIO];
+	u32 nvm_image_type;
+	u32 num_flow_director_filters;
+	u32 num_vfs;
+	u32 vf_base_id;
+	u32 num_vsis;
+	u32 num_rx_qp;
+	u32 num_tx_qp;
+	u32 base_queue;
+	u32 num_msix_vectors;
+	u32 num_msix_vectors_vf;
+	u32 led_pin_num;
+	u32 sdp_pin_num;
+	u32 mdio_port_num;
+	u32 mdio_port_mode;
+	u8 rx_buf_chain_len;
+	u32 enabled_tcmap;
+	u32 maxtc;
+};
+
+struct i40e_mac_info {
+	enum i40e_mac_type type;
+	u8 addr[ETH_ALEN];
+	u8 perm_addr[ETH_ALEN];
+	u8 san_addr[ETH_ALEN];
+	u16 max_fcoeq;
+};
+
+enum i40e_aq_resources_ids {
+	I40E_NVM_RESOURCE_ID = 1
+};
+
+enum i40e_aq_resource_access_type {
+	I40E_RESOURCE_READ = 1,
+	I40E_RESOURCE_WRITE
+};
+
+struct i40e_nvm_info {
+	u64 hw_semaphore_timeout; /* 2usec global time (GTIME resolution) */
+	u64 hw_semaphore_wait;    /* - || - */
+	u32 timeout;              /* [ms] */
+	u16 sr_size;              /* Shadow RAM size in words */
+	bool blank_nvm_mode;      /* is NVM empty (no FW present) */
+	u16 version;              /* NVM package version */
+	u32 eetrack;              /* NVM data version */
+};
+
+/* PCI bus types */
+enum i40e_bus_type {
+	i40e_bus_type_unknown = 0,
+	i40e_bus_type_pci,
+	i40e_bus_type_pcix,
+	i40e_bus_type_pci_express,
+	i40e_bus_type_reserved
+};
+
+/* PCI bus speeds */
+enum i40e_bus_speed {
+	i40e_bus_speed_unknown	= 0,
+	i40e_bus_speed_33	= 33,
+	i40e_bus_speed_66	= 66,
+	i40e_bus_speed_100	= 100,
+	i40e_bus_speed_120	= 120,
+	i40e_bus_speed_133	= 133,
+	i40e_bus_speed_2500	= 2500,
+	i40e_bus_speed_5000	= 5000,
+	i40e_bus_speed_8000	= 8000,
+	i40e_bus_speed_reserved
+};
+
+/* PCI bus widths */
+enum i40e_bus_width {
+	i40e_bus_width_unknown	= 0,
+	i40e_bus_width_pcie_x1	= 1,
+	i40e_bus_width_pcie_x2	= 2,
+	i40e_bus_width_pcie_x4	= 4,
+	i40e_bus_width_pcie_x8	= 8,
+	i40e_bus_width_32	= 32,
+	i40e_bus_width_64	= 64,
+	i40e_bus_width_reserved
+};
+
+/* Bus parameters */
+struct i40e_bus_info {
+	enum i40e_bus_speed speed;
+	enum i40e_bus_width width;
+	enum i40e_bus_type type;
+
+	u16 func;
+	u16 device;
+	u16 lan_id;
+};
+
+/* Flow control (FC) parameters */
+struct i40e_fc_info {
+	enum i40e_fc_mode current_mode; /* FC mode in effect */
+	enum i40e_fc_mode requested_mode; /* FC mode requested by caller */
+};
+
+#define I40E_MAX_TRAFFIC_CLASS		8
+#define I40E_MAX_USER_PRIORITY		8
+#define I40E_DCBX_MAX_APPS		32
+#define I40E_LLDPDU_SIZE		1500
+
+/* IEEE 802.1Qaz ETS Configuration data */
+struct i40e_ieee_ets_config {
+	u8 willing;
+	u8 cbs;
+	u8 maxtcs;
+	u8 prioritytable[I40E_MAX_TRAFFIC_CLASS];
+	u8 tcbwtable[I40E_MAX_TRAFFIC_CLASS];
+	u8 tsatable[I40E_MAX_TRAFFIC_CLASS];
+};
+
+/* IEEE 802.1Qaz ETS Recommendation data */
+struct i40e_ieee_ets_recommend {
+	u8 prioritytable[I40E_MAX_TRAFFIC_CLASS];
+	u8 tcbwtable[I40E_MAX_TRAFFIC_CLASS];
+	u8 tsatable[I40E_MAX_TRAFFIC_CLASS];
+};
+
+/* IEEE 802.1Qaz PFC Configuration data */
+struct i40e_ieee_pfc_config {
+	u8 willing;
+	u8 mbc;
+	u8 pfccap;
+	u8 pfcenable;
+};
+
+/* IEEE 802.1Qaz Application Priority data */
+struct i40e_ieee_app_priority_table {
+	u8  priority;
+	u8  selector;
+	u16 protocolid;
+};
+
+struct i40e_dcbx_config {
+	u32 numapps;
+	struct i40e_ieee_ets_config etscfg;
+	struct i40e_ieee_ets_recommend etsrec;
+	struct i40e_ieee_pfc_config pfc;
+	struct i40e_ieee_app_priority_table app[I40E_DCBX_MAX_APPS];
+};
+
+/* Port hardware description */
+struct i40e_hw {
+	u8 __iomem *hw_addr;
+	void *back;
+
+	/* function pointer structs */
+	struct i40e_phy_info phy;
+	struct i40e_mac_info mac;
+	struct i40e_bus_info bus;
+	struct i40e_nvm_info nvm;
+	struct i40e_fc_info fc;
+
+	/* pci info */
+	u16 device_id;
+	u16 vendor_id;
+	u16 subsystem_device_id;
+	u16 subsystem_vendor_id;
+	u8 revision_id;
+	u8 port;
+	bool adapter_stopped;
+
+	/* capabilities for entire device and PCI func */
+	struct i40e_hw_capabilities dev_caps;
+	struct i40e_hw_capabilities func_caps;
+
+	/* Flow Director shared filter space */
+	u16 fdir_shared_filter_count;
+
+	/* device profile info */
+	u8  pf_id;
+	u16 main_vsi_seid;
+
+	/* Closest numa node to the device */
+	u16 numa_node;
+
+	/* Admin Queue info */
+	struct i40e_adminq_info aq;
+
+	/* HMC info */
+	struct i40e_hmc_info hmc; /* HMC info struct */
+
+	/* LLDP/DCBX Status */
+	u16 dcbx_status;
+
+	/* DCBX info */
+	struct i40e_dcbx_config local_dcbx_config;
+	struct i40e_dcbx_config remote_dcbx_config;
+
+	/* debug mask */
+	u32 debug_mask;
+};
+
+struct i40e_driver_version {
+	u8 major_version;
+	u8 minor_version;
+	u8 build_version;
+	u8 subbuild_version;
+};
+
+/* RX Descriptors */
+union i40e_16byte_rx_desc {
+	struct {
+		__le64 pkt_addr; /* Packet buffer address */
+		__le64 hdr_addr; /* Header buffer address */
+	} read;
+	struct {
+		struct {
+			struct {
+				union {
+					__le16 mirroring_status;
+					__le16 fcoe_ctx_id;
+				} mirr_fcoe;
+				__le16 l2tag1;
+			} lo_dword;
+			union {
+				__le32 rss; /* RSS Hash */
+				__le32 fd_id; /* Flow director filter id */
+				__le32 fcoe_param; /* FCoE DDP Context id */
+			} hi_dword;
+		} qword0;
+		struct {
+			/* ext status/error/pktype/length */
+			__le64 status_error_len;
+		} qword1;
+	} wb;  /* writeback */
+};
+
+union i40e_32byte_rx_desc {
+	struct {
+		__le64  pkt_addr; /* Packet buffer address */
+		__le64  hdr_addr; /* Header buffer address */
+			/* bit 0 of hdr_buffer_addr is DD bit */
+		__le64  rsvd1;
+		__le64  rsvd2;
+	} read;
+	struct {
+		struct {
+			struct {
+				union {
+					__le16 mirroring_status;
+					__le16 fcoe_ctx_id;
+				} mirr_fcoe;
+				__le16 l2tag1;
+			} lo_dword;
+			union {
+				__le32 rss; /* RSS Hash */
+				__le32 fcoe_param; /* FCoE DDP Context id */
+			} hi_dword;
+		} qword0;
+		struct {
+			/* status/error/pktype/length */
+			__le64 status_error_len;
+		} qword1;
+		struct {
+			__le16 ext_status; /* extended status */
+			__le16 rsvd;
+			__le16 l2tag2_1;
+			__le16 l2tag2_2;
+		} qword2;
+		struct {
+			union {
+				__le32 flex_bytes_lo;
+				__le32 pe_status;
+			} lo_dword;
+			union {
+				__le32 flex_bytes_hi;
+				__le32 fd_id;
+			} hi_dword;
+		} qword3;
+	} wb;  /* writeback */
+};
+
+#define I40E_RXD_QW1_STATUS_SHIFT	0
+#define I40E_RXD_QW1_STATUS_MASK	(0x7FFFUL << I40E_RXD_QW1_STATUS_SHIFT)
+
+enum i40e_rx_desc_status_bits {
+	/* Note: These are predefined bit offsets */
+	I40E_RX_DESC_STATUS_DD_SHIFT		= 0,
+	I40E_RX_DESC_STATUS_EOF_SHIFT		= 1,
+	I40E_RX_DESC_STATUS_L2TAG1P_SHIFT	= 2,
+	I40E_RX_DESC_STATUS_L3L4P_SHIFT		= 3,
+	I40E_RX_DESC_STATUS_CRCP_SHIFT		= 4,
+	I40E_RX_DESC_STATUS_TSYNINDX_SHIFT	= 5, /* 2 BITS */
+	I40E_RX_DESC_STATUS_TSYNVALID_SHIFT	= 7,
+	I40E_RX_DESC_STATUS_PIF_SHIFT		= 8,
+	I40E_RX_DESC_STATUS_UMBCAST_SHIFT	= 9, /* 2 BITS */
+	I40E_RX_DESC_STATUS_FLM_SHIFT		= 11,
+	I40E_RX_DESC_STATUS_FLTSTAT_SHIFT	= 12, /* 2 BITS */
+	I40E_RX_DESC_STATUS_LPBK_SHIFT		= 14,
+	I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT	= 15,
+	I40E_RX_DESC_STATUS_RESERVED_SHIFT	= 16, /* 2 BITS */
+	I40E_RX_DESC_STATUS_UDP_0_SHIFT		= 18
+};
+
+#define I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT   I40E_RX_DESC_STATUS_TSYNINDX_SHIFT
+#define I40E_RXD_QW1_STATUS_TSYNINDX_MASK	(0x3UL << \
+					     I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT)
+
+#define I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT  I40E_RX_DESC_STATUS_TSYNVALID_SHIFT
+#define I40E_RXD_QW1_STATUS_TSYNVALID_MASK	(0x1UL << \
+					 I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT)
+
+enum i40e_rx_desc_fltstat_values {
+	I40E_RX_DESC_FLTSTAT_NO_DATA	= 0,
+	I40E_RX_DESC_FLTSTAT_RSV_FD_ID	= 1, /* 16byte desc? FD_ID : RSV */
+	I40E_RX_DESC_FLTSTAT_RSV	= 2,
+	I40E_RX_DESC_FLTSTAT_RSS_HASH	= 3,
+};
+
+#define I40E_RXD_QW1_ERROR_SHIFT	19
+#define I40E_RXD_QW1_ERROR_MASK		(0xFFUL << I40E_RXD_QW1_ERROR_SHIFT)
+
+enum i40e_rx_desc_error_bits {
+	/* Note: These are predefined bit offsets */
+	I40E_RX_DESC_ERROR_RXE_SHIFT		= 0,
+	I40E_RX_DESC_ERROR_RECIPE_SHIFT		= 1,
+	I40E_RX_DESC_ERROR_HBO_SHIFT		= 2,
+	I40E_RX_DESC_ERROR_L3L4E_SHIFT		= 3, /* 3 BITS */
+	I40E_RX_DESC_ERROR_IPE_SHIFT		= 3,
+	I40E_RX_DESC_ERROR_L4E_SHIFT		= 4,
+	I40E_RX_DESC_ERROR_EIPE_SHIFT		= 5,
+	I40E_RX_DESC_ERROR_OVERSIZE_SHIFT	= 6
+};
+
+enum i40e_rx_desc_error_l3l4e_fcoe_masks {
+	I40E_RX_DESC_ERROR_L3L4E_NONE		= 0,
+	I40E_RX_DESC_ERROR_L3L4E_PROT		= 1,
+	I40E_RX_DESC_ERROR_L3L4E_FC		= 2,
+	I40E_RX_DESC_ERROR_L3L4E_DMAC_ERR	= 3,
+	I40E_RX_DESC_ERROR_L3L4E_DMAC_WARN	= 4
+};
+
+#define I40E_RXD_QW1_PTYPE_SHIFT	30
+#define I40E_RXD_QW1_PTYPE_MASK		(0xFFULL << I40E_RXD_QW1_PTYPE_SHIFT)
+
+/* Packet type non-ip values */
+enum i40e_rx_l2_ptype {
+	I40E_RX_PTYPE_L2_RESERVED			= 0,
+	I40E_RX_PTYPE_L2_MAC_PAY2			= 1,
+	I40E_RX_PTYPE_L2_TIMESYNC_PAY2			= 2,
+	I40E_RX_PTYPE_L2_FIP_PAY2			= 3,
+	I40E_RX_PTYPE_L2_OUI_PAY2			= 4,
+	I40E_RX_PTYPE_L2_MACCNTRL_PAY2			= 5,
+	I40E_RX_PTYPE_L2_LLDP_PAY2			= 6,
+	I40E_RX_PTYPE_L2_ECP_PAY2			= 7,
+	I40E_RX_PTYPE_L2_EVB_PAY2			= 8,
+	I40E_RX_PTYPE_L2_QCN_PAY2			= 9,
+	I40E_RX_PTYPE_L2_EAPOL_PAY2			= 10,
+	I40E_RX_PTYPE_L2_ARP				= 11,
+	I40E_RX_PTYPE_L2_FCOE_PAY3			= 12,
+	I40E_RX_PTYPE_L2_FCOE_FCDATA_PAY3		= 13,
+	I40E_RX_PTYPE_L2_FCOE_FCRDY_PAY3		= 14,
+	I40E_RX_PTYPE_L2_FCOE_FCRSP_PAY3		= 15,
+	I40E_RX_PTYPE_L2_FCOE_FCOTHER_PA		= 16,
+	I40E_RX_PTYPE_L2_FCOE_VFT_PAY3			= 17,
+	I40E_RX_PTYPE_L2_FCOE_VFT_FCDATA		= 18,
+	I40E_RX_PTYPE_L2_FCOE_VFT_FCRDY			= 19,
+	I40E_RX_PTYPE_L2_FCOE_VFT_FCRSP			= 20,
+	I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER		= 21,
+	I40E_RX_PTYPE_GRENAT4_MAC_PAY3			= 58,
+	I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4	= 87,
+	I40E_RX_PTYPE_GRENAT6_MAC_PAY3			= 124,
+	I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4	= 153
+};
+
+struct i40e_rx_ptype_decoded {
+	u32 ptype:8;
+	u32 known:1;
+	u32 outer_ip:1;
+	u32 outer_ip_ver:1;
+	u32 outer_frag:1;
+	u32 tunnel_type:3;
+	u32 tunnel_end_prot:2;
+	u32 tunnel_end_frag:1;
+	u32 inner_prot:4;
+	u32 payload_layer:3;
+};
+
+enum i40e_rx_ptype_outer_ip {
+	I40E_RX_PTYPE_OUTER_L2	= 0,
+	I40E_RX_PTYPE_OUTER_IP	= 1
+};
+
+enum i40e_rx_ptype_outer_ip_ver {
+	I40E_RX_PTYPE_OUTER_NONE	= 0,
+	I40E_RX_PTYPE_OUTER_IPV4	= 0,
+	I40E_RX_PTYPE_OUTER_IPV6	= 1
+};
+
+enum i40e_rx_ptype_outer_fragmented {
+	I40E_RX_PTYPE_NOT_FRAG	= 0,
+	I40E_RX_PTYPE_FRAG	= 1
+};
+
+enum i40e_rx_ptype_tunnel_type {
+	I40E_RX_PTYPE_TUNNEL_NONE		= 0,
+	I40E_RX_PTYPE_TUNNEL_IP_IP		= 1,
+	I40E_RX_PTYPE_TUNNEL_IP_GRENAT		= 2,
+	I40E_RX_PTYPE_TUNNEL_IP_GRENAT_MAC	= 3,
+	I40E_RX_PTYPE_TUNNEL_IP_GRENAT_MAC_VLAN	= 4,
+};
+
+enum i40e_rx_ptype_tunnel_end_prot {
+	I40E_RX_PTYPE_TUNNEL_END_NONE	= 0,
+	I40E_RX_PTYPE_TUNNEL_END_IPV4	= 1,
+	I40E_RX_PTYPE_TUNNEL_END_IPV6	= 2,
+};
+
+enum i40e_rx_ptype_inner_prot {
+	I40E_RX_PTYPE_INNER_PROT_NONE		= 0,
+	I40E_RX_PTYPE_INNER_PROT_UDP		= 1,
+	I40E_RX_PTYPE_INNER_PROT_TCP		= 2,
+	I40E_RX_PTYPE_INNER_PROT_SCTP		= 3,
+	I40E_RX_PTYPE_INNER_PROT_ICMP		= 4,
+	I40E_RX_PTYPE_INNER_PROT_TIMESYNC	= 5
+};
+
+enum i40e_rx_ptype_payload_layer {
+	I40E_RX_PTYPE_PAYLOAD_LAYER_NONE	= 0,
+	I40E_RX_PTYPE_PAYLOAD_LAYER_PAY2	= 1,
+	I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3	= 2,
+	I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4	= 3,
+};
+
+#define I40E_RXD_QW1_LENGTH_PBUF_SHIFT	38
+#define I40E_RXD_QW1_LENGTH_PBUF_MASK	(0x3FFFULL << \
+					 I40E_RXD_QW1_LENGTH_PBUF_SHIFT)
+
+#define I40E_RXD_QW1_LENGTH_HBUF_SHIFT	52
+#define I40E_RXD_QW1_LENGTH_HBUF_MASK	(0x7FFULL << \
+					 I40E_RXD_QW1_LENGTH_HBUF_SHIFT)
+
+#define I40E_RXD_QW1_LENGTH_SPH_SHIFT	63
+#define I40E_RXD_QW1_LENGTH_SPH_MASK	(0x1ULL << \
+					 I40E_RXD_QW1_LENGTH_SPH_SHIFT)
+
+enum i40e_rx_desc_ext_status_bits {
+	/* Note: These are predefined bit offsets */
+	I40E_RX_DESC_EXT_STATUS_L2TAG2P_SHIFT	= 0,
+	I40E_RX_DESC_EXT_STATUS_L2TAG3P_SHIFT	= 1,
+	I40E_RX_DESC_EXT_STATUS_FLEXBL_SHIFT	= 2, /* 2 BITS */
+	I40E_RX_DESC_EXT_STATUS_FLEXBH_SHIFT	= 4, /* 2 BITS */
+	I40E_RX_DESC_EXT_STATUS_FTYPE_SHIFT	= 6, /* 3 BITS */
+	I40E_RX_DESC_EXT_STATUS_FDLONGB_SHIFT	= 9,
+	I40E_RX_DESC_EXT_STATUS_FCOELONGB_SHIFT	= 10,
+	I40E_RX_DESC_EXT_STATUS_PELONGB_SHIFT	= 11,
+};
+
+enum i40e_rx_desc_pe_status_bits {
+	/* Note: These are predefined bit offsets */
+	I40E_RX_DESC_PE_STATUS_QPID_SHIFT	= 0, /* 18 BITS */
+	I40E_RX_DESC_PE_STATUS_L4PORT_SHIFT	= 0, /* 16 BITS */
+	I40E_RX_DESC_PE_STATUS_IPINDEX_SHIFT	= 16, /* 8 BITS */
+	I40E_RX_DESC_PE_STATUS_QPIDHIT_SHIFT	= 24,
+	I40E_RX_DESC_PE_STATUS_APBVTHIT_SHIFT	= 25,
+	I40E_RX_DESC_PE_STATUS_PORTV_SHIFT	= 26,
+	I40E_RX_DESC_PE_STATUS_URG_SHIFT	= 27,
+	I40E_RX_DESC_PE_STATUS_IPFRAG_SHIFT	= 28,
+	I40E_RX_DESC_PE_STATUS_IPOPT_SHIFT	= 29
+};
+
+#define I40E_RX_PROG_STATUS_DESC_LENGTH_SHIFT		38
+#define I40E_RX_PROG_STATUS_DESC_LENGTH			0x2000000
+
+#define I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT	2
+#define I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK	(0x7UL << \
+				I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT)
+
+#define I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT	19
+#define I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK		(0x3FUL << \
+				I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT)
+
+enum i40e_rx_prog_status_desc_status_bits {
+	/* Note: These are predefined bit offsets */
+	I40E_RX_PROG_STATUS_DESC_DD_SHIFT	= 0,
+	I40E_RX_PROG_STATUS_DESC_PROG_ID_SHIFT	= 2 /* 3 BITS */
+};
+
+enum i40e_rx_prog_status_desc_prog_id_masks {
+	I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS	= 1,
+	I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_PROG_STATUS	= 2,
+	I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_INVL_STATUS	= 4,
+};
+
+enum i40e_rx_prog_status_desc_error_bits {
+	/* Note: These are predefined bit offsets */
+	I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT	= 0,
+	I40E_RX_PROG_STATUS_DESC_NO_FD_QUOTA_SHIFT	= 1,
+	I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT	= 2,
+	I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT	= 3
+};
+
+/* TX Descriptor */
+struct i40e_tx_desc {
+	__le64 buffer_addr; /* Address of descriptor's data buf */
+	__le64 cmd_type_offset_bsz;
+};
+
+#define I40E_TXD_QW1_DTYPE_SHIFT	0
+#define I40E_TXD_QW1_DTYPE_MASK		(0xFUL << I40E_TXD_QW1_DTYPE_SHIFT)
+
+enum i40e_tx_desc_dtype_value {
+	I40E_TX_DESC_DTYPE_DATA		= 0x0,
+	I40E_TX_DESC_DTYPE_NOP		= 0x1, /* same as Context desc */
+	I40E_TX_DESC_DTYPE_CONTEXT	= 0x1,
+	I40E_TX_DESC_DTYPE_FCOE_CTX	= 0x2,
+	I40E_TX_DESC_DTYPE_FILTER_PROG	= 0x8,
+	I40E_TX_DESC_DTYPE_DDP_CTX	= 0x9,
+	I40E_TX_DESC_DTYPE_FLEX_DATA	= 0xB,
+	I40E_TX_DESC_DTYPE_FLEX_CTX_1	= 0xC,
+	I40E_TX_DESC_DTYPE_FLEX_CTX_2	= 0xD,
+	I40E_TX_DESC_DTYPE_DESC_DONE	= 0xF
+};
+
+#define I40E_TXD_QW1_CMD_SHIFT	4
+#define I40E_TXD_QW1_CMD_MASK	(0x3FFUL << I40E_TXD_QW1_CMD_SHIFT)
+
+enum i40e_tx_desc_cmd_bits {
+	I40E_TX_DESC_CMD_EOP			= 0x0001,
+	I40E_TX_DESC_CMD_RS			= 0x0002,
+	I40E_TX_DESC_CMD_ICRC			= 0x0004,
+	I40E_TX_DESC_CMD_IL2TAG1		= 0x0008,
+	I40E_TX_DESC_CMD_DUMMY			= 0x0010,
+	I40E_TX_DESC_CMD_IIPT_NONIP		= 0x0000, /* 2 BITS */
+	I40E_TX_DESC_CMD_IIPT_IPV6		= 0x0020, /* 2 BITS */
+	I40E_TX_DESC_CMD_IIPT_IPV4		= 0x0040, /* 2 BITS */
+	I40E_TX_DESC_CMD_IIPT_IPV4_CSUM		= 0x0060, /* 2 BITS */
+	I40E_TX_DESC_CMD_FCOET			= 0x0080,
+	I40E_TX_DESC_CMD_L4T_EOFT_UNK		= 0x0000, /* 2 BITS */
+	I40E_TX_DESC_CMD_L4T_EOFT_TCP		= 0x0100, /* 2 BITS */
+	I40E_TX_DESC_CMD_L4T_EOFT_SCTP		= 0x0200, /* 2 BITS */
+	I40E_TX_DESC_CMD_L4T_EOFT_UDP		= 0x0300, /* 2 BITS */
+	I40E_TX_DESC_CMD_L4T_EOFT_EOF_N		= 0x0000, /* 2 BITS */
+	I40E_TX_DESC_CMD_L4T_EOFT_EOF_T		= 0x0100, /* 2 BITS */
+	I40E_TX_DESC_CMD_L4T_EOFT_EOF_NI	= 0x0200, /* 2 BITS */
+	I40E_TX_DESC_CMD_L4T_EOFT_EOF_A		= 0x0300, /* 2 BITS */
+};
+
+#define I40E_TXD_QW1_OFFSET_SHIFT	16
+#define I40E_TXD_QW1_OFFSET_MASK	(0x3FFFFULL << \
+					 I40E_TXD_QW1_OFFSET_SHIFT)
+
+enum i40e_tx_desc_length_fields {
+	/* Note: These are predefined bit offsets */
+	I40E_TX_DESC_LENGTH_MACLEN_SHIFT	= 0, /* 7 BITS */
+	I40E_TX_DESC_LENGTH_IPLEN_SHIFT		= 7, /* 7 BITS */
+	I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT	= 14 /* 4 BITS */
+};
+
+#define I40E_TXD_QW1_TX_BUF_SZ_SHIFT	34
+#define I40E_TXD_QW1_TX_BUF_SZ_MASK	(0x3FFFULL << \
+					 I40E_TXD_QW1_TX_BUF_SZ_SHIFT)
+
+#define I40E_TXD_QW1_L2TAG1_SHIFT	48
+#define I40E_TXD_QW1_L2TAG1_MASK	(0xFFFFULL << I40E_TXD_QW1_L2TAG1_SHIFT)
+
+/* Context descriptors */
+struct i40e_tx_context_desc {
+	__le32 tunneling_params;
+	__le16 l2tag2;
+	__le16 rsvd;
+	__le64 type_cmd_tso_mss;
+};
+
+#define I40E_TXD_CTX_QW1_DTYPE_SHIFT	0
+#define I40E_TXD_CTX_QW1_DTYPE_MASK	(0xFUL << I40E_TXD_CTX_QW1_DTYPE_SHIFT)
+
+#define I40E_TXD_CTX_QW1_CMD_SHIFT	4
+#define I40E_TXD_CTX_QW1_CMD_MASK	(0xFFFFUL << I40E_TXD_CTX_QW1_CMD_SHIFT)
+
+enum i40e_tx_ctx_desc_cmd_bits {
+	I40E_TX_CTX_DESC_TSO		= 0x01,
+	I40E_TX_CTX_DESC_TSYN		= 0x02,
+	I40E_TX_CTX_DESC_IL2TAG2	= 0x04,
+	I40E_TX_CTX_DESC_IL2TAG2_IL2H	= 0x08,
+	I40E_TX_CTX_DESC_SWTCH_NOTAG	= 0x00,
+	I40E_TX_CTX_DESC_SWTCH_UPLINK	= 0x10,
+	I40E_TX_CTX_DESC_SWTCH_LOCAL	= 0x20,
+	I40E_TX_CTX_DESC_SWTCH_VSI	= 0x30,
+	I40E_TX_CTX_DESC_SWPE		= 0x40
+};
+
+#define I40E_TXD_CTX_QW1_TSO_LEN_SHIFT	30
+#define I40E_TXD_CTX_QW1_TSO_LEN_MASK	(0x3FFFFULL << \
+					 I40E_TXD_CTX_QW1_TSO_LEN_SHIFT)
+
+#define I40E_TXD_CTX_QW1_MSS_SHIFT	50
+#define I40E_TXD_CTX_QW1_MSS_MASK	(0x3FFFULL << \
+					 I40E_TXD_CTX_QW1_MSS_SHIFT)
+
+#define I40E_TXD_CTX_QW1_VSI_SHIFT	50
+#define I40E_TXD_CTX_QW1_VSI_MASK	(0x1FFULL << I40E_TXD_CTX_QW1_VSI_SHIFT)
+
+#define I40E_TXD_CTX_QW0_EXT_IP_SHIFT	0
+#define I40E_TXD_CTX_QW0_EXT_IP_MASK	(0x3ULL << \
+					 I40E_TXD_CTX_QW0_EXT_IP_SHIFT)
+
+enum i40e_tx_ctx_desc_eipt_offload {
+	I40E_TX_CTX_EXT_IP_NONE		= 0x0,
+	I40E_TX_CTX_EXT_IP_IPV6		= 0x1,
+	I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM	= 0x2,
+	I40E_TX_CTX_EXT_IP_IPV4		= 0x3
+};
+
+#define I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT	2
+#define I40E_TXD_CTX_QW0_EXT_IPLEN_MASK	(0x3FULL << \
+					 I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT)
+
+#define I40E_TXD_CTX_QW0_NATT_SHIFT	9
+#define I40E_TXD_CTX_QW0_NATT_MASK	(0x3ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
+
+#define I40E_TXD_CTX_UDP_TUNNELING	(0x1ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
+#define I40E_TXD_CTX_GRE_TUNNELING	(0x2ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
+
+#define I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT	11
+#define I40E_TXD_CTX_QW0_EIP_NOINC_MASK	(0x1ULL << \
+					 I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT)
+
+#define I40E_TXD_CTX_EIP_NOINC_IPID_CONST	I40E_TXD_CTX_QW0_EIP_NOINC_MASK
+
+#define I40E_TXD_CTX_QW0_NATLEN_SHIFT	12
+#define I40E_TXD_CTX_QW0_NATLEN_MASK	(0x7FULL << \
+					 I40E_TXD_CTX_QW0_NATLEN_SHIFT)
+
+#define I40E_TXD_CTX_QW0_DECTTL_SHIFT	19
+#define I40E_TXD_CTX_QW0_DECTTL_MASK	(0xFULL << \
+					 I40E_TXD_CTX_QW0_DECTTL_SHIFT)
+
+struct i40e_filter_program_desc {
+	__le32 qindex_flex_ptype_vsi;
+	__le32 rsvd;
+	__le32 dtype_cmd_cntindex;
+	__le32 fd_id;
+};
+#define I40E_TXD_FLTR_QW0_QINDEX_SHIFT	0
+#define I40E_TXD_FLTR_QW0_QINDEX_MASK	(0x7FFUL << \
+					 I40E_TXD_FLTR_QW0_QINDEX_SHIFT)
+#define I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT	11
+#define I40E_TXD_FLTR_QW0_FLEXOFF_MASK	(0x7UL << \
+					 I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT)
+#define I40E_TXD_FLTR_QW0_PCTYPE_SHIFT	17
+#define I40E_TXD_FLTR_QW0_PCTYPE_MASK	(0x3FUL << \
+					 I40E_TXD_FLTR_QW0_PCTYPE_SHIFT)
+
+/* Packet Classifier Types for filters */
+enum i40e_filter_pctype {
+	/* Note: Values 0-28 are reserved for future use */
+	I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP	= 29,
+	I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP	= 30,
+	I40E_FILTER_PCTYPE_NONF_IPV4_UDP		= 31,
+	I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN		= 32,
+	I40E_FILTER_PCTYPE_NONF_IPV4_TCP		= 33,
+	I40E_FILTER_PCTYPE_NONF_IPV4_SCTP		= 34,
+	I40E_FILTER_PCTYPE_NONF_IPV4_OTHER		= 35,
+	I40E_FILTER_PCTYPE_FRAG_IPV4			= 36,
+	/* Note: Values 37-38 are reserved for future use */
+	I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP	= 39,
+	I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP	= 40,
+	I40E_FILTER_PCTYPE_NONF_IPV6_UDP		= 41,
+	I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN		= 42,
+	I40E_FILTER_PCTYPE_NONF_IPV6_TCP		= 43,
+	I40E_FILTER_PCTYPE_NONF_IPV6_SCTP		= 44,
+	I40E_FILTER_PCTYPE_NONF_IPV6_OTHER		= 45,
+	I40E_FILTER_PCTYPE_FRAG_IPV6			= 46,
+	/* Note: Value 47 is reserved for future use */
+	I40E_FILTER_PCTYPE_FCOE_OX			= 48,
+	I40E_FILTER_PCTYPE_FCOE_RX			= 49,
+	I40E_FILTER_PCTYPE_FCOE_OTHER			= 50,
+	/* Note: Values 51-62 are reserved for future use */
+	I40E_FILTER_PCTYPE_L2_PAYLOAD			= 63,
+};
+
+enum i40e_filter_program_desc_dest {
+	I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET		= 0x0,
+	I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX	= 0x1,
+	I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_OTHER	= 0x2,
+};
+
+enum i40e_filter_program_desc_fd_status {
+	I40E_FILTER_PROGRAM_DESC_FD_STATUS_NONE			= 0x0,
+	I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID		= 0x1,
+	I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID_4FLEX_BYTES	= 0x2,
+	I40E_FILTER_PROGRAM_DESC_FD_STATUS_8FLEX_BYTES		= 0x3,
+};
+
+#define I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT	23
+#define I40E_TXD_FLTR_QW0_DEST_VSI_MASK	(0x1FFUL << \
+					 I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT)
+
+#define I40E_TXD_FLTR_QW1_CMD_SHIFT	4
+#define I40E_TXD_FLTR_QW1_CMD_MASK	(0xFFFFULL << \
+					 I40E_TXD_FLTR_QW1_CMD_SHIFT)
+
+#define I40E_TXD_FLTR_QW1_PCMD_SHIFT	(0x0ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT)
+#define I40E_TXD_FLTR_QW1_PCMD_MASK	(0x7ULL << I40E_TXD_FLTR_QW1_PCMD_SHIFT)
+
+enum i40e_filter_program_desc_pcmd {
+	I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE	= 0x1,
+	I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE		= 0x2,
+};
+
+#define I40E_TXD_FLTR_QW1_DEST_SHIFT	(0x3ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT)
+#define I40E_TXD_FLTR_QW1_DEST_MASK	(0x3ULL << I40E_TXD_FLTR_QW1_DEST_SHIFT)
+
+#define I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT	(0x7ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT)
+#define I40E_TXD_FLTR_QW1_CNT_ENA_MASK	(0x1ULL << \
+					 I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT)
+
+#define I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT	(0x9ULL + \
+						 I40E_TXD_FLTR_QW1_CMD_SHIFT)
+#define I40E_TXD_FLTR_QW1_FD_STATUS_MASK (0x3ULL << \
+					  I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT)
+
+#define I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT 20
+#define I40E_TXD_FLTR_QW1_CNTINDEX_MASK	(0x1FFUL << \
+					 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT)
+
+enum i40e_filter_type {
+	I40E_FLOW_DIRECTOR_FLTR = 0,
+	I40E_PE_QUAD_HASH_FLTR = 1,
+	I40E_ETHERTYPE_FLTR,
+	I40E_FCOE_CTX_FLTR,
+	I40E_MAC_VLAN_FLTR,
+	I40E_HASH_FLTR
+};
+
+struct i40e_vsi_context {
+	u16 seid;
+	u16 uplink_seid;
+	u16 vsi_number;
+	u16 vsis_allocated;
+	u16 vsis_unallocated;
+	u16 flags;
+	u8 pf_num;
+	u8 vf_num;
+	u8 connection_type;
+	struct i40e_aqc_vsi_properties_data info;
+};
+
+/* Statistics collected by each port, VSI, VEB, and S-channel */
+struct i40e_eth_stats {
+	u64 rx_bytes;			/* gorc */
+	u64 rx_unicast;			/* uprc */
+	u64 rx_multicast;		/* mprc */
+	u64 rx_broadcast;		/* bprc */
+	u64 rx_discards;		/* rdpc */
+	u64 rx_errors;			/* repc */
+	u64 rx_missed;			/* rmpc */
+	u64 rx_unknown_protocol;	/* rupp */
+	u64 tx_bytes;			/* gotc */
+	u64 tx_unicast;			/* uptc */
+	u64 tx_multicast;		/* mptc */
+	u64 tx_broadcast;		/* bptc */
+	u64 tx_discards;		/* tdpc */
+	u64 tx_errors;			/* tepc */
+};
+
+/* Statistics collected by the MAC */
+struct i40e_hw_port_stats {
+	/* eth stats collected by the port */
+	struct i40e_eth_stats eth;
+
+	/* additional port specific stats */
+	u64 tx_dropped_link_down;	/* tdold */
+	u64 crc_errors;			/* crcerrs */
+	u64 illegal_bytes;		/* illerrc */
+	u64 error_bytes;		/* errbc */
+	u64 mac_local_faults;		/* mlfc */
+	u64 mac_remote_faults;		/* mrfc */
+	u64 rx_length_errors;		/* rlec */
+	u64 link_xon_rx;		/* lxonrxc */
+	u64 link_xoff_rx;		/* lxoffrxc */
+	u64 priority_xon_rx[8];		/* pxonrxc[8] */
+	u64 priority_xoff_rx[8];	/* pxoffrxc[8] */
+	u64 link_xon_tx;		/* lxontxc */
+	u64 link_xoff_tx;		/* lxofftxc */
+	u64 priority_xon_tx[8];		/* pxontxc[8] */
+	u64 priority_xoff_tx[8];	/* pxofftxc[8] */
+	u64 priority_xon_2_xoff[8];	/* pxon2offc[8] */
+	u64 rx_size_64;			/* prc64 */
+	u64 rx_size_127;		/* prc127 */
+	u64 rx_size_255;		/* prc255 */
+	u64 rx_size_511;		/* prc511 */
+	u64 rx_size_1023;		/* prc1023 */
+	u64 rx_size_1522;		/* prc1522 */
+	u64 rx_size_big;		/* prc9522 */
+	u64 rx_undersize;		/* ruc */
+	u64 rx_fragments;		/* rfc */
+	u64 rx_oversize;		/* roc */
+	u64 rx_jabber;			/* rjc */
+	u64 tx_size_64;			/* ptc64 */
+	u64 tx_size_127;		/* ptc127 */
+	u64 tx_size_255;		/* ptc255 */
+	u64 tx_size_511;		/* ptc511 */
+	u64 tx_size_1023;		/* ptc1023 */
+	u64 tx_size_1522;		/* ptc1522 */
+	u64 tx_size_big;		/* ptc9522 */
+	u64 mac_short_packet_dropped;	/* mspdc */
+	u64 checksum_error;		/* xec */
+};
+
+/* Checksum and Shadow RAM pointers */
+#define I40E_SR_NVM_CONTROL_WORD		0x00
+#define I40E_SR_EMP_MODULE_PTR			0x0F
+#define I40E_SR_NVM_IMAGE_VERSION		0x18
+#define I40E_SR_NVM_WAKE_ON_LAN			0x19
+#define I40E_SR_ALTERNATE_SAN_MAC_ADDRESS_PTR	0x27
+#define I40E_SR_NVM_EETRACK_LO			0x2D
+#define I40E_SR_NVM_EETRACK_HI			0x2E
+#define I40E_SR_VPD_PTR				0x2F
+#define I40E_SR_PCIE_ALT_AUTO_LOAD_PTR		0x3E
+#define I40E_SR_SW_CHECKSUM_WORD		0x3F
+
+/* Auxiliary field, mask and shift definition for Shadow RAM and NVM Flash */
+#define I40E_SR_VPD_MODULE_MAX_SIZE		1024
+#define I40E_SR_PCIE_ALT_MODULE_MAX_SIZE	1024
+#define I40E_SR_CONTROL_WORD_1_SHIFT		0x06
+#define I40E_SR_CONTROL_WORD_1_MASK	(0x03 << I40E_SR_CONTROL_WORD_1_SHIFT)
+
+/* Shadow RAM related */
+#define I40E_SR_SECTOR_SIZE_IN_WORDS	0x800
+#define I40E_SR_WORDS_IN_1KB		512
+/* Checksum should be calculated such that after adding all the words,
+ * including the checksum word itself, the sum should be 0xBABA.
+ */
+#define I40E_SR_SW_CHECKSUM_BASE	0xBABA
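+
+/* Illustrative only, not part of this patch: a minimal sketch of how the
+ * software checksum word could be derived so that all Shadow RAM words,
+ * including the checksum word itself, sum to I40E_SR_SW_CHECKSUM_BASE.
+ * The helper name and the caller-supplied word buffer are assumptions
+ * made purely for illustration.
+ *
+ *	static u16 example_sr_checksum(const u16 *words, u32 nwords)
+ *	{
+ *		u16 sum = 0;
+ *		u32 i;
+ *
+ *		for (i = 0; i < nwords; i++)
+ *			if (i != I40E_SR_SW_CHECKSUM_WORD)
+ *				sum += words[i];
+ *		return (u16)(I40E_SR_SW_CHECKSUM_BASE - sum);
+ *	}
+ */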
+
+#define I40E_SRRD_SRCTL_ATTEMPTS	100000
+
+enum i40e_switch_element_types {
+	I40E_SWITCH_ELEMENT_TYPE_MAC	= 1,
+	I40E_SWITCH_ELEMENT_TYPE_PF	= 2,
+	I40E_SWITCH_ELEMENT_TYPE_VF	= 3,
+	I40E_SWITCH_ELEMENT_TYPE_EMP	= 4,
+	I40E_SWITCH_ELEMENT_TYPE_BMC	= 6,
+	I40E_SWITCH_ELEMENT_TYPE_PE	= 16,
+	I40E_SWITCH_ELEMENT_TYPE_VEB	= 17,
+	I40E_SWITCH_ELEMENT_TYPE_PA	= 18,
+	I40E_SWITCH_ELEMENT_TYPE_VSI	= 19,
+};
+
+/* Supported EtherType filters */
+enum i40e_ether_type_index {
+	I40E_ETHER_TYPE_1588		= 0,
+	I40E_ETHER_TYPE_FIP		= 1,
+	I40E_ETHER_TYPE_OUI_EXTENDED	= 2,
+	I40E_ETHER_TYPE_MAC_CONTROL	= 3,
+	I40E_ETHER_TYPE_LLDP		= 4,
+	I40E_ETHER_TYPE_EVB_PROTOCOL1	= 5,
+	I40E_ETHER_TYPE_EVB_PROTOCOL2	= 6,
+	I40E_ETHER_TYPE_QCN_CNM		= 7,
+	I40E_ETHER_TYPE_8021X		= 8,
+	I40E_ETHER_TYPE_ARP		= 9,
+	I40E_ETHER_TYPE_RSV1		= 10,
+	I40E_ETHER_TYPE_RSV2		= 11,
+};
+
+/* Filter context base size is 1K */
+#define I40E_HASH_FILTER_BASE_SIZE	1024
+/* Supported Hash filter values */
+enum i40e_hash_filter_size {
+	I40E_HASH_FILTER_SIZE_1K	= 0,
+	I40E_HASH_FILTER_SIZE_2K	= 1,
+	I40E_HASH_FILTER_SIZE_4K	= 2,
+	I40E_HASH_FILTER_SIZE_8K	= 3,
+	I40E_HASH_FILTER_SIZE_16K	= 4,
+	I40E_HASH_FILTER_SIZE_32K	= 5,
+	I40E_HASH_FILTER_SIZE_64K	= 6,
+	I40E_HASH_FILTER_SIZE_128K	= 7,
+	I40E_HASH_FILTER_SIZE_256K	= 8,
+	I40E_HASH_FILTER_SIZE_512K	= 9,
+	I40E_HASH_FILTER_SIZE_1M	= 10,
+};
+
+/* DMA context base size is 0.5K */
+#define I40E_DMA_CNTX_BASE_SIZE		512
+/* Supported DMA context values */
+enum i40e_dma_cntx_size {
+	I40E_DMA_CNTX_SIZE_512		= 0,
+	I40E_DMA_CNTX_SIZE_1K		= 1,
+	I40E_DMA_CNTX_SIZE_2K		= 2,
+	I40E_DMA_CNTX_SIZE_4K		= 3,
+	I40E_DMA_CNTX_SIZE_8K		= 4,
+	I40E_DMA_CNTX_SIZE_16K		= 5,
+	I40E_DMA_CNTX_SIZE_32K		= 6,
+	I40E_DMA_CNTX_SIZE_64K		= 7,
+	I40E_DMA_CNTX_SIZE_128K		= 8,
+	I40E_DMA_CNTX_SIZE_256K		= 9,
+};
+
+/* Supported Hash look up table (LUT) sizes */
+enum i40e_hash_lut_size {
+	I40E_HASH_LUT_SIZE_128		= 0,
+	I40E_HASH_LUT_SIZE_512		= 1,
+};
+
+/* Structure to hold a per PF filter control settings */
+struct i40e_filter_control_settings {
+	/* number of PE Quad Hash filter buckets */
+	enum i40e_hash_filter_size pe_filt_num;
+	/* number of PE Quad Hash contexts */
+	enum i40e_dma_cntx_size pe_cntx_num;
+	/* number of FCoE filter buckets */
+	enum i40e_hash_filter_size fcoe_filt_num;
+	/* number of FCoE DDP contexts */
+	enum i40e_dma_cntx_size fcoe_cntx_num;
+	/* size of the Hash LUT */
+	enum i40e_hash_lut_size	hash_lut_size;
+	/* enable FDIR filters for PF and its VFs */
+	bool enable_fdir;
+	/* enable Ethertype filters for PF and its VFs */
+	bool enable_ethtype;
+	/* enable MAC/VLAN filters for PF and its VFs */
+	bool enable_macvlan;
+};
+
+/* Structure to hold device level control filter counts */
+struct i40e_control_filter_stats {
+	u16 mac_etype_used;   /* Used perfect match MAC/EtherType filters */
+	u16 etype_used;       /* Used perfect EtherType filters */
+	u16 mac_etype_free;   /* Un-used perfect match MAC/EtherType filters */
+	u16 etype_free;       /* Un-used perfect EtherType filters */
+};
+
+enum i40e_reset_type {
+	I40E_RESET_POR		= 0,
+	I40E_RESET_CORER	= 1,
+	I40E_RESET_GLOBR	= 2,
+	I40E_RESET_EMPR		= 3,
+};
+#endif /* _I40E_TYPE_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
new file mode 100644
index 000000000000..ccf45d04b7ef
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
@@ -0,0 +1,364 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_VIRTCHNL_H_
+#define _I40E_VIRTCHNL_H_
+
+#include "i40e_type.h"
+
+/* Description:
+ * This header file describes the VF-PF communication protocol used
+ * by the various i40e drivers.
+ *
+ * Admin queue buffer usage:
+ * desc->opcode is always i40e_aqc_opc_send_msg_to_pf
+ * flags, retval, datalen, and data addr are all used normally.
+ * Firmware copies the cookie fields when sending messages between the PF and
+ * VF, but uses all other fields internally. Due to this limitation, we
+ * must send all messages as "indirect", i.e. using an external buffer.
+ *
+ * All the VSI indexes are relative to the VF. Each VF can have a maximum of
+ * three VSIs. All the queue indexes are relative to the VSI. Each VF can
+ * have a maximum of sixteen queues for all of its VSIs.
+ *
+ * The PF is required to return a status code in v_retval for all messages
+ * except RESET_VF, which does not require any response. The return value is of
+ * i40e_status_code type, defined in i40e_type.h.
+ *
+ * In general, VF driver initialization should roughly follow the order of these
+ * opcodes. The VF driver must first validate the API version of the PF driver,
+ * then request a reset, then get resources, then configure queues and
+ * interrupts. After these operations are complete, the VF driver may start
+ * its queues, optionally add MAC and VLAN filters, and process traffic.
+ */
+
+/* Opcodes for VF-PF communication. These are placed in the v_opcode field
+ * of the virtchnl_msg structure.
+ */
+enum i40e_virtchnl_ops {
+/* VF sends requests to the PF for the following
+ * ops.
+ */
+	I40E_VIRTCHNL_OP_UNKNOWN = 0,
+	I40E_VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */
+	I40E_VIRTCHNL_OP_RESET_VF,
+	I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
+	I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE,
+	I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE,
+	I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
+	I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
+	I40E_VIRTCHNL_OP_ENABLE_QUEUES,
+	I40E_VIRTCHNL_OP_DISABLE_QUEUES,
+	I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
+	I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
+	I40E_VIRTCHNL_OP_ADD_VLAN,
+	I40E_VIRTCHNL_OP_DEL_VLAN,
+	I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
+	I40E_VIRTCHNL_OP_GET_STATS,
+	I40E_VIRTCHNL_OP_FCOE,
+/* PF sends status change events to VFs using
+ * the following op.
+ */
+	I40E_VIRTCHNL_OP_EVENT,
+};
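+
+/* Illustrative only, not part of this patch: the rough VF bring-up order
+ * described in the header comment above, expressed as opcodes. PF-side
+ * handling and error paths are omitted.
+ *
+ *	I40E_VIRTCHNL_OP_VERSION           (check major/minor compatibility)
+ *	I40E_VIRTCHNL_OP_RESET_VF          (then poll VFGEN_RSTAT until done)
+ *	I40E_VIRTCHNL_OP_GET_VF_RESOURCES  (learn VSI ids and queue counts)
+ *	I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES
+ *	I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP
+ *	I40E_VIRTCHNL_OP_ENABLE_QUEUES
+ *	I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS / I40E_VIRTCHNL_OP_ADD_VLAN (optional)
+ */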
+
+/* Virtual channel message descriptor. This overlays the admin queue
+ * descriptor. All other data is passed in external buffers.
+ */
+
+struct i40e_virtchnl_msg {
+	u8 pad[8];			 /* AQ flags/opcode/len/retval fields */
+	enum i40e_virtchnl_ops v_opcode; /* avoid confusion with desc->opcode */
+	i40e_status v_retval;  /* ditto for desc->retval */
+	u32 vfid;			 /* used by PF when sending to VF */
+};
+
+/* Message descriptions and data structures. */
+
+/* I40E_VIRTCHNL_OP_VERSION
+ * VF posts its version number to the PF. PF responds with its version number
+ * in the same format, along with a return code.
+ * Reply from PF has its major/minor versions also in param0 and param1.
+ * If there is a major version mismatch, then the VF cannot operate.
+ * If there is a minor version mismatch, then the VF can operate but should
+ * add a warning to the system log.
+ *
+ * This enum element MUST always be specified as == 1, regardless of other
+ * changes in the API. The PF must always respond to this message without
+ * error regardless of version mismatch.
+ */
+#define I40E_VIRTCHNL_VERSION_MAJOR		1
+#define I40E_VIRTCHNL_VERSION_MINOR		0
+struct i40e_virtchnl_version_info {
+	u32 major;
+	u32 minor;
+};
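+
+/* Illustrative only, not part of this patch: a minimal sketch of the
+ * compatibility rule described above; pf_ver and dev are hypothetical
+ * locals filled in by the caller from the PF's reply.
+ *
+ *	if (pf_ver.major != I40E_VIRTCHNL_VERSION_MAJOR)
+ *		return -EIO;
+ *	if (pf_ver.minor != I40E_VIRTCHNL_VERSION_MINOR)
+ *		dev_warn(dev, "PF/VF API minor version mismatch\n");
+ */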
+
+/* I40E_VIRTCHNL_OP_RESET_VF
+ * VF sends this request to PF with no parameters
+ * PF does NOT respond! VF driver must delay, then poll the VFGEN_RSTAT register
+ * until reset completion is indicated. The admin queue must be reinitialized
+ * after this operation.
+ *
+ * When reset is complete, PF must ensure that all queues in all VSIs associated
+ * with the VF are stopped, all queue configurations in the HMC are set to 0,
+ * and all MAC and VLAN filters (except the default MAC address) on all VSIs
+ * are cleared.
+ */
+
+/* I40E_VIRTCHNL_OP_GET_VF_RESOURCES
+ * VF sends this request to PF with no parameters
+ * PF responds with an indirect message containing
+ * i40e_virtchnl_vf_resource and one or more
+ * i40e_virtchnl_vsi_resource structures.
+ */
+
+struct i40e_virtchnl_vsi_resource {
+	u16 vsi_id;
+	u16 num_queue_pairs;
+	enum i40e_vsi_type vsi_type;
+	u16 qset_handle;
+	u8 default_mac_addr[ETH_ALEN];
+};
+/* VF offload flags */
+#define I40E_VIRTCHNL_VF_OFFLOAD_L2	0x00000001
+#define I40E_VIRTCHNL_VF_OFFLOAD_FCOE	0x00000004
+#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN	0x00010000
+
+struct i40e_virtchnl_vf_resource {
+	u16 num_vsis;
+	u16 num_queue_pairs;
+	u16 max_vectors;
+	u16 max_mtu;
+
+	u32 vf_offload_flags;
+	u32 max_fcoe_contexts;
+	u32 max_fcoe_filters;
+
+	struct i40e_virtchnl_vsi_resource vsi_res[1];
+};
+
+/* I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE
+ * VF sends this message to set up parameters for one TX queue.
+ * External data buffer contains one instance of i40e_virtchnl_txq_info.
+ * PF configures requested queue and returns a status code.
+ */
+
+/* Tx queue config info */
+struct i40e_virtchnl_txq_info {
+	u16 vsi_id;
+	u16 queue_id;
+	u16 ring_len;		/* number of descriptors, multiple of 8 */
+	u16 headwb_enabled;
+	u64 dma_ring_addr;
+	u64 dma_headwb_addr;
+};
+
+/* I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE
+ * VF sends this message to set up parameters for one RX queue.
+ * External data buffer contains one instance of i40e_virtchnl_rxq_info.
+ * PF configures requested queue and returns a status code.
+ */
+
+/* Rx queue config info */
+struct i40e_virtchnl_rxq_info {
+	u16 vsi_id;
+	u16 queue_id;
+	u32 ring_len;		/* number of descriptors, multiple of 32 */
+	u16 hdr_size;
+	u16 splithdr_enabled;
+	u32 databuffer_size;
+	u32 max_pkt_size;
+	u64 dma_ring_addr;
+	enum i40e_hmc_obj_rx_hsplit_0 rx_split_pos;
+};
+
+/* I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES
+ * VF sends this message to set parameters for all active TX and RX queues
+ * associated with the specified VSI.
+ * PF configures queues and returns status.
+ * If the number of queues specified is greater than the number of queues
+ * associated with the VSI, an error is returned and no queues are configured.
+ */
+struct i40e_virtchnl_queue_pair_info {
+	/* NOTE: vsi_id and queue_id should be identical for both queues. */
+	struct i40e_virtchnl_txq_info txq;
+	struct i40e_virtchnl_rxq_info rxq;
+};
+
+struct i40e_virtchnl_vsi_queue_config_info {
+	u16 vsi_id;
+	u16 num_queue_pairs;
+	struct i40e_virtchnl_queue_pair_info qpair[1];
+};
+
+/* I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP
+ * VF uses this message to map vectors to queues.
+ * The rxq_map and txq_map fields are bitmaps used to indicate which queues
+ * are to be associated with the specified vector.
+ * The "other" causes are always mapped to vector 0.
+ * PF configures interrupt mapping and returns status.
+ */
+struct i40e_virtchnl_vector_map {
+	u16 vsi_id;
+	u16 vector_id;
+	u16 rxq_map;
+	u16 txq_map;
+	u16 rxitr_idx;
+	u16 txitr_idx;
+};
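+
+/* Example, informational only: setting rxq_map = (1 << 0) | (1 << 1) and
+ * txq_map = (1 << 0) in a vecmap entry asks the PF to route Rx queues 0
+ * and 1 and Tx queue 0 of vsi_id to interrupt vector vector_id.
+ */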
+
+struct i40e_virtchnl_irq_map_info {
+	u16 num_vectors;
+	struct i40e_virtchnl_vector_map vecmap[1];
+};
+
+/* I40E_VIRTCHNL_OP_ENABLE_QUEUES
+ * I40E_VIRTCHNL_OP_DISABLE_QUEUES
+ * VF sends these messages to enable or disable TX/RX queue pairs.
+ * The queues fields are bitmaps indicating which queues to act upon.
+ * (Currently, we only support 16 queues per VF, but we make the field
+ * u32 to allow for expansion.)
+ * PF performs requested action and returns status.
+ */
+struct i40e_virtchnl_queue_select {
+	u16 vsi_id;
+	u16 pad;
+	u32 rx_queues;
+	u32 tx_queues;
+};
+
+/* I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS
+ * VF sends this message in order to add one or more unicast or multicast
+ * address filters for the specified VSI.
+ * PF adds the filters and returns status.
+ */
+
+/* I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS
+ * VF sends this message in order to remove one or more unicast or multicast
+ * filters for the specified VSI.
+ * PF removes the filters and returns status.
+ */
+
+struct i40e_virtchnl_ether_addr {
+	u8 addr[ETH_ALEN];
+	u8 pad[2];
+};
+
+struct i40e_virtchnl_ether_addr_list {
+	u16 vsi_id;
+	u16 num_elements;
+	struct i40e_virtchnl_ether_addr list[1];
+};
+
+/* I40E_VIRTCHNL_OP_ADD_VLAN
+ * VF sends this message to add one or more VLAN tag filters for receives.
+ * PF adds the filters and returns status.
+ * If a port VLAN is configured by the PF, this operation will return an
+ * error to the VF.
+ */
+
+/* I40E_VIRTCHNL_OP_DEL_VLAN
+ * VF sends this message to remove one or more VLAN tag filters for receives.
+ * PF removes the filters and returns status.
+ * If a port VLAN is configured by the PF, this operation will return an
+ * error to the VF.
+ */
+
+struct i40e_virtchnl_vlan_filter_list {
+	u16 vsi_id;
+	u16 num_elements;
+	u16 vlan_id[1];
+};
+
+/* I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE
+ * VF sends VSI id and flags.
+ * PF returns status code in retval.
+ * Note: we assume that broadcast accept mode is always enabled.
+ */
+struct i40e_virtchnl_promisc_info {
+	u16 vsi_id;
+	u16 flags;
+};
+
+#define I40E_FLAG_VF_UNICAST_PROMISC	0x00000001
+#define I40E_FLAG_VF_MULTICAST_PROMISC	0x00000002
+
+/* I40E_VIRTCHNL_OP_GET_STATS
+ * VF sends this message to request stats for the selected VSI. VF uses
+ * the i40e_virtchnl_queue_select struct to specify the VSI. The queue_id
+ * field is ignored by the PF.
+ *
+ * PF replies with struct i40e_eth_stats in an external buffer.
+ */
+
+/* I40E_VIRTCHNL_OP_EVENT
+ * PF sends this message to inform the VF driver of events that may affect it.
+ * No direct response is expected from the VF, though it may generate other
+ * messages in response to this one.
+ */
+enum i40e_virtchnl_event_codes {
+	I40E_VIRTCHNL_EVENT_UNKNOWN = 0,
+	I40E_VIRTCHNL_EVENT_LINK_CHANGE,
+	I40E_VIRTCHNL_EVENT_RESET_IMPENDING,
+	I40E_VIRTCHNL_EVENT_PF_DRIVER_CLOSE,
+};
+#define I40E_PF_EVENT_SEVERITY_INFO		0
+#define I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM	255
+
+struct i40e_virtchnl_pf_event {
+	enum i40e_virtchnl_event_codes event;
+	union {
+		struct {
+			enum i40e_aq_link_speed link_speed;
+			bool link_status;
+		} link_event;
+	} event_data;
+
+	int severity;
+};
+
+/* The following are TBD, not necessary for LAN functionality.
+ * I40E_VIRTCHNL_OP_FCOE
+ */
+
+/* VF reset states - these are written into the RSTAT register:
+ * I40E_VFGEN_RSTAT1 on the PF
+ * I40E_VFGEN_RSTAT on the VF
+ * When the PF initiates a reset, it writes 0
+ * When the reset is complete, it writes 1
+ * When the PF detects that the VF has recovered, it writes 2
+ * VF checks this register periodically to determine if a reset has occurred,
+ * then polls it to know when the reset is complete.
+ * If either the PF or VF reads the register while the hardware
+ * is in a reset state, it will return 0xDEADBEEF, which, when masked,
+ * will result in 3.
+ */
+enum i40e_vfr_states {
+	I40E_VFR_INPROGRESS = 0,
+	I40E_VFR_COMPLETED,
+	I40E_VFR_VFACTIVE,
+	I40E_VFR_UNKNOWN,
+};
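+
+/* Illustrative only, not part of this patch: a minimal sketch of the
+ * polling described above; hw and rstat are assumed locals. The 0x3 mask
+ * is what turns the in-reset value 0xDEADBEEF into I40E_VFR_UNKNOWN (3).
+ *
+ *	do {
+ *		msleep(10);
+ *		rstat = rd32(hw, I40E_VFGEN_RSTAT) & 0x3;
+ *	} while (rstat == I40E_VFR_INPROGRESS || rstat == I40E_VFR_UNKNOWN);
+ */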
+
+#endif /* _I40E_VIRTCHNL_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h
new file mode 100644
index 000000000000..ff6529b288a1
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40evf.h
@@ -0,0 +1,321 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40EVF_H_
+#define _I40EVF_H_
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/aer.h>
+#include <linux/netdevice.h>
+#include <linux/vmalloc.h>
+#include <linux/interrupt.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/sctp.h>
+#include <linux/ipv6.h>
+#include <net/ip6_checksum.h>
+#include <net/udp.h>
+
+
+#include "i40e_type.h"
+#include "i40e_virtchnl.h"
+#include "i40e_txrx.h"
+
+#define DEFAULT_DEBUG_LEVEL_SHIFT 3
+#define PFX "i40evf: "
+#define DPRINTK(nlevel, klevel, fmt, args...) \
+	((void)((NETIF_MSG_##nlevel & adapter->msg_enable) && \
+	printk(KERN_##klevel PFX "%s: %s: " fmt, adapter->netdev->name, \
+		__func__ , ## args)))
+
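+/* Example, informational only: DPRINTK(PROBE, INFO, "using MSI-X\n") would
+ * log at KERN_INFO only when NETIF_MSG_PROBE is set in adapter->msg_enable.
+ */
+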
+/* dummy struct to make common code less painful */
+struct i40e_vsi {
+	struct i40evf_adapter *back;
+	struct net_device *netdev;
+	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+	u16 seid;
+	u16 id;
+	unsigned long state;
+	int base_vector;
+	u16 work_limit;
+	/* high bit set means dynamic, use accessor routines to read/write.
+	 * hardware only supports 2us resolution for the ITR registers.
+	 * these values always store the USER setting, and must be converted
+	 * before programming to a register.
+	 */
+	u16 rx_itr_setting;
+	u16 tx_itr_setting;
+};
+
+/* How many Rx Buffers do we bundle into one write to the hardware? */
+#define I40EVF_RX_BUFFER_WRITE	16	/* Must be power of 2 */
+#define I40EVF_DEFAULT_TXD   512
+#define I40EVF_DEFAULT_RXD   512
+#define I40EVF_MAX_TXD       4096
+#define I40EVF_MIN_TXD       64
+#define I40EVF_MAX_RXD       4096
+#define I40EVF_MIN_RXD       64
+#define I40EVF_REQ_DESCRIPTOR_MULTIPLE  8
+
+/* Supported Rx Buffer Sizes */
+#define I40EVF_RXBUFFER_64    64     /* Used for packet split */
+#define I40EVF_RXBUFFER_128   128    /* Used for packet split */
+#define I40EVF_RXBUFFER_256   256    /* Used for packet split */
+#define I40EVF_RXBUFFER_2048  2048
+#define I40EVF_MAX_RXBUFFER   16384  /* largest size for single descriptor */
+#define I40EVF_MAX_AQ_BUF_SIZE    4096
+#define I40EVF_AQ_LEN             32
+#define I40EVF_AQ_MAX_ERR         10 /* times to try before resetting AQ */
+
+#define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
+
+#define I40E_RX_DESC(R, i) (&(((union i40e_32byte_rx_desc *)((R)->desc))[i]))
+#define I40E_TX_DESC(R, i) (&(((struct i40e_tx_desc *)((R)->desc))[i]))
+#define I40E_TX_CTXTDESC(R, i) \
+	(&(((struct i40e_tx_context_desc *)((R)->desc))[i]))
+#define MAX_RX_QUEUES 8
+#define MAX_TX_QUEUES MAX_RX_QUEUES
+
+/* MAX_MSIX_Q_VECTORS of these are allocated,
+ * but we only use one per queue-specific vector.
+ */
+struct i40e_q_vector {
+	struct i40evf_adapter *adapter;
+	struct i40e_vsi *vsi;
+	struct napi_struct napi;
+	unsigned long reg_idx;
+	struct i40e_ring_container rx;
+	struct i40e_ring_container tx;
+	u32 ring_mask;
+	u8 num_ringpairs;	/* total number of ring pairs in vector */
+	int v_idx;	  /* vector index in list */
+	char name[IFNAMSIZ + 9];
+	cpumask_var_t affinity_mask;
+};
+
+/* Helper macros to switch between ints/sec and what the register uses.
+ * And yes, it's the same math going both ways.  The lowest value
+ * supported by all of the i40e hardware is 8.
+ */
+#define EITR_INTS_PER_SEC_TO_REG(_eitr) \
+	((_eitr) ? (1000000000 / ((_eitr) * 256)) : 8)
+#define EITR_REG_TO_INTS_PER_SEC EITR_INTS_PER_SEC_TO_REG
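+
+/* Example, informational only: 20000 ints/sec gives a register value of
+ * 1000000000 / (20000 * 256) = 195, and 195 converts back to roughly
+ * 20032 ints/sec, which is why the same macro works in both directions.
+ */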
+
+#define I40EVF_DESC_UNUSED(R) \
+	((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
+	(R)->next_to_clean - (R)->next_to_use - 1)
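+
+/* Example, informational only: with count = 512, next_to_clean = 10 and
+ * next_to_use = 200, I40EVF_DESC_UNUSED() yields 512 + 10 - 200 - 1 = 321
+ * free descriptors; the trailing -1 always keeps one descriptor slot in
+ * reserve.
+ */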
+
+#define I40EVF_RX_DESC_ADV(R, i)	    \
+	(&(((union i40e_adv_rx_desc *)((R).desc))[i]))
+#define I40EVF_TX_DESC_ADV(R, i)	    \
+	(&(((union i40e_adv_tx_desc *)((R).desc))[i]))
+#define I40EVF_TX_CTXTDESC_ADV(R, i)	    \
+	(&(((struct i40e_adv_tx_context_desc *)((R).desc))[i]))
+
+#define OTHER_VECTOR 1
+#define NONQ_VECS (OTHER_VECTOR)
+
+#define MAX_MSIX_Q_VECTORS 4
+#define MAX_MSIX_COUNT 5
+
+#define MIN_MSIX_Q_VECTORS 1
+#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NONQ_VECS)
+
+#define I40EVF_QUEUE_END_OF_LIST 0x7FF
+#define I40EVF_FREE_VECTOR 0x7FFF
+struct i40evf_mac_filter {
+	struct list_head list;
+	u8 macaddr[ETH_ALEN];
+	bool remove;		/* filter needs to be removed */
+	bool add;		/* filter needs to be added */
+};
+
+struct i40evf_vlan_filter {
+	struct list_head list;
+	u16 vlan;
+	bool remove;		/* filter needs to be removed */
+	bool add;		/* filter needs to be added */
+};
+
+/* Driver state. The order of these is important! */
+enum i40evf_state_t {
+	__I40EVF_STARTUP,		/* driver loaded, probe complete */
+	__I40EVF_FAILED,		/* PF communication failed. Fatal. */
+	__I40EVF_REMOVE,		/* driver is being unloaded */
+	__I40EVF_INIT_VERSION_CHECK,	/* aq msg sent, awaiting reply */
+	__I40EVF_INIT_GET_RESOURCES,	/* aq msg sent, awaiting reply */
+	__I40EVF_INIT_SW,		/* got resources, setting up structs */
+	/* Below here, watchdog is running */
+	__I40EVF_DOWN,			/* ready, can be opened */
+	__I40EVF_TESTING,		/* in ethtool self-test */
+	__I40EVF_RESETTING,		/* in reset */
+	__I40EVF_RUNNING,		/* opened, working */
+};
+
+enum i40evf_critical_section_t {
+	__I40EVF_IN_CRITICAL_TASK,	/* cannot be interrupted */
+};
+/* make common code happy */
+#define __I40E_DOWN __I40EVF_DOWN
+
+/* board specific private data structure */
+struct i40evf_adapter {
+	struct timer_list watchdog_timer;
+	struct vlan_group *vlgrp;
+	struct work_struct reset_task;
+	struct work_struct adminq_task;
+	struct delayed_work init_task;
+	struct i40e_q_vector *q_vector[MAX_MSIX_Q_VECTORS];
+	struct list_head vlan_filter_list;
+	char name[MAX_MSIX_COUNT][IFNAMSIZ + 9];
+
+	/* Interrupt Throttle Rate */
+	u32 itr_setting;
+	u16 eitr_low;
+	u16 eitr_high;
+
+	/* TX */
+	struct i40e_ring *tx_rings[I40E_MAX_VSI_QP];
+	u64 restart_queue;
+	u64 hw_csum_tx_good;
+	u64 lsc_int;
+	u64 hw_tso_ctxt;
+	u64 hw_tso6_ctxt;
+	u32 tx_timeout_count;
+	struct list_head mac_filter_list;
+#ifdef DEBUG
+	bool detect_tx_hung;
+#endif /* DEBUG */
+
+	/* RX */
+	struct i40e_ring *rx_rings[I40E_MAX_VSI_QP];
+	int txd_count;
+	int rxd_count;
+	u64 hw_csum_rx_error;
+	u64 hw_rx_no_dma_resources;
+	u64 hw_csum_rx_good;
+	u64 non_eop_descs;
+	int num_msix_vectors;
+	struct msix_entry *msix_entries;
+
+	u64 rx_hdr_split;
+
+	u32 init_state;
+	volatile unsigned long flags;
+#define I40EVF_FLAG_RX_CSUM_ENABLED              (u32)(1)
+#define I40EVF_FLAG_RX_1BUF_CAPABLE              (u32)(1 << 1)
+#define I40EVF_FLAG_RX_PS_CAPABLE                (u32)(1 << 2)
+#define I40EVF_FLAG_RX_PS_ENABLED                (u32)(1 << 3)
+#define I40EVF_FLAG_IN_NETPOLL                   (u32)(1 << 4)
+#define I40EVF_FLAG_IMIR_ENABLED                 (u32)(1 << 5)
+#define I40EVF_FLAG_MQ_CAPABLE                   (u32)(1 << 6)
+#define I40EVF_FLAG_NEED_LINK_UPDATE             (u32)(1 << 7)
+/* duplicates for common code */
+#define I40E_FLAG_FDIR_ATR_ENABLED		 0
+#define I40E_FLAG_DCB_ENABLED			 0
+#define I40E_FLAG_IN_NETPOLL			 I40EVF_FLAG_IN_NETPOLL
+#define I40E_FLAG_RX_CSUM_ENABLED                I40EVF_FLAG_RX_CSUM_ENABLED
+	/* flags for admin queue service task */
+	u32 aq_required;
+	u32 aq_pending;
+#define I40EVF_FLAG_AQ_ENABLE_QUEUES		(u32)(1)
+#define I40EVF_FLAG_AQ_DISABLE_QUEUES		(u32)(1 << 1)
+#define I40EVF_FLAG_AQ_ADD_MAC_FILTER		(u32)(1 << 2)
+#define I40EVF_FLAG_AQ_ADD_VLAN_FILTER		(u32)(1 << 3)
+#define I40EVF_FLAG_AQ_DEL_MAC_FILTER		(u32)(1 << 4)
+#define I40EVF_FLAG_AQ_DEL_VLAN_FILTER		(u32)(1 << 5)
+#define I40EVF_FLAG_AQ_CONFIGURE_QUEUES		(u32)(1 << 6)
+#define I40EVF_FLAG_AQ_MAP_VECTORS		(u32)(1 << 7)
+#define I40EVF_FLAG_AQ_HANDLE_RESET		(u32)(1 << 8)
+	/* OS defined structs */
+	struct net_device *netdev;
+	struct pci_dev *pdev;
+	struct net_device_stats net_stats;
+
+	/* structs defined in i40e_vf.h */
+	struct i40e_hw hw;
+
+	enum i40evf_state_t state;
+	volatile unsigned long crit_section;
+	u64 tx_busy;
+
+	struct work_struct watchdog_task;
+	bool netdev_registered;
+	bool dev_closed;
+	bool link_up;
+	enum i40e_virtchnl_ops current_op;
+	struct i40e_virtchnl_vf_resource *vf_res; /* incl. all VSIs */
+	struct i40e_virtchnl_vsi_resource *vsi_res; /* our LAN VSI */
+	u16 msg_enable;
+	struct i40e_eth_stats current_stats;
+	struct i40e_vsi vsi;
+	u32 aq_wait_count;
+};
+
+struct i40evf_info {
+	enum i40e_mac_type	mac;
+	unsigned int		flags;
+};
+
+
+/* needed by i40evf_ethtool.c */
+extern char i40evf_driver_name[];
+extern const char i40evf_driver_version[];
+
+int i40evf_up(struct i40evf_adapter *adapter);
+void i40evf_down(struct i40evf_adapter *adapter);
+void i40evf_reinit_locked(struct i40evf_adapter *adapter);
+void i40evf_reset(struct i40evf_adapter *adapter);
+void i40evf_set_ethtool_ops(struct net_device *netdev);
+void i40evf_update_stats(struct i40evf_adapter *adapter);
+void i40evf_reset_interrupt_capability(struct i40evf_adapter *adapter);
+int i40evf_init_interrupt_scheme(struct i40evf_adapter *adapter);
+void i40evf_irq_enable_queues(struct i40evf_adapter *adapter, u32 mask);
+
+void i40e_napi_add_all(struct i40evf_adapter *adapter);
+void i40e_napi_del_all(struct i40evf_adapter *adapter);
+
+int i40evf_send_api_ver(struct i40evf_adapter *adapter);
+int i40evf_verify_api_ver(struct i40evf_adapter *adapter);
+int i40evf_send_vf_config_msg(struct i40evf_adapter *adapter);
+int i40evf_get_vf_config(struct i40evf_adapter *adapter);
+void i40evf_irq_enable(struct i40evf_adapter *adapter, bool flush);
+void i40evf_configure_queues(struct i40evf_adapter *adapter);
+void i40evf_deconfigure_queues(struct i40evf_adapter *adapter);
+void i40evf_enable_queues(struct i40evf_adapter *adapter);
+void i40evf_disable_queues(struct i40evf_adapter *adapter);
+void i40evf_map_queues(struct i40evf_adapter *adapter);
+void i40evf_add_ether_addrs(struct i40evf_adapter *adapter);
+void i40evf_del_ether_addrs(struct i40evf_adapter *adapter);
+void i40evf_add_vlans(struct i40evf_adapter *adapter);
+void i40evf_del_vlans(struct i40evf_adapter *adapter);
+void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags);
+void i40evf_request_stats(struct i40evf_adapter *adapter);
+void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
+				enum i40e_virtchnl_ops v_opcode,
+				i40e_status v_retval, u8 *msg, u16 msglen);
+#endif /* _I40EVF_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
new file mode 100644
index 000000000000..b0b1f4bf5ac0
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
@@ -0,0 +1,390 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+/* ethtool support for i40evf */
+#include "i40evf.h"
+
+#include <linux/uaccess.h>
+
+
+struct i40evf_stats {
+	char stat_string[ETH_GSTRING_LEN];
+	int stat_offset;
+};
+
+#define I40EVF_STAT(_name, _stat) { \
+	.stat_string = _name, \
+	.stat_offset = offsetof(struct i40evf_adapter, _stat) \
+}
+
+/* All stats are u64, so we don't need to track the size of the field. */
+static const struct i40evf_stats i40evf_gstrings_stats[] = {
+	I40EVF_STAT("rx_bytes", current_stats.rx_bytes),
+	I40EVF_STAT("rx_unicast", current_stats.rx_unicast),
+	I40EVF_STAT("rx_multicast", current_stats.rx_multicast),
+	I40EVF_STAT("rx_broadcast", current_stats.rx_broadcast),
+	I40EVF_STAT("rx_discards", current_stats.rx_discards),
+	I40EVF_STAT("rx_errors", current_stats.rx_errors),
+	I40EVF_STAT("rx_missed", current_stats.rx_missed),
+	I40EVF_STAT("rx_unknown_protocol", current_stats.rx_unknown_protocol),
+	I40EVF_STAT("tx_bytes", current_stats.tx_bytes),
+	I40EVF_STAT("tx_unicast", current_stats.tx_unicast),
+	I40EVF_STAT("tx_multicast", current_stats.tx_multicast),
+	I40EVF_STAT("tx_broadcast", current_stats.tx_broadcast),
+	I40EVF_STAT("tx_discards", current_stats.tx_discards),
+	I40EVF_STAT("tx_errors", current_stats.tx_errors),
+};
+
+#define I40EVF_GLOBAL_STATS_LEN ARRAY_SIZE(i40evf_gstrings_stats)
+#define I40EVF_QUEUE_STATS_LEN \
+	(((struct i40evf_adapter *) \
+		netdev_priv(netdev))->vsi_res->num_queue_pairs * 4)
+#define I40EVF_STATS_LEN (I40EVF_GLOBAL_STATS_LEN + I40EVF_QUEUE_STATS_LEN)
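+
+/* The per-queue block counts four u64 values per queue pair: tx packets,
+ * tx bytes, rx packets and rx bytes, matching the strings built in
+ * i40evf_get_strings() below.
+ */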
+
+/**
+ * i40evf_get_settings - Get Link Speed and Duplex settings
+ * @netdev: network interface device structure
+ * @ecmd: ethtool command
+ *
+ * Reports speed/duplex settings. Because this is a VF, we don't know what
+ * kind of link we really have, so we fake it.
+ **/
+static int i40evf_get_settings(struct net_device *netdev,
+			       struct ethtool_cmd *ecmd)
+{
+	/* In the future the VF will be able to query the PF for
+	 * some information - for now use a dummy value
+	 */
+	ecmd->supported = SUPPORTED_10000baseT_Full;
+	ecmd->autoneg = AUTONEG_DISABLE;
+	ecmd->transceiver = XCVR_DUMMY1;
+	ecmd->port = PORT_NONE;
+
+	return 0;
+}
+
+/**
+ * i40evf_get_sset_count - Get length of string set
+ * @netdev: network interface device structure
+ * @sset: id of string set
+ *
+ * Reports size of string table. This driver only supports
+ * strings for statistics.
+ **/
+static int i40evf_get_sset_count(struct net_device *netdev, int sset)
+{
+	if (sset == ETH_SS_STATS)
+		return I40EVF_STATS_LEN;
+	else
+		return -ENOTSUPP;
+}
+
+/**
+ * i40evf_get_ethtool_stats - report device statistics
+ * @netdev: network interface device structure
+ * @stats: ethtool statistics structure
+ * @data: pointer to data buffer
+ *
+ * All statistics are added to the data buffer as an array of u64.
+ **/
+static void i40evf_get_ethtool_stats(struct net_device *netdev,
+				     struct ethtool_stats *stats, u64 *data)
+{
+	struct i40evf_adapter *adapter = netdev_priv(netdev);
+	int i, j;
+	char *p;
+
+	for (i = 0; i < I40EVF_GLOBAL_STATS_LEN; i++) {
+		p = (char *)adapter + i40evf_gstrings_stats[i].stat_offset;
+		data[i] =  *(u64 *)p;
+	}
+	for (j = 0; j < adapter->vsi_res->num_queue_pairs; j++) {
+		data[i++] = adapter->tx_rings[j]->stats.packets;
+		data[i++] = adapter->tx_rings[j]->stats.bytes;
+	}
+	for (j = 0; j < adapter->vsi_res->num_queue_pairs; j++) {
+		data[i++] = adapter->rx_rings[j]->stats.packets;
+		data[i++] = adapter->rx_rings[j]->stats.bytes;
+	}
+}
+
+/**
+ * i40evf_get_strings - Get string set
+ * @netdev: network interface device structure
+ * @sset: id of string set
+ * @data: buffer for string data
+ *
+ * Builds stats string table.
+ **/
+static void i40evf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
+{
+	struct i40evf_adapter *adapter = netdev_priv(netdev);
+	u8 *p = data;
+	int i;
+
+	if (sset == ETH_SS_STATS) {
+		for (i = 0; i < I40EVF_GLOBAL_STATS_LEN; i++) {
+			memcpy(p, i40evf_gstrings_stats[i].stat_string,
+			       ETH_GSTRING_LEN);
+			p += ETH_GSTRING_LEN;
+		}
+		for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) {
+			snprintf(p, ETH_GSTRING_LEN, "tx-%u.packets", i);
+			p += ETH_GSTRING_LEN;
+			snprintf(p, ETH_GSTRING_LEN, "tx-%u.bytes", i);
+			p += ETH_GSTRING_LEN;
+		}
+		for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) {
+			snprintf(p, ETH_GSTRING_LEN, "rx-%u.packets", i);
+			p += ETH_GSTRING_LEN;
+			snprintf(p, ETH_GSTRING_LEN, "rx-%u.bytes", i);
+			p += ETH_GSTRING_LEN;
+		}
+	}
+}
+
+/**
+ * i40evf_get_msglevel - Get debug message level
+ * @netdev: network interface device structure
+ *
+ * Returns current debug message level.
+ **/
+static u32 i40evf_get_msglevel(struct net_device *netdev)
+{
+	struct i40evf_adapter *adapter = netdev_priv(netdev);
+	return adapter->msg_enable;
+}
+
+/**
+ * i40evf_set_msglevel - Set debug message level
+ * @netdev: network interface device structure
+ * @data: message level
+ *
+ * Set current debug message level. Higher values cause the driver to
+ * be noisier.
+ **/
+static void i40evf_set_msglevel(struct net_device *netdev, u32 data)
+{
+	struct i40evf_adapter *adapter = netdev_priv(netdev);
+	adapter->msg_enable = data;
+}
+
+/**
+ * i40evf_get_drvinfo - Get driver info
+ * @netdev: network interface device structure
+ * @drvinfo: ethtool driver info structure
+ *
+ * Returns information about the driver and device for display to the user.
+ **/
+static void i40evf_get_drvinfo(struct net_device *netdev,
+			       struct ethtool_drvinfo *drvinfo)
+{
+	struct i40evf_adapter *adapter = netdev_priv(netdev);
+
+	strlcpy(drvinfo->driver, i40evf_driver_name, 32);
+	strlcpy(drvinfo->version, i40evf_driver_version, 32);
+
+	strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+}
+
+/**
+ * i40evf_get_ringparam - Get ring parameters
+ * @netdev: network interface device structure
+ * @ring: ethtool ringparam structure
+ *
+ * Returns current ring parameters. TX and RX rings are reported separately,
+ * but the number of rings is not reported.
+ **/
+static void i40evf_get_ringparam(struct net_device *netdev,
+				  struct ethtool_ringparam *ring)
+{
+	struct i40evf_adapter *adapter = netdev_priv(netdev);
+	struct i40e_ring *tx_ring = adapter->tx_rings[0];
+	struct i40e_ring *rx_ring = adapter->rx_rings[0];
+
+	ring->rx_max_pending = I40EVF_MAX_RXD;
+	ring->tx_max_pending = I40EVF_MAX_TXD;
+	ring->rx_pending = rx_ring->count;
+	ring->tx_pending = tx_ring->count;
+}
+
+/**
+ * i40evf_set_ringparam - Set ring parameters
+ * @netdev: network interface device structure
+ * @ring: ethtool ringparam structure
+ *
+ * Sets ring parameters. TX and RX rings are controlled separately, but the
+ * number of rings is not specified, so all rings get the same settings.
+ **/
+static int i40evf_set_ringparam(struct net_device *netdev,
+				struct ethtool_ringparam *ring)
+{
+	struct i40evf_adapter *adapter = netdev_priv(netdev);
+	u32 new_rx_count, new_tx_count;
+
+	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
+		return -EINVAL;
+
+	new_tx_count = clamp_t(u32, ring->tx_pending,
+			       I40EVF_MIN_TXD,
+			       I40EVF_MAX_TXD);
+	new_tx_count = ALIGN(new_tx_count, I40EVF_REQ_DESCRIPTOR_MULTIPLE);
+
+	new_rx_count = clamp_t(u32, ring->rx_pending,
+			       I40EVF_MIN_RXD,
+			       I40EVF_MAX_RXD);
+	new_rx_count = ALIGN(new_rx_count, I40EVF_REQ_DESCRIPTOR_MULTIPLE);
+
+	/* if nothing to do return success */
+	if ((new_tx_count == adapter->txd_count) &&
+	    (new_rx_count == adapter->rxd_count))
+		return 0;
+
+	adapter->txd_count = new_tx_count;
+	adapter->rxd_count = new_rx_count;
+
+	if (netif_running(netdev))
+		i40evf_reinit_locked(adapter);
+	return 0;
+}
+
+/**
+ * i40evf_get_coalesce - Get interrupt coalescing settings
+ * @netdev: network interface device structure
+ * @ec: ethtool coalesce structure
+ *
+ * Returns current coalescing settings. This is referred to elsewhere in the
+ * driver as Interrupt Throttle Rate, as this is how the hardware describes
+ * this functionality.
+ **/
+static int i40evf_get_coalesce(struct net_device *netdev,
+			     struct ethtool_coalesce *ec)
+{
+	struct i40evf_adapter *adapter = netdev_priv(netdev);
+	struct i40e_vsi *vsi = &adapter->vsi;
+
+	ec->tx_max_coalesced_frames = vsi->work_limit;
+	ec->rx_max_coalesced_frames = vsi->work_limit;
+
+	if (ITR_IS_DYNAMIC(vsi->rx_itr_setting))
+		ec->rx_coalesce_usecs = 1;
+	else
+		ec->rx_coalesce_usecs = vsi->rx_itr_setting;
+
+	if (ITR_IS_DYNAMIC(vsi->tx_itr_setting))
+		ec->tx_coalesce_usecs = 1;
+	else
+		ec->tx_coalesce_usecs = vsi->tx_itr_setting;
+
+	return 0;
+}
+
+/**
+ * i40evf_set_coalesce - Set interrupt coalescing settings
+ * @netdev: network interface device structure
+ * @ec: ethtool coalesce structure
+ *
+ * Change current coalescing settings.
+ **/
+static int i40evf_set_coalesce(struct net_device *netdev,
+			     struct ethtool_coalesce *ec)
+{
+	struct i40evf_adapter *adapter = netdev_priv(netdev);
+	struct i40e_hw *hw = &adapter->hw;
+	struct i40e_vsi *vsi = &adapter->vsi;
+	struct i40e_q_vector *q_vector;
+	int i;
+
+	if (ec->tx_max_coalesced_frames || ec->rx_max_coalesced_frames)
+		vsi->work_limit = ec->tx_max_coalesced_frames;
+
+	switch (ec->rx_coalesce_usecs) {
+	case 0:
+		vsi->rx_itr_setting = 0;
+		break;
+	case 1:
+		vsi->rx_itr_setting = (I40E_ITR_DYNAMIC
+				       | ITR_REG_TO_USEC(I40E_ITR_RX_DEF));
+		break;
+	default:
+		if ((ec->rx_coalesce_usecs < (I40E_MIN_ITR << 1)) ||
+		    (ec->rx_coalesce_usecs > (I40E_MAX_ITR << 1)))
+			return -EINVAL;
+		vsi->rx_itr_setting = ec->rx_coalesce_usecs;
+		break;
+	}
+
+	switch (ec->tx_coalesce_usecs) {
+	case 0:
+		vsi->tx_itr_setting = 0;
+		break;
+	case 1:
+		vsi->tx_itr_setting = (I40E_ITR_DYNAMIC
+				       | ITR_REG_TO_USEC(I40E_ITR_TX_DEF));
+		break;
+	default:
+		if ((ec->tx_coalesce_usecs < (I40E_MIN_ITR << 1)) ||
+		    (ec->tx_coalesce_usecs > (I40E_MAX_ITR << 1)))
+			return -EINVAL;
+		vsi->tx_itr_setting = ec->tx_coalesce_usecs;
+		break;
+	}
+
+	for (i = 0; i < adapter->num_msix_vectors - NONQ_VECS; i++) {
+		q_vector = adapter->q_vector[i];
+		q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
+		wr32(hw, I40E_VFINT_ITRN1(0, i), q_vector->rx.itr);
+		q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
+		wr32(hw, I40E_VFINT_ITRN1(1, i), q_vector->tx.itr);
+		i40e_flush(hw);
+	}
+
+	return 0;
+}
+
+static struct ethtool_ops i40evf_ethtool_ops = {
+	.get_settings		= i40evf_get_settings,
+	.get_drvinfo		= i40evf_get_drvinfo,
+	.get_link		= ethtool_op_get_link,
+	.get_ringparam		= i40evf_get_ringparam,
+	.set_ringparam		= i40evf_set_ringparam,
+	.get_strings		= i40evf_get_strings,
+	.get_ethtool_stats	= i40evf_get_ethtool_stats,
+	.get_sset_count		= i40evf_get_sset_count,
+	.get_msglevel		= i40evf_get_msglevel,
+	.set_msglevel		= i40evf_set_msglevel,
+	.get_coalesce		= i40evf_get_coalesce,
+	.set_coalesce		= i40evf_set_coalesce,
+};
+
+/**
+ * i40evf_set_ethtool_ops - Initialize ethtool ops struct
+ * @netdev: network interface device structure
+ *
+ * Sets ethtool ops struct in our netdev so that ethtool can call
+ * our functions.
+ **/
+void i40evf_set_ethtool_ops(struct net_device *netdev)
+{
+	SET_ETHTOOL_OPS(netdev, &i40evf_ethtool_ops);
+}
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
new file mode 100644
index 000000000000..f5caf4419243
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -0,0 +1,2353 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#include "i40evf.h"
+#include "i40e_prototype.h"
+static int i40evf_setup_all_tx_resources(struct i40evf_adapter *adapter);
+static int i40evf_setup_all_rx_resources(struct i40evf_adapter *adapter);
+static int i40evf_close(struct net_device *netdev);
+
+char i40evf_driver_name[] = "i40evf";
+static const char i40evf_driver_string[] =
+	"Intel(R) XL710 X710 Virtual Function Network Driver";
+
+#define DRV_VERSION "0.9.11"
+const char i40evf_driver_version[] = DRV_VERSION;
+static const char i40evf_copyright[] =
+	"Copyright (c) 2013 Intel Corporation.";
+
+/* i40evf_pci_tbl - PCI Device ID Table
+ *
+ * Wildcard entries (PCI_ANY_ID) should come last
+ * Last entry must be all 0s
+ *
+ * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
+ *   Class, Class Mask, private data (not used) }
+ */
+static DEFINE_PCI_DEVICE_TABLE(i40evf_pci_tbl) = {
+	{PCI_VDEVICE(INTEL, I40E_DEV_ID_VF), 0},
+	/* required last entry */
+	{0, }
+};
+
+MODULE_DEVICE_TABLE(pci, i40evf_pci_tbl);
+
+MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
+MODULE_DESCRIPTION("Intel(R) XL710 X710 Virtual Function Network Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+/**
+ * i40evf_allocate_dma_mem_d - OS specific memory alloc for shared code
+ * @hw:   pointer to the HW structure
+ * @mem:  ptr to mem struct to fill out
+ * @size: size of memory requested
+ * @alignment: what to align the allocation to
+ **/
+i40e_status i40evf_allocate_dma_mem_d(struct i40e_hw *hw,
+				      struct i40e_dma_mem *mem,
+				      u64 size, u32 alignment)
+{
+	struct i40evf_adapter *adapter = (struct i40evf_adapter *)hw->back;
+
+	if (!mem)
+		return I40E_ERR_PARAM;
+
+	mem->size = ALIGN(size, alignment);
+	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size,
+				     (dma_addr_t *)&mem->pa, GFP_KERNEL);
+	if (mem->va)
+		return 0;
+	else
+		return I40E_ERR_NO_MEMORY;
+}
+
+/**
+ * i40evf_free_dma_mem_d - OS specific memory free for shared code
+ * @hw:   pointer to the HW structure
+ * @mem:  ptr to mem struct to free
+ **/
+i40e_status i40evf_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
+{
+	struct i40evf_adapter *adapter = (struct i40evf_adapter *)hw->back;
+
+	if (!mem || !mem->va)
+		return I40E_ERR_PARAM;
+	dma_free_coherent(&adapter->pdev->dev, mem->size,
+			  mem->va, (dma_addr_t)mem->pa);
+	return 0;
+}
+
+/**
+ * i40evf_allocate_virt_mem_d - OS specific memory alloc for shared code
+ * @hw:   pointer to the HW structure
+ * @mem:  ptr to mem struct to fill out
+ * @size: size of memory requested
+ **/
+i40e_status i40evf_allocate_virt_mem_d(struct i40e_hw *hw,
+				       struct i40e_virt_mem *mem, u32 size)
+{
+	if (!mem)
+		return I40E_ERR_PARAM;
+
+	mem->size = size;
+	mem->va = kzalloc(size, GFP_KERNEL);
+
+	if (mem->va)
+		return 0;
+	else
+		return I40E_ERR_NO_MEMORY;
+}
+
+/**
+ * i40evf_free_virt_mem_d - OS specific memory free for shared code
+ * @hw:   pointer to the HW structure
+ * @mem:  ptr to mem struct to free
+ **/
+i40e_status i40evf_free_virt_mem_d(struct i40e_hw *hw,
+				   struct i40e_virt_mem *mem)
+{
+	if (!mem)
+		return I40E_ERR_PARAM;
+
+	/* it's ok to kfree a NULL pointer */
+	kfree(mem->va);
+
+	return 0;
+}
+
+/**
+ * i40evf_debug_d - OS dependent version of debug printing
+ * @hw:  pointer to the HW structure
+ * @mask: debug level mask
+ * @fmt_str: printf-type format description
+ **/
+void i40evf_debug_d(void *hw, u32 mask, char *fmt_str, ...)
+{
+	char buf[512];
+	va_list argptr;
+
+	if (!(mask & ((struct i40e_hw *)hw)->debug_mask))
+		return;
+
+	va_start(argptr, fmt_str);
+	vsnprintf(buf, sizeof(buf), fmt_str, argptr);
+	va_end(argptr);
+
+	/* the debug string is already formatted with a newline */
+	pr_info("%s", buf);
+}
+
+/**
+ * i40evf_tx_timeout - Respond to a Tx Hang
+ * @netdev: network interface device structure
+ **/
+static void i40evf_tx_timeout(struct net_device *netdev)
+{
+	struct i40evf_adapter *adapter = netdev_priv(netdev);
+
+	adapter->tx_timeout_count++;
+
+	/* Do the reset outside of interrupt context */
+	schedule_work(&adapter->reset_task);
+}
+
+/**
+ * i40evf_misc_irq_disable - Mask off interrupt generation on the NIC
+ * @adapter: board private structure
+ **/
+static void i40evf_misc_irq_disable(struct i40evf_adapter *adapter)
+{
+	struct i40e_hw *hw = &adapter->hw;
+	wr32(hw, I40E_VFINT_DYN_CTL01, 0);
+
+	/* read flush */
+	rd32(hw, I40E_VFGEN_RSTAT);
+
+	synchronize_irq(adapter->msix_entries[0].vector);
+}
+
+/**
+ * i40evf_misc_irq_enable - Enable default interrupt generation settings
+ * @adapter: board private structure
+ **/
+static void i40evf_misc_irq_enable(struct i40evf_adapter *adapter)
+{
+	struct i40e_hw *hw = &adapter->hw;
+	wr32(hw, I40E_VFINT_DYN_CTL01, I40E_VFINT_DYN_CTL01_INTENA_MASK |
+				       I40E_VFINT_DYN_CTL01_ITR_INDX_MASK);
+	wr32(hw, I40E_VFINT_ICR0_ENA1, I40E_VFINT_ICR0_ENA_ADMINQ_MASK);
+
+	/* read flush */
+	rd32(hw, I40E_VFGEN_RSTAT);
+}
+
+/**
+ * i40evf_irq_disable - Mask off interrupt generation on the NIC
+ * @adapter: board private structure
+ **/
+static void i40evf_irq_disable(struct i40evf_adapter *adapter)
+{
+	int i;
+	struct i40e_hw *hw = &adapter->hw;
+
+	for (i = 1; i < adapter->num_msix_vectors; i++) {
+		wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1), 0);
+		synchronize_irq(adapter->msix_entries[i].vector);
+	}
+	/* read flush */
+	rd32(hw, I40E_VFGEN_RSTAT);
+
+}
+
+/**
+ * i40evf_irq_enable_queues - Enable interrupt for specified queues
+ * @adapter: board private structure
+ * @mask: bitmap of queues to enable
+ **/
+void i40evf_irq_enable_queues(struct i40evf_adapter *adapter, u32 mask)
+{
+	struct i40e_hw *hw = &adapter->hw;
+	int i;
+
+	for (i = 1; i < adapter->num_msix_vectors; i++) {
+		if (mask & (1 << (i - 1))) {
+			wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1),
+			     I40E_VFINT_DYN_CTLN1_INTENA_MASK |
+			     I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
+		}
+	}
+}
+
+/**
+ * i40evf_fire_sw_int - Generate SW interrupt for specified vectors
+ * @adapter: board private structure
+ * @mask: bitmap of vectors to trigger
+ **/
+static void i40evf_fire_sw_int(struct i40evf_adapter *adapter,
+					    u32 mask)
+{
+	struct i40e_hw *hw = &adapter->hw;
+	int i;
+	uint32_t dyn_ctl;
+
+	for (i = 1; i < adapter->num_msix_vectors; i++) {
+		if (mask & (1 << i)) {
+			dyn_ctl = rd32(hw, I40E_VFINT_DYN_CTLN1(i - 1));
+			dyn_ctl |= I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK |
+				   I40E_VFINT_DYN_CTLN_CLEARPBA_MASK;
+			wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1), dyn_ctl);
+		}
+	}
+}
+
+/**
+ * i40evf_irq_enable - Enable default interrupt generation settings
+ * @adapter: board private structure
+ **/
+void i40evf_irq_enable(struct i40evf_adapter *adapter, bool flush)
+{
+	struct i40e_hw *hw = &adapter->hw;
+
+	i40evf_irq_enable_queues(adapter, ~0);
+
+	if (flush)
+		rd32(hw, I40E_VFGEN_RSTAT);
+}
+
+/**
+ * i40evf_msix_aq - Interrupt handler for vector 0
+ * @irq: interrupt number
+ * @data: pointer to netdev
+ **/
+static irqreturn_t i40evf_msix_aq(int irq, void *data)
+{
+	struct net_device *netdev = data;
+	struct i40evf_adapter *adapter = netdev_priv(netdev);
+	struct i40e_hw *hw = &adapter->hw;
+	u32 val;
+	u32 ena_mask;
+
+	/* handle non-queue interrupts */
+	val = rd32(hw, I40E_VFINT_ICR01);
+	ena_mask = rd32(hw, I40E_VFINT_ICR0_ENA1);
+
+
+	val = rd32(hw, I40E_VFINT_DYN_CTL01);
+	val = val | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
+	wr32(hw, I40E_VFINT_DYN_CTL01, val);
+
+	/* re-enable interrupt causes */
+	wr32(hw, I40E_VFINT_ICR0_ENA1, ena_mask);
+	wr32(hw, I40E_VFINT_DYN_CTL01, I40E_VFINT_DYN_CTL01_INTENA_MASK);
+
+	/* schedule work on the private workqueue */
+	schedule_work(&adapter->adminq_task);
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * i40evf_msix_clean_rings - MSIX mode Interrupt Handler
+ * @irq: interrupt number
+ * @data: pointer to a q_vector
+ **/
+static irqreturn_t i40evf_msix_clean_rings(int irq, void *data)
+{
+	struct i40e_q_vector *q_vector = data;
+
+	if (!q_vector->tx.ring && !q_vector->rx.ring)
+		return IRQ_HANDLED;
+
+	napi_schedule(&q_vector->napi);
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * i40evf_map_vector_to_rxq - associate irqs with rx queues
+ * @adapter: board private structure
+ * @v_idx: interrupt number
+ * @r_idx: queue number
+ **/
+static void
+i40evf_map_vector_to_rxq(struct i40evf_adapter *adapter, int v_idx, int r_idx)
+{
+	struct i40e_q_vector *q_vector = adapter->q_vector[v_idx];
+	struct i40e_ring *rx_ring = adapter->rx_rings[r_idx];
+
+	rx_ring->q_vector = q_vector;
+	rx_ring->next = q_vector->rx.ring;
+	rx_ring->vsi = &adapter->vsi;
+	q_vector->rx.ring = rx_ring;
+	q_vector->rx.count++;
+	q_vector->rx.latency_range = I40E_LOW_LATENCY;
+}
+
+/**
+ * i40evf_map_vector_to_txq - associate irqs with tx queues
+ * @adapter: board private structure
+ * @v_idx: interrupt number
+ * @t_idx: queue number
+ **/
+static void
+i40evf_map_vector_to_txq(struct i40evf_adapter *adapter, int v_idx, int t_idx)
+{
+	struct i40e_q_vector *q_vector = adapter->q_vector[v_idx];
+	struct i40e_ring *tx_ring = adapter->tx_rings[t_idx];
+
+	tx_ring->q_vector = q_vector;
+	tx_ring->next = q_vector->tx.ring;
+	tx_ring->vsi = &adapter->vsi;
+	q_vector->tx.ring = tx_ring;
+	q_vector->tx.count++;
+	q_vector->tx.latency_range = I40E_LOW_LATENCY;
+	q_vector->num_ringpairs++;
+	q_vector->ring_mask |= (1 << t_idx);
+}
+
+/**
+ * i40evf_map_rings_to_vectors - Maps descriptor rings to vectors
+ * @adapter: board private structure to initialize
+ *
+ * This function maps descriptor rings to the queue-specific vectors
+ * we were allotted through the MSI-X enabling code.  Ideally, we'd have
+ * one vector per ring/queue, but on a constrained vector budget, we
+ * group the rings as "efficiently" as possible.  You would add new
+ * mapping configurations in here.
+ **/
+static int i40evf_map_rings_to_vectors(struct i40evf_adapter *adapter)
+{
+	int q_vectors;
+	int v_start = 0;
+	int rxr_idx = 0, txr_idx = 0;
+	int rxr_remaining = adapter->vsi_res->num_queue_pairs;
+	int txr_remaining = adapter->vsi_res->num_queue_pairs;
+	int i, j;
+	int rqpv, tqpv;
+	int err = 0;
+
+	q_vectors = adapter->num_msix_vectors - NONQ_VECS;
+
+	/* The ideal configuration...
+	 * We have enough vectors to map one per queue.
+	 */
+	if (q_vectors == (rxr_remaining * 2)) {
+		for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
+			i40evf_map_vector_to_rxq(adapter, v_start, rxr_idx);
+
+		for (; txr_idx < txr_remaining; v_start++, txr_idx++)
+			i40evf_map_vector_to_txq(adapter, v_start, txr_idx);
+		goto out;
+	}
+
+	/* If we don't have enough vectors for a 1-to-1
+	 * mapping, we'll have to group them so there are
+	 * multiple queues per vector.
+	 * Re-adjusting *qpv takes care of the remainder.
+	 */
+	for (i = v_start; i < q_vectors; i++) {
+		rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
+		for (j = 0; j < rqpv; j++) {
+			i40evf_map_vector_to_rxq(adapter, i, rxr_idx);
+			rxr_idx++;
+			rxr_remaining--;
+		}
+	}
+	for (i = v_start; i < q_vectors; i++) {
+		tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
+		for (j = 0; j < tqpv; j++) {
+			i40evf_map_vector_to_txq(adapter, i, txr_idx);
+			txr_idx++;
+			txr_remaining--;
+		}
+	}
+
+out:
+	adapter->aq_required |= I40EVF_FLAG_AQ_MAP_VECTORS;
+
+	return err;
+}
+
+/**
+ * i40evf_request_traffic_irqs - Initialize MSI-X interrupts
+ * @adapter: board private structure
+ *
+ * Allocates MSI-X vectors for tx and rx handling, and requests
+ * interrupts from the kernel.
+ **/
+static int
+i40evf_request_traffic_irqs(struct i40evf_adapter *adapter, char *basename)
+{
+	int vector, err, q_vectors;
+	int rx_int_idx = 0, tx_int_idx = 0;
+
+	i40evf_irq_disable(adapter);
+	/* Decrement for Other and TCP Timer vectors */
+	q_vectors = adapter->num_msix_vectors - NONQ_VECS;
+
+	for (vector = 0; vector < q_vectors; vector++) {
+		struct i40e_q_vector *q_vector = adapter->q_vector[vector];
+
+		if (q_vector->tx.ring && q_vector->rx.ring) {
+			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
+				 "i40evf-%s-%s-%d", basename,
+				 "TxRx", rx_int_idx++);
+			tx_int_idx++;
+		} else if (q_vector->rx.ring) {
+			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
+				 "i40evf-%s-%s-%d", basename,
+				 "rx", rx_int_idx++);
+		} else if (q_vector->tx.ring) {
+			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
+				 "i40evf-%s-%s-%d", basename,
+				 "tx", tx_int_idx++);
+		} else {
+			/* skip this unused q_vector */
+			continue;
+		}
+		err = request_irq(
+			adapter->msix_entries[vector + NONQ_VECS].vector,
+			i40evf_msix_clean_rings,
+			0,
+			q_vector->name,
+			q_vector);
+		if (err) {
+			dev_info(&adapter->pdev->dev,
+				 "%s: request_irq failed, error: %d\n",
+				__func__, err);
+			goto free_queue_irqs;
+		}
+		/* assign the mask for this irq */
+		irq_set_affinity_hint(
+			adapter->msix_entries[vector + NONQ_VECS].vector,
+			q_vector->affinity_mask);
+	}
+
+	return 0;
+
+free_queue_irqs:
+	while (vector) {
+		vector--;
+		irq_set_affinity_hint(
+			adapter->msix_entries[vector + NONQ_VECS].vector,
+			NULL);
+		free_irq(adapter->msix_entries[vector + NONQ_VECS].vector,
+			 adapter->q_vector[vector]);
+	}
+	return err;
+}
+
+/**
+ * i40evf_request_misc_irq - Initialize MSI-X interrupts
+ * @adapter: board private structure
+ *
+ * Allocates MSI-X vector 0 and requests interrupts from the kernel. This
+ * vector is only for the admin queue, and stays active even when the netdev
+ * is closed.
+ **/
+static int i40evf_request_misc_irq(struct i40evf_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	int err;
+
+	sprintf(adapter->name[0], "i40evf:mbx");
+	err = request_irq(adapter->msix_entries[0].vector,
+			  &i40evf_msix_aq, 0, adapter->name[0], netdev);
+	if (err) {
+		dev_err(&adapter->pdev->dev,
+			"request_irq for msix_aq failed: %d\n", err);
+		free_irq(adapter->msix_entries[0].vector, netdev);
+	}
+	return err;
+}
+
+/**
+ * i40evf_free_traffic_irqs - Free MSI-X interrupts
+ * @adapter: board private structure
+ *
+ * Frees all MSI-X vectors other than 0.
+ **/
+static void i40evf_free_traffic_irqs(struct i40evf_adapter *adapter)
+{
+	int i;
+	int q_vectors;
+	q_vectors = adapter->num_msix_vectors - NONQ_VECS;
+
+	for (i = 0; i < q_vectors; i++) {
+		irq_set_affinity_hint(adapter->msix_entries[i+1].vector,
+				      NULL);
+		free_irq(adapter->msix_entries[i+1].vector,
+			 adapter->q_vector[i]);
+	}
+}
+
+/**
+ * i40evf_free_misc_irq - Free MSI-X miscellaneous vector
+ * @adapter: board private structure
+ *
+ * Frees MSI-X vector 0.
+ **/
+static void i40evf_free_misc_irq(struct i40evf_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+
+	free_irq(adapter->msix_entries[0].vector, netdev);
+}
+
+/**
+ * i40evf_configure_tx - Configure Transmit Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Tx unit of the MAC after a reset.
+ **/
+static void i40evf_configure_tx(struct i40evf_adapter *adapter)
+{
+	struct i40e_hw *hw = &adapter->hw;
+	int i;
+	for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++)
+		adapter->tx_rings[i]->tail = hw->hw_addr + I40E_QTX_TAIL1(i);
+}
+
+/**
+ * i40evf_configure_rx - Configure Receive Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Rx unit of the MAC after a reset.
+ **/
+static void i40evf_configure_rx(struct i40evf_adapter *adapter)
+{
+	struct i40e_hw *hw = &adapter->hw;
+	struct net_device *netdev = adapter->netdev;
+	int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
+	int i;
+	int rx_buf_len;
+
+	adapter->flags &= ~I40EVF_FLAG_RX_PS_CAPABLE;
+	adapter->flags |= I40EVF_FLAG_RX_1BUF_CAPABLE;
+
+	/* Decide whether to use packet split mode or not */
+	if (netdev->mtu > ETH_DATA_LEN) {
+		if (adapter->flags & I40EVF_FLAG_RX_PS_CAPABLE)
+			adapter->flags |= I40EVF_FLAG_RX_PS_ENABLED;
+		else
+			adapter->flags &= ~I40EVF_FLAG_RX_PS_ENABLED;
+	} else {
+		if (adapter->flags & I40EVF_FLAG_RX_1BUF_CAPABLE)
+			adapter->flags &= ~I40EVF_FLAG_RX_PS_ENABLED;
+		else
+			adapter->flags |= I40EVF_FLAG_RX_PS_ENABLED;
+	}
+
+	/* Set the RX buffer length according to the mode */
+	if (adapter->flags & I40EVF_FLAG_RX_PS_ENABLED) {
+		rx_buf_len = I40E_RX_HDR_SIZE;
+	} else {
+		if (netdev->mtu <= ETH_DATA_LEN)
+			rx_buf_len = I40EVF_RXBUFFER_2048;
+		else
+			rx_buf_len = ALIGN(max_frame, 1024);
+	}
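+	/* e.g. the default 1500-byte MTU gets 2048-byte buffers, while a
+	 * 9000-byte MTU rounds 9018 (MTU + header + FCS) up to 9216.
+	 */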
+
+	for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) {
+		adapter->rx_rings[i]->tail = hw->hw_addr + I40E_QRX_TAIL1(i);
+		adapter->rx_rings[i]->rx_buf_len = rx_buf_len;
+	}
+}
+
+/**
+ * i40evf_find_vlan - Search filter list for specific vlan filter
+ * @adapter: board private structure
+ * @vlan: vlan tag
+ *
+ * Returns ptr to the filter object or NULL
+ **/
+static struct
+i40evf_vlan_filter *i40evf_find_vlan(struct i40evf_adapter *adapter, u16 vlan)
+{
+	struct i40evf_vlan_filter *f;
+
+	list_for_each_entry(f, &adapter->vlan_filter_list, list) {
+		if (vlan == f->vlan)
+			return f;
+	}
+	return NULL;
+}
+
+/**
+ * i40evf_add_vlan - Add a vlan filter to the list
+ * @adapter: board private structure
+ * @vlan: VLAN tag
+ *
+ * Returns ptr to the filter object or NULL when no memory available.
+ **/
+static struct
+i40evf_vlan_filter *i40evf_add_vlan(struct i40evf_adapter *adapter, u16 vlan)
+{
+	struct i40evf_vlan_filter *f;
+
+	f = i40evf_find_vlan(adapter, vlan);
+	if (!f) {
+		f = kzalloc(sizeof(*f), GFP_ATOMIC);
+		if (!f) {
+			dev_info(&adapter->pdev->dev,
+				 "%s: no memory for new VLAN filter\n",
+				 __func__);
+			return NULL;
+		}
+		f->vlan = vlan;
+
+		INIT_LIST_HEAD(&f->list);
+		list_add(&f->list, &adapter->vlan_filter_list);
+		f->add = true;
+		adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
+	}
+
+	return f;
+}
+
+/**
+ * i40evf_del_vlan - Remove a vlan filter from the list
+ * @adapter: board private structure
+ * @vlan: VLAN tag
+ **/
+static void i40evf_del_vlan(struct i40evf_adapter *adapter, u16 vlan)
+{
+	struct i40evf_vlan_filter *f;
+
+	f = i40evf_find_vlan(adapter, vlan);
+	if (f) {
+		f->remove = true;
+		adapter->aq_required |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
+	}
+}
+
+/**
+ * i40evf_vlan_rx_add_vid - Add a VLAN filter to a device
+ * @netdev: network device struct
+ * @proto: unused protocol value
+ * @vid: VLAN tag
+ **/
+static int i40evf_vlan_rx_add_vid(struct net_device *netdev,
+			 __always_unused __be16 proto, u16 vid)
+{
+	struct i40evf_adapter *adapter = netdev_priv(netdev);
+
+	if (i40evf_add_vlan(adapter, vid) == NULL)
+		return -ENOMEM;
+	return 0;
+}
+
+/**
+ * i40evf_vlan_rx_kill_vid - Remove a VLAN filter from a device
+ * @netdev: network device struct
+ * @proto: unused protocol value
+ * @vid: VLAN tag
+ **/
+static int i40evf_vlan_rx_kill_vid(struct net_device *netdev,
+			  __always_unused __be16 proto, u16 vid)
+{
+	struct i40evf_adapter *adapter = netdev_priv(netdev);
+
+	i40evf_del_vlan(adapter, vid);
+	return 0;
+}
+
+/**
+ * i40evf_find_filter - Search filter list for specific mac filter
+ * @adapter: board private structure
+ * @macaddr: the MAC address
+ *
+ * Returns ptr to the filter object or NULL
+ **/
+static struct
+i40evf_mac_filter *i40evf_find_filter(struct i40evf_adapter *adapter,
+				      u8 *macaddr)
+{
+	struct i40evf_mac_filter *f;
+
+	if (!macaddr)
+		return NULL;
+
+	list_for_each_entry(f, &adapter->mac_filter_list, list) {
+		if (ether_addr_equal(macaddr, f->macaddr))
+			return f;
+	}
+	return NULL;
+}
+
+/**
+ * i40evf_add_filter - Add a mac filter to the filter list
+ * @adapter: board private structure
+ * @macaddr: the MAC address
+ *
+ * Returns ptr to the filter object or NULL when no memory available.
+ **/
+static struct
+i40evf_mac_filter *i40evf_add_filter(struct i40evf_adapter *adapter,
+				     u8 *macaddr)
+{
+	struct i40evf_mac_filter *f;
+
+	if (!macaddr)
+		return NULL;
+
+	while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
+				&adapter->crit_section))
+		mdelay(1);
+
+	f = i40evf_find_filter(adapter, macaddr);
+	if (!f) {
+		f = kzalloc(sizeof(*f), GFP_ATOMIC);
+		if (!f) {
+			dev_info(&adapter->pdev->dev,
+				 "%s: no memory for new filter\n", __func__);
+			clear_bit(__I40EVF_IN_CRITICAL_TASK,
+				  &adapter->crit_section);
+			return NULL;
+		}
+
+		memcpy(f->macaddr, macaddr, ETH_ALEN);
+
+		list_add(&f->list, &adapter->mac_filter_list);
+		f->add = true;
+		adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;
+	}
+
+	clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+	return f;
+}
+
+/**
+ * i40evf_set_mac - NDO callback to set port mac address
+ * @netdev: network interface device structure
+ * @p: pointer to an address structure
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int i40evf_set_mac(struct net_device *netdev, void *p)
+{
+	struct i40evf_adapter *adapter = netdev_priv(netdev);
+	struct i40e_hw *hw = &adapter->hw;
+	struct i40evf_mac_filter *f;
+	struct sockaddr *addr = p;
+
+	if (!is_valid_ether_addr(addr->sa_data))
+		return -EADDRNOTAVAIL;
+
+	if (ether_addr_equal(netdev->dev_addr, addr->sa_data))
+		return 0;
+
+	f = i40evf_add_filter(adapter, addr->sa_data);
+	if (f) {
+		memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
+		memcpy(netdev->dev_addr, adapter->hw.mac.addr,
+		       netdev->addr_len);
+	}
+
+	return (f == NULL) ? -ENOMEM : 0;
+}
+
+/**
+ * i40evf_set_rx_mode - NDO callback to set the netdev filters
+ * @netdev: network interface device structure
+ **/
+static void i40evf_set_rx_mode(struct net_device *netdev)
+{
+	struct i40evf_adapter *adapter = netdev_priv(netdev);
+	struct i40evf_mac_filter *f, *ftmp;
+	struct netdev_hw_addr *uca;
+	struct netdev_hw_addr *mca;
+
+	/* add addr if not already in the filter list */
+	netdev_for_each_uc_addr(uca, netdev) {
+		i40evf_add_filter(adapter, uca->addr);
+	}
+	netdev_for_each_mc_addr(mca, netdev) {
+		i40evf_add_filter(adapter, mca->addr);
+	}
+
+	while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
+				&adapter->crit_section))
+		mdelay(1);
+	/* remove filter if not in netdev list */
+	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
+		bool found = false;
+
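+		/* bit 0 of the first address octet is the multicast bit */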
+		if (f->macaddr[0] & 0x01) {
+			netdev_for_each_mc_addr(mca, netdev) {
+				if (ether_addr_equal(mca->addr, f->macaddr)) {
+					found = true;
+					break;
+				}
+			}
+		} else {
+			netdev_for_each_uc_addr(uca, netdev) {
+				if (ether_addr_equal(uca->addr, f->macaddr)) {
+					found = true;
+					break;
+				}
+			}
+		}
+		if (found) {
+			f->remove = true;
+			adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER;
+		}
+	}
+	clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+}
+
+/**
+ * i40evf_napi_enable_all - enable NAPI on all queue vectors
+ * @adapter: board private structure
+ **/
+static void i40evf_napi_enable_all(struct i40evf_adapter *adapter)
+{
+	int q_idx;
+	struct i40e_q_vector *q_vector;
+	int q_vectors = adapter->num_msix_vectors - NONQ_VECS;
+
+	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
+		struct napi_struct *napi;
+		q_vector = adapter->q_vector[q_idx];
+		napi = &q_vector->napi;
+		napi_enable(napi);
+	}
+}
+
+/**
+ * i40evf_napi_disable_all - disable NAPI on all queue vectors
+ * @adapter: board private structure
+ **/
+static void i40evf_napi_disable_all(struct i40evf_adapter *adapter)
+{
+	int q_idx;
+	struct i40e_q_vector *q_vector;
+	int q_vectors = adapter->num_msix_vectors - NONQ_VECS;
+
+	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
+		q_vector = adapter->q_vector[q_idx];
+		napi_disable(&q_vector->napi);
+	}
+}
+
+/**
+ * i40evf_configure - set up transmit and receive data structures
+ * @adapter: board private structure
+ **/
+static void i40evf_configure(struct i40evf_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	int i;
+
+	i40evf_set_rx_mode(netdev);
+
+	i40evf_configure_tx(adapter);
+	i40evf_configure_rx(adapter);
+	adapter->aq_required |= I40EVF_FLAG_AQ_CONFIGURE_QUEUES;
+
+	for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) {
+		struct i40e_ring *ring = adapter->rx_rings[i];
+		i40evf_alloc_rx_buffers(ring, ring->count);
+		ring->next_to_use = ring->count - 1;
+		writel(ring->next_to_use, ring->tail);
+	}
+}
+
+/**
+ * i40evf_up_complete - Finish the last steps of bringing up a connection
+ * @adapter: board private structure
+ **/
+static int i40evf_up_complete(struct i40evf_adapter *adapter)
+{
+	adapter->state = __I40EVF_RUNNING;
+	clear_bit(__I40E_DOWN, &adapter->vsi.state);
+
+	i40evf_napi_enable_all(adapter);
+
+	adapter->aq_required |= I40EVF_FLAG_AQ_ENABLE_QUEUES;
+	mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
+	return 0;
+}
+
+/**
+ * i40evf_clean_all_rx_rings - Free Rx Buffers for all queues
+ * @adapter: board private structure
+ **/
+static void i40evf_clean_all_rx_rings(struct i40evf_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++)
+		i40evf_clean_rx_ring(adapter->rx_rings[i]);
+}
+
+/**
+ * i40evf_clean_all_tx_rings - Free Tx Buffers for all queues
+ * @adapter: board private structure
+ **/
+static void i40evf_clean_all_tx_rings(struct i40evf_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++)
+		i40evf_clean_tx_ring(adapter->tx_rings[i]);
+}
+
+/**
+ * i40evf_down - Shutdown the connection processing
+ * @adapter: board private structure
+ **/
+void i40evf_down(struct i40evf_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	struct i40evf_mac_filter *f;
+
+	/* remove all MAC filters from the VSI */
+	list_for_each_entry(f, &adapter->mac_filter_list, list) {
+		f->remove = true;
+	}
+	adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER;
+	/* disable receives */
+	adapter->aq_required |= I40EVF_FLAG_AQ_DISABLE_QUEUES;
+	mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
+	msleep(20);
+
+	netif_tx_disable(netdev);
+
+	netif_tx_stop_all_queues(netdev);
+
+	i40evf_irq_disable(adapter);
+
+	i40evf_napi_disable_all(adapter);
+
+	netif_carrier_off(netdev);
+
+	i40evf_clean_all_tx_rings(adapter);
+	i40evf_clean_all_rx_rings(adapter);
+}
+
+/**
+ * i40evf_acquire_msix_vectors - Setup the MSIX capability
+ * @adapter: board private structure
+ * @vectors: number of vectors to request
+ *
+ * Work with the OS to set up the MSIX vectors needed.
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int
+i40evf_acquire_msix_vectors(struct i40evf_adapter *adapter, int vectors)
+{
+	int err, vector_threshold;
+
+	/* We'll want at least 3 (vector_threshold):
+	 * 0) Other (Admin Queue and link, mostly)
+	 * 1) TxQ[0] Cleanup
+	 * 2) RxQ[0] Cleanup
+	 */
+	vector_threshold = MIN_MSIX_COUNT;
+
+	/* The more we get, the more we will assign to Tx/Rx Cleanup
+	 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
+	 * Right now, we simply care about how many we'll get; we'll
+	 * set them up later while requesting irq's.
+	 */
+	while (vectors >= vector_threshold) {
+		err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
+				      vectors);
+		if (!err) /* Success in acquiring all requested vectors. */
+			break;
+		else if (err < 0)
+			vectors = 0; /* Nasty failure, quit now */
+		else /* err == number of vectors we should try again with */
+			vectors = err;
+	}
+
+	if (vectors < vector_threshold) {
+		dev_err(&adapter->pdev->dev, "Unable to allocate MSI-X interrupts.\n");
+		kfree(adapter->msix_entries);
+		adapter->msix_entries = NULL;
+		err = -EIO;
+	} else {
+		/* Adjust for only the vectors we'll use, which is minimum
+		 * of max_msix_q_vectors + NONQ_VECS, or the number of
+		 * vectors we were allocated.
+		 */
+		adapter->num_msix_vectors = vectors;
+	}
+	return err;
+}
+
+/**
+ * i40evf_free_queues - Free memory for all rings
+ * @adapter: board private structure to initialize
+ *
+ * Free all of the memory associated with queue pairs.
+ **/
+static void i40evf_free_queues(struct i40evf_adapter *adapter)
+{
+	int i;
+
+	if (!adapter->vsi_res)
+		return;
+	for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) {
+		if (adapter->tx_rings[i])
+			kfree_rcu(adapter->tx_rings[i], rcu);
+		adapter->tx_rings[i] = NULL;
+		adapter->rx_rings[i] = NULL;
+	}
+}
+
+/**
+ * i40evf_alloc_queues - Allocate memory for all rings
+ * @adapter: board private structure to initialize
+ *
+ * We allocate one ring per queue at run-time since we don't know the
+ * number of queues at compile-time. Tx and Rx rings for each queue pair
+ * are allocated together as a single two-element array.
+ **/
+static int i40evf_alloc_queues(struct i40evf_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) {
+		struct i40e_ring *tx_ring;
+		struct i40e_ring *rx_ring;
+
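+		/* Tx and Rx rings for a pair share one allocation; the Rx
+		 * ring lives at &tx_ring[1], which is why i40evf_free_queues
+		 * only frees the Tx side.
+		 */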
+		tx_ring = kzalloc(sizeof(struct i40e_ring) * 2, GFP_KERNEL);
+		if (!tx_ring)
+			goto err_out;
+
+		tx_ring->queue_index = i;
+		tx_ring->netdev = adapter->netdev;
+		tx_ring->dev = &adapter->pdev->dev;
+		tx_ring->count = I40EVF_DEFAULT_TXD;
+		adapter->tx_rings[i] = tx_ring;
+
+		rx_ring = &tx_ring[1];
+		rx_ring->queue_index = i;
+		rx_ring->netdev = adapter->netdev;
+		rx_ring->dev = &adapter->pdev->dev;
+		rx_ring->count = I40EVF_DEFAULT_RXD;
+		adapter->rx_rings[i] = rx_ring;
+	}
+
+	return 0;
+
+err_out:
+	i40evf_free_queues(adapter);
+	return -ENOMEM;
+}
+
+/**
+ * i40evf_set_interrupt_capability - set MSI-X or FAIL if not supported
+ * @adapter: board private structure to initialize
+ *
+ * Attempt to configure the interrupts using the best available
+ * capabilities of the hardware and the kernel.
+ **/
+static int i40evf_set_interrupt_capability(struct i40evf_adapter *adapter)
+{
+	int vector, v_budget;
+	int pairs = 0;
+	int err = 0;
+
+	if (!adapter->vsi_res) {
+		err = -EIO;
+		goto out;
+	}
+	pairs = adapter->vsi_res->num_queue_pairs;
+
+	/* It's easy to be greedy for MSI-X vectors, but it really
+	 * doesn't do us much good if we have a lot more vectors
+	 * than CPUs.  So let's be conservative and only ask for
+	 * (roughly) twice the number of vectors as there are CPUs.
+	 */
+	v_budget = min(pairs, (int)(num_online_cpus() * 2)) + NONQ_VECS;
+	v_budget = min(v_budget, (int)adapter->vf_res->max_vectors + 1);
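+	/* e.g. 4 queue pairs on an 8-CPU system request 4 + NONQ_VECS
+	 * vectors, capped at the PF-granted max_vectors + 1.
+	 */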
+
+	/* A failure to allocate the MSI-X entry table is fatal, as this
+	 * device only supports MSI-X interrupts.
+	 */
+	adapter->msix_entries = kcalloc(v_budget,
+					sizeof(struct msix_entry), GFP_KERNEL);
+	if (!adapter->msix_entries) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	for (vector = 0; vector < v_budget; vector++)
+		adapter->msix_entries[vector].entry = vector;
+
+	err = i40evf_acquire_msix_vectors(adapter, v_budget);
+
+out:
+	adapter->netdev->real_num_tx_queues = pairs;
+	return err;
+}
+
+/**
+ * i40evf_alloc_q_vectors - Allocate memory for interrupt vectors
+ * @adapter: board private structure to initialize
+ *
+ * We allocate one q_vector per queue interrupt.  If allocation fails we
+ * return -ENOMEM.
+ **/
+static int i40evf_alloc_q_vectors(struct i40evf_adapter *adapter)
+{
+	int q_idx, num_q_vectors;
+	struct i40e_q_vector *q_vector;
+
+	num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
+
+	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
+		q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL);
+		if (!q_vector)
+			goto err_out;
+		q_vector->adapter = adapter;
+		q_vector->vsi = &adapter->vsi;
+		q_vector->v_idx = q_idx;
+		netif_napi_add(adapter->netdev, &q_vector->napi,
+				       i40evf_napi_poll, 64);
+		adapter->q_vector[q_idx] = q_vector;
+	}
+
+	return 0;
+
+err_out:
+	while (q_idx) {
+		q_idx--;
+		q_vector = adapter->q_vector[q_idx];
+		netif_napi_del(&q_vector->napi);
+		kfree(q_vector);
+		adapter->q_vector[q_idx] = NULL;
+	}
+	return -ENOMEM;
+}
+
+/**
+ * i40evf_free_q_vectors - Free memory allocated for interrupt vectors
+ * @adapter: board private structure to initialize
+ *
+ * This function frees the memory allocated to the q_vectors.  In addition if
+ * NAPI is enabled it will delete any references to the NAPI struct prior
+ * to freeing the q_vector.
+ **/
+static void i40evf_free_q_vectors(struct i40evf_adapter *adapter)
+{
+	int q_idx, num_q_vectors;
+	int napi_vectors;
+
+	num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
+	napi_vectors = adapter->vsi_res->num_queue_pairs;
+
+	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
+		struct i40e_q_vector *q_vector = adapter->q_vector[q_idx];
+
+		adapter->q_vector[q_idx] = NULL;
+		if (q_idx < napi_vectors)
+			netif_napi_del(&q_vector->napi);
+		kfree(q_vector);
+	}
+}
+
+/**
+ * i40evf_reset_interrupt_capability - Reset MSIX setup
+ * @adapter: board private structure
+ *
+ **/
+void i40evf_reset_interrupt_capability(struct i40evf_adapter *adapter)
+{
+	pci_disable_msix(adapter->pdev);
+	kfree(adapter->msix_entries);
+	adapter->msix_entries = NULL;
+}
+
+/**
+ * i40evf_init_interrupt_scheme - Determine if MSIX is supported and init
+ * @adapter: board private structure to initialize
+ *
+ **/
+int i40evf_init_interrupt_scheme(struct i40evf_adapter *adapter)
+{
+	int err;
+
+	err = i40evf_set_interrupt_capability(adapter);
+	if (err) {
+		dev_err(&adapter->pdev->dev,
+			"Unable to setup interrupt capabilities\n");
+		goto err_set_interrupt;
+	}
+
+	err = i40evf_alloc_q_vectors(adapter);
+	if (err) {
+		dev_err(&adapter->pdev->dev,
+			"Unable to allocate memory for queue vectors\n");
+		goto err_alloc_q_vectors;
+	}
+
+	err = i40evf_alloc_queues(adapter);
+	if (err) {
+		dev_err(&adapter->pdev->dev,
+			"Unable to allocate memory for queues\n");
+		goto err_alloc_queues;
+	}
+
+	dev_info(&adapter->pdev->dev, "Multiqueue %s: Queue pair count = %u\n",
+		(adapter->vsi_res->num_queue_pairs > 1) ? "Enabled" :
+		"Disabled", adapter->vsi_res->num_queue_pairs);
+
+	return 0;
+err_alloc_queues:
+	i40evf_free_q_vectors(adapter);
+err_alloc_q_vectors:
+	i40evf_reset_interrupt_capability(adapter);
+err_set_interrupt:
+	return err;
+}
+
+/**
+ * i40evf_watchdog_timer - Periodic call-back timer
+ * @data: pointer to adapter disguised as unsigned long
+ **/
+static void i40evf_watchdog_timer(unsigned long data)
+{
+	struct i40evf_adapter *adapter = (struct i40evf_adapter *)data;
+	schedule_work(&adapter->watchdog_task);
+	/* timer will be rescheduled in watchdog task */
+}
+
+/**
+ * i40evf_watchdog_task - Periodic call-back task
+ * @work: pointer to work_struct
+ **/
+static void i40evf_watchdog_task(struct work_struct *work)
+{
+	struct i40evf_adapter *adapter = container_of(work,
+					  struct i40evf_adapter,
+					  watchdog_task);
+	struct i40e_hw *hw = &adapter->hw;
+
+	if (adapter->state < __I40EVF_DOWN)
+		goto watchdog_done;
+
+	if (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section))
+		goto watchdog_done;
+
+	/* check for unannounced reset */
+	if ((adapter->state != __I40EVF_RESETTING) &&
+	    (rd32(hw, I40E_VFGEN_RSTAT) & 0x3) != I40E_VFR_VFACTIVE) {
+		adapter->state = __I40EVF_RESETTING;
+		schedule_work(&adapter->reset_task);
+		dev_info(&adapter->pdev->dev, "%s: hardware reset detected\n",
+			 __func__);
+		goto watchdog_done;
+	}
+
+	/* Process admin queue tasks. After init, everything gets done
+	 * here so we don't race on the admin queue.
+	 */
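+	/* Only one request is issued per pass; the timer below re-arms every
+	 * 20ms while aq_required work remains, so the rest drain quickly.
+	 */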
+	if (adapter->aq_pending)
+		goto watchdog_done;
+
+	if (adapter->aq_required & I40EVF_FLAG_AQ_MAP_VECTORS) {
+		i40evf_map_queues(adapter);
+		goto watchdog_done;
+	}
+
+	if (adapter->aq_required & I40EVF_FLAG_AQ_ADD_MAC_FILTER) {
+		i40evf_add_ether_addrs(adapter);
+		goto watchdog_done;
+	}
+
+	if (adapter->aq_required & I40EVF_FLAG_AQ_ADD_VLAN_FILTER) {
+		i40evf_add_vlans(adapter);
+		goto watchdog_done;
+	}
+
+	if (adapter->aq_required & I40EVF_FLAG_AQ_DEL_MAC_FILTER) {
+		i40evf_del_ether_addrs(adapter);
+		goto watchdog_done;
+	}
+
+	if (adapter->aq_required & I40EVF_FLAG_AQ_DEL_VLAN_FILTER) {
+		i40evf_del_vlans(adapter);
+		goto watchdog_done;
+	}
+
+	if (adapter->aq_required & I40EVF_FLAG_AQ_DISABLE_QUEUES) {
+		i40evf_disable_queues(adapter);
+		goto watchdog_done;
+	}
+
+	if (adapter->aq_required & I40EVF_FLAG_AQ_CONFIGURE_QUEUES) {
+		i40evf_configure_queues(adapter);
+		goto watchdog_done;
+	}
+
+	if (adapter->aq_required & I40EVF_FLAG_AQ_ENABLE_QUEUES) {
+		i40evf_enable_queues(adapter);
+		goto watchdog_done;
+	}
+
+	if (adapter->state == __I40EVF_RUNNING)
+		i40evf_request_stats(adapter);
+
+	i40evf_irq_enable(adapter, true);
+	i40evf_fire_sw_int(adapter, 0xFF);
+watchdog_done:
+	if (adapter->aq_required)
+		mod_timer(&adapter->watchdog_timer,
+			  jiffies + msecs_to_jiffies(20));
+	else
+		mod_timer(&adapter->watchdog_timer, jiffies + (HZ * 2));
+	clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+	schedule_work(&adapter->adminq_task);
+}
+
+/**
+ * i40evf_configure_rss - Prepare for RSS if used
+ * @adapter: board private structure
+ **/
+static void i40evf_configure_rss(struct i40evf_adapter *adapter)
+{
+	struct i40e_hw *hw = &adapter->hw;
+	u32 lut = 0;
+	int i, j;
+	u64 hena;
+
+	/* Set of random keys generated using kernel random number generator */
+	static const u32 seed[I40E_VFQF_HKEY_MAX_INDEX + 1] = {
+			0x794221b4, 0xbca0c5ab, 0x6cd5ebd9, 0x1ada6127,
+			0x983b3aa1, 0x1c4e71eb, 0x7f6328b2, 0xfcdc0da0,
+			0xc135cafa, 0x7a6f7e2d, 0xe7102d28, 0x163cd12e,
+			0x4954b126 };
+
+	/* Hash type is configured by the PF - we just supply the key */
+
+	/* Fill out hash function seed */
+	for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
+		wr32(hw, I40E_VFQF_HKEY(i), seed[i]);
+
+	/* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */
+	hena = I40E_DEFAULT_RSS_HENA;
+	wr32(hw, I40E_VFQF_HENA(0), (u32)hena);
+	wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));
+
+	/* Populate the LUT with max no. of queues in round robin fashion */
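+	/* e.g. with 4 queue pairs each 32-bit LUT register ends up holding
+	 * queue indices 0-3, one per byte.
+	 */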
+	for (i = 0, j = 0; i < I40E_VFQF_HLUT_MAX_INDEX; i++, j++) {
+		if (j == adapter->vsi_res->num_queue_pairs)
+			j = 0;
+		/* lut = 4-byte sliding window of 4 lut entries */
+		lut = (lut << 8) | (j &
+			 ((0x1 << 8) - 1));
+		/* On i = 3, we have 4 entries in lut; write to the register */
+		if ((i & 3) == 3)
+			wr32(hw, I40E_VFQF_HLUT(i >> 2), lut);
+	}
+	i40e_flush(hw);
+}
+
+/**
+ * i40evf_reset_task - Call-back task to handle hardware reset
+ * @work: pointer to work_struct
+ *
+ * During reset we need to shut down and reinitialize the admin queue
+ * before we can use it to communicate with the PF again. We also clear
+ * and reinit the rings because that context is lost as well.
+ **/
+static void i40evf_reset_task(struct work_struct *work)
+{
+	struct i40evf_adapter *adapter =
+			container_of(work, struct i40evf_adapter, reset_task);
+	struct i40e_hw *hw = &adapter->hw;
+	int i = 0, err;
+	u32 rstat_val;
+
+	while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
+				&adapter->crit_section))
+		udelay(500);
+
+	/* wait until the reset is complete */
+	for (i = 0; i < 20; i++) {
+		rstat_val = rd32(hw, I40E_VFGEN_RSTAT) &
+			    I40E_VFGEN_RSTAT_VFR_STATE_MASK;
+		if (rstat_val == I40E_VFR_COMPLETED)
+			break;
+		else
+			mdelay(100);
+	}
+	if (i == 20) {
+		/* reset never finished */
+		dev_info(&adapter->pdev->dev, "%s: reset never finished: %x\n",
+			__func__, rstat_val);
+		/* carry on anyway */
+	}
+	i40evf_down(adapter);
+	adapter->state = __I40EVF_RESETTING;
+
+	/* kill and reinit the admin queue */
+	if (i40evf_shutdown_adminq(hw))
+		dev_warn(&adapter->pdev->dev,
+			"%s: Failed to destroy the Admin Queue resources\n",
+			__func__);
+	err = i40evf_init_adminq(hw);
+	if (err)
+		dev_info(&adapter->pdev->dev, "%s: init_adminq failed: %d\n",
+			__func__, err);
+
+	adapter->aq_pending = 0;
+	adapter->aq_required = 0;
+	i40evf_map_queues(adapter);
+	clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+
+	mod_timer(&adapter->watchdog_timer, jiffies + 2);
+
+	if (netif_running(adapter->netdev)) {
+		/* allocate transmit descriptors */
+		err = i40evf_setup_all_tx_resources(adapter);
+		if (err)
+			goto reset_err;
+
+		/* allocate receive descriptors */
+		err = i40evf_setup_all_rx_resources(adapter);
+		if (err)
+			goto reset_err;
+
+		i40evf_configure(adapter);
+
+		err = i40evf_up_complete(adapter);
+		if (err)
+			goto reset_err;
+
+		i40evf_irq_enable(adapter, true);
+	}
+	return;
+reset_err:
+	dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit.\n");
+	i40evf_close(adapter->netdev);
+}
+
+/**
+ * i40evf_adminq_task - worker thread to clean the admin queue
+ * @work: pointer to work_struct containing our data
+ **/
+static void i40evf_adminq_task(struct work_struct *work)
+{
+	struct i40evf_adapter *adapter =
+		container_of(work, struct i40evf_adapter, adminq_task);
+	struct i40e_hw *hw = &adapter->hw;
+	struct i40e_arq_event_info event;
+	struct i40e_virtchnl_msg *v_msg;
+	i40e_status ret;
+	u16 pending;
+
+	event.msg_size = I40EVF_MAX_AQ_BUF_SIZE;
+	event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL);
+	if (!event.msg_buf) {
+		dev_info(&adapter->pdev->dev, "%s: no memory for ARQ clean\n",
+				 __func__);
+		return;
+	}
+	v_msg = (struct i40e_virtchnl_msg *)&event.desc;
+	do {
+		ret = i40evf_clean_arq_element(hw, &event, &pending);
+		if (ret)
+			break; /* No event to process or error cleaning ARQ */
+
+		i40evf_virtchnl_completion(adapter, v_msg->v_opcode,
+					   v_msg->v_retval, event.msg_buf,
+					   event.msg_size);
+		if (pending != 0) {
+			dev_info(&adapter->pdev->dev,
+				 "%s: ARQ: Pending events %d\n",
+				 __func__, pending);
+			memset(event.msg_buf, 0, I40EVF_MAX_AQ_BUF_SIZE);
+		}
+	} while (pending);
+
+	/* re-enable Admin queue interrupt cause */
+	i40evf_misc_irq_enable(adapter);
+
+	kfree(event.msg_buf);
+}
+
+/**
+ * i40evf_free_all_tx_resources - Free Tx Resources for All Queues
+ * @adapter: board private structure
+ *
+ * Free all transmit software resources
+ **/
+static void i40evf_free_all_tx_resources(struct i40evf_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++)
+		if (adapter->tx_rings[i]->desc)
+			i40evf_free_tx_resources(adapter->tx_rings[i]);
+
+}
+
+/**
+ * i40evf_setup_all_tx_resources - allocate all queues Tx resources
+ * @adapter: board private structure
+ *
+ * If this function returns with an error, then it's possible one or
+ * more of the rings is populated (while the rest are not).  It is the
+ * caller's duty to clean those orphaned rings.
+ *
+ * Return 0 on success, negative on failure
+ **/
+static int i40evf_setup_all_tx_resources(struct i40evf_adapter *adapter)
+{
+	int i, err = 0;
+
+	for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) {
+		err = i40evf_setup_tx_descriptors(adapter->tx_rings[i]);
+		if (!err)
+			continue;
+		dev_err(&adapter->pdev->dev,
+			"%s: Allocation for Tx Queue %u failed\n",
+			__func__, i);
+		break;
+	}
+
+	return err;
+}
+
+/**
+ * i40evf_setup_all_rx_resources - allocate all queues Rx resources
+ * @adapter: board private structure
+ *
+ * If this function returns with an error, then it's possible one or
+ * more of the rings is populated (while the rest are not).  It is the
+ * caller's duty to clean those orphaned rings.
+ *
+ * Return 0 on success, negative on failure
+ **/
+static int i40evf_setup_all_rx_resources(struct i40evf_adapter *adapter)
+{
+	int i, err = 0;
+
+	for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) {
+		err = i40evf_setup_rx_descriptors(adapter->rx_rings[i]);
+		if (!err)
+			continue;
+		dev_err(&adapter->pdev->dev,
+			"%s: Allocation for Rx Queue %u failed\n",
+			__func__, i);
+		break;
+	}
+	return err;
+}
+
+/**
+ * i40evf_free_all_rx_resources - Free Rx Resources for All Queues
+ * @adapter: board private structure
+ *
+ * Free all receive software resources
+ **/
+static void i40evf_free_all_rx_resources(struct i40evf_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++)
+		if (adapter->rx_rings[i]->desc)
+			i40evf_free_rx_resources(adapter->rx_rings[i]);
+}
+
+/**
+ * i40evf_open - Called when a network interface is made active
+ * @netdev: network interface device structure
+ *
+ * Returns 0 on success, negative value on failure
+ *
+ * The open entry point is called when a network interface is made
+ * active by the system (IFF_UP).  At this point all resources needed
+ * for transmit and receive operations are allocated, the interrupt
+ * handler is registered with the OS, the watchdog timer is started,
+ * and the stack is notified that the interface is ready.
+ **/
+static int i40evf_open(struct net_device *netdev)
+{
+	struct i40evf_adapter *adapter = netdev_priv(netdev);
+	int err;
+
+	if (adapter->state != __I40EVF_DOWN)
+		return -EBUSY;
+
+	/* allocate transmit descriptors */
+	err = i40evf_setup_all_tx_resources(adapter);
+	if (err)
+		goto err_setup_tx;
+
+	/* allocate receive descriptors */
+	err = i40evf_setup_all_rx_resources(adapter);
+	if (err)
+		goto err_setup_rx;
+
+	/* clear any pending interrupts, may auto mask */
+	err = i40evf_request_traffic_irqs(adapter, netdev->name);
+	if (err)
+		goto err_req_irq;
+
+	i40evf_configure(adapter);
+
+	err = i40evf_up_complete(adapter);
+	if (err)
+		goto err_req_irq;
+
+	i40evf_irq_enable(adapter, true);
+
+	return 0;
+
+err_req_irq:
+	i40evf_down(adapter);
+	i40evf_free_traffic_irqs(adapter);
+err_setup_rx:
+	i40evf_free_all_rx_resources(adapter);
+err_setup_tx:
+	i40evf_free_all_tx_resources(adapter);
+
+	return err;
+}
+
+/**
+ * i40evf_close - Disables a network interface
+ * @netdev: network interface device structure
+ *
+ * Returns 0, this is not allowed to fail
+ *
+ * The close entry point is called when an interface is de-activated
+ * by the OS.  The hardware is still under the drivers control, but
+ * needs to be disabled. All IRQs except vector 0 (reserved for admin queue)
+ * are freed, along with all transmit and receive resources.
+ **/
+static int i40evf_close(struct net_device *netdev)
+{
+	struct i40evf_adapter *adapter = netdev_priv(netdev);
+
+	/* signal that we are down to the interrupt handler */
+	adapter->state = __I40EVF_DOWN;
+	set_bit(__I40E_DOWN, &adapter->vsi.state);
+
+	i40evf_down(adapter);
+	i40evf_free_traffic_irqs(adapter);
+
+	i40evf_free_all_tx_resources(adapter);
+	i40evf_free_all_rx_resources(adapter);
+
+	return 0;
+}
+
+/**
+ * i40evf_get_stats - Get System Network Statistics
+ * @netdev: network interface device structure
+ *
+ * Returns the address of the device statistics structure.
+ * The statistics are actually updated from the timer callback.
+ **/
+static struct net_device_stats *i40evf_get_stats(struct net_device *netdev)
+{
+	struct i40evf_adapter *adapter = netdev_priv(netdev);
+
+	/* only return the current stats */
+	return &adapter->net_stats;
+}
+
+/**
+ * i40evf_reinit_locked - Software reinit
+ * @adapter: board private structure
+ *
+ * Reinitializes the ring structures in response to a software configuration
+ * change. Roughly the same as close followed by open, but skips releasing
+ * and reallocating the interrupts.
+ **/
+void i40evf_reinit_locked(struct i40evf_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	int err;
+
+	WARN_ON(in_interrupt());
+
+	adapter->state = __I40EVF_RESETTING;
+
+	i40evf_down(adapter);
+
+	/* allocate transmit descriptors */
+	err = i40evf_setup_all_tx_resources(adapter);
+	if (err)
+		goto err_reinit;
+
+	/* allocate receive descriptors */
+	err = i40evf_setup_all_rx_resources(adapter);
+	if (err)
+		goto err_reinit;
+
+	i40evf_configure(adapter);
+
+	err = i40evf_up_complete(adapter);
+	if (err)
+		goto err_reinit;
+
+	i40evf_irq_enable(adapter, true);
+	return;
+
+err_reinit:
+	dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit.\n");
+	i40evf_close(netdev);
+}
+
+/**
+ * i40evf_change_mtu - Change the Maximum Transfer Unit
+ * @netdev: network interface device structure
+ * @new_mtu: new value for maximum frame size
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int i40evf_change_mtu(struct net_device *netdev, int new_mtu)
+{
+	struct i40evf_adapter *adapter = netdev_priv(netdev);
+	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
+
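+	/* 68 bytes is the minimum IPv4 MTU; the upper bound is the largest
+	 * Rx buffer the hardware supports.
+	 */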
+	if ((new_mtu < 68) || (max_frame > I40E_MAX_RXBUFFER))
+		return -EINVAL;
+
+	/* must set new MTU before calling down or up */
+	netdev->mtu = new_mtu;
+	i40evf_reinit_locked(adapter);
+	return 0;
+}
+
+static const struct net_device_ops i40evf_netdev_ops = {
+	.ndo_open		= i40evf_open,
+	.ndo_stop		= i40evf_close,
+	.ndo_start_xmit		= i40evf_xmit_frame,
+	.ndo_get_stats		= i40evf_get_stats,
+	.ndo_set_rx_mode	= i40evf_set_rx_mode,
+	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_set_mac_address	= i40evf_set_mac,
+	.ndo_change_mtu		= i40evf_change_mtu,
+	.ndo_tx_timeout		= i40evf_tx_timeout,
+	.ndo_vlan_rx_add_vid	= i40evf_vlan_rx_add_vid,
+	.ndo_vlan_rx_kill_vid	= i40evf_vlan_rx_kill_vid,
+};
+
+/**
+ * i40evf_check_reset_complete - check that VF reset is complete
+ * @hw: pointer to hw struct
+ *
+ * Returns 0 if device is ready to use, or -EBUSY if it's in reset.
+ **/
+static int i40evf_check_reset_complete(struct i40e_hw *hw)
+{
+	u32 rstat;
+	int i;
+
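+	/* poll for up to ~1ms (100 * 10us) for the PF to mark the VF active */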
+	for (i = 0; i < 100; i++) {
+		rstat = rd32(hw, I40E_VFGEN_RSTAT);
+		if (rstat == I40E_VFR_VFACTIVE)
+			return 0;
+		udelay(10);
+	}
+	return -EBUSY;
+}
+
+/**
+ * i40evf_init_task - worker thread to perform delayed initialization
+ * @work: pointer to work_struct containing our data
+ *
+ * This task completes the work that was begun in probe. Due to the nature
+ * of VF-PF communications, we may need to wait tens of milliseconds to get
+ * responses back from the PF. Rather than busy-wait in probe and bog down the
+ * whole system, we'll do it in a task so we can sleep.
+ * This task only runs during driver init. Once we've established
+ * communications with the PF driver and set up our netdev, the watchdog
+ * takes over.
+ **/
+static void i40evf_init_task(struct work_struct *work)
+{
+	struct i40evf_adapter *adapter = container_of(work,
+						      struct i40evf_adapter,
+						      init_task.work);
+	struct net_device *netdev = adapter->netdev;
+	struct i40evf_mac_filter *f;
+	struct i40e_hw *hw = &adapter->hw;
+	struct pci_dev *pdev = adapter->pdev;
+	int i, err, bufsz;
+
+	switch (adapter->state) {
+	case __I40EVF_STARTUP:
+		/* driver loaded, probe complete */
+		err = i40e_set_mac_type(hw);
+		if (err) {
+			dev_info(&pdev->dev, "%s: set_mac_type failed: %d\n",
+				__func__, err);
+			goto err;
+		}
+		err = i40evf_check_reset_complete(hw);
+		if (err) {
+			dev_info(&pdev->dev, "%s: device is still in reset (%d).\n",
+				__func__, err);
+			goto err;
+		}
+		hw->aq.num_arq_entries = I40EVF_AQ_LEN;
+		hw->aq.num_asq_entries = I40EVF_AQ_LEN;
+		hw->aq.arq_buf_size = I40EVF_MAX_AQ_BUF_SIZE;
+		hw->aq.asq_buf_size = I40EVF_MAX_AQ_BUF_SIZE;
+
+		err = i40evf_init_adminq(hw);
+		if (err) {
+			dev_info(&pdev->dev, "%s: init_adminq failed: %d\n",
+				__func__, err);
+			goto err;
+		}
+		err = i40evf_send_api_ver(adapter);
+		if (err) {
+			dev_info(&pdev->dev, "%s: unable to send to PF (%d)\n",
+				__func__, err);
+			i40evf_shutdown_adminq(hw);
+			goto err;
+		}
+		adapter->state = __I40EVF_INIT_VERSION_CHECK;
+		goto restart;
+		break;
+	case __I40EVF_INIT_VERSION_CHECK:
+		if (!i40evf_asq_done(hw))
+			goto err;
+
+		/* aq msg sent, awaiting reply */
+		err = i40evf_verify_api_ver(adapter);
+		if (err) {
+			dev_err(&pdev->dev, "Unable to verify API version, error %d\n",
+				err);
+			goto err;
+		}
+		err = i40evf_send_vf_config_msg(adapter);
+		if (err) {
+			dev_err(&pdev->dev, "Unable to send config request, error %d\n",
+				err);
+			goto err;
+		}
+		adapter->state = __I40EVF_INIT_GET_RESOURCES;
+		goto restart;
+		break;
+	case __I40EVF_INIT_GET_RESOURCES:
+		/* aq msg sent, awaiting reply */
+		if (!adapter->vf_res) {
+			bufsz = sizeof(struct i40e_virtchnl_vf_resource) +
+				(I40E_MAX_VF_VSI *
+				 sizeof(struct i40e_virtchnl_vsi_resource));
+			adapter->vf_res = kzalloc(bufsz, GFP_KERNEL);
+			if (!adapter->vf_res) {
+				dev_err(&pdev->dev, "%s: unable to allocate memory\n",
+					__func__);
+				goto err;
+			}
+		}
+		err = i40evf_get_vf_config(adapter);
+		if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK)
+			goto restart;
+		if (err) {
+			dev_info(&pdev->dev, "%s: unable to get VF config (%d)\n",
+				__func__, err);
+			goto err_alloc;
+		}
+		adapter->state = __I40EVF_INIT_SW;
+		break;
+	default:
+		goto err_alloc;
+	}
+	/* got VF config message back from PF, now we can parse it */
+	for (i = 0; i < adapter->vf_res->num_vsis; i++) {
+		if (adapter->vf_res->vsi_res[i].vsi_type == I40E_VSI_SRIOV)
+			adapter->vsi_res = &adapter->vf_res->vsi_res[i];
+	}
+	if (!adapter->vsi_res) {
+		dev_info(&pdev->dev, "%s: no LAN VSI found\n", __func__);
+		goto err_alloc;
+	}
+
+	adapter->flags |= I40EVF_FLAG_RX_CSUM_ENABLED;
+
+	adapter->txd_count = I40EVF_DEFAULT_TXD;
+	adapter->rxd_count = I40EVF_DEFAULT_RXD;
+
+	netdev->netdev_ops = &i40evf_netdev_ops;
+	i40evf_set_ethtool_ops(netdev);
+	netdev->watchdog_timeo = 5 * HZ;
+
+	netdev->features |= NETIF_F_SG |
+			    NETIF_F_IP_CSUM |
+			    NETIF_F_SCTP_CSUM |
+			    NETIF_F_IPV6_CSUM |
+			    NETIF_F_TSO |
+			    NETIF_F_TSO6 |
+			    NETIF_F_GRO;
+
+	if (adapter->vf_res->vf_offload_flags
+	    & I40E_VIRTCHNL_VF_OFFLOAD_VLAN) {
+		netdev->vlan_features = netdev->features;
+		netdev->features |= NETIF_F_HW_VLAN_CTAG_TX |
+				    NETIF_F_HW_VLAN_CTAG_RX |
+				    NETIF_F_HW_VLAN_CTAG_FILTER;
+	}
+
+	/* The HW MAC address was set and/or determined in sw_init */
+	if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
+		dev_info(&pdev->dev,
+			"Invalid MAC address %pM, using random\n",
+			adapter->hw.mac.addr);
+		random_ether_addr(adapter->hw.mac.addr);
+	}
+	memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
+	memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);
+
+	INIT_LIST_HEAD(&adapter->mac_filter_list);
+	INIT_LIST_HEAD(&adapter->vlan_filter_list);
+	f = kzalloc(sizeof(*f), GFP_ATOMIC);
+	if (!f)
+		goto err_sw_init;
+
+	memcpy(f->macaddr, adapter->hw.mac.addr, ETH_ALEN);
+	f->add = true;
+	adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;
+
+	list_add(&f->list, &adapter->mac_filter_list);
+
+	init_timer(&adapter->watchdog_timer);
+	adapter->watchdog_timer.function = &i40evf_watchdog_timer;
+	adapter->watchdog_timer.data = (unsigned long)adapter;
+	mod_timer(&adapter->watchdog_timer, jiffies + 1);
+
+	err = i40evf_init_interrupt_scheme(adapter);
+	if (err)
+		goto err_sw_init;
+	i40evf_map_rings_to_vectors(adapter);
+	i40evf_configure_rss(adapter);
+	err = i40evf_request_misc_irq(adapter);
+	if (err)
+		goto err_sw_init;
+
+	netif_carrier_off(netdev);
+
+	strcpy(netdev->name, "eth%d");
+
+	adapter->vsi.id = adapter->vsi_res->vsi_id;
+	adapter->vsi.seid = adapter->vsi_res->vsi_id; /* dummy */
+	adapter->vsi.back = adapter;
+	adapter->vsi.base_vector = 1;
+	adapter->vsi.work_limit = I40E_DEFAULT_IRQ_WORK;
+	adapter->vsi.rx_itr_setting = I40E_ITR_DYNAMIC;
+	adapter->vsi.tx_itr_setting = I40E_ITR_DYNAMIC;
+	adapter->vsi.netdev = adapter->netdev;
+
+	err = register_netdev(netdev);
+	if (err)
+		goto err_register;
+
+	adapter->netdev_registered = true;
+
+	netif_tx_stop_all_queues(netdev);
+
+	dev_info(&pdev->dev, "MAC address: %pM\n", adapter->hw.mac.addr);
+	if (netdev->features & NETIF_F_GRO)
+		dev_info(&pdev->dev, "GRO is enabled\n");
+
+	dev_info(&pdev->dev, "%s\n", i40evf_driver_string);
+	adapter->state = __I40EVF_DOWN;
+	set_bit(__I40E_DOWN, &adapter->vsi.state);
+	i40evf_misc_irq_enable(adapter);
+	return;
+restart:
+	schedule_delayed_work(&adapter->init_task,
+			      msecs_to_jiffies(50));
+	return;
+
+err_register:
+	i40evf_free_misc_irq(adapter);
+err_sw_init:
+	i40evf_reset_interrupt_capability(adapter);
+	adapter->state = __I40EVF_FAILED;
+err_alloc:
+	kfree(adapter->vf_res);
+	adapter->vf_res = NULL;
+err:
+	/* Things went into the weeds, so try again later */
+	if (++adapter->aq_wait_count > I40EVF_AQ_MAX_ERR) {
+		dev_err(&pdev->dev, "Failed to communicate with PF; giving up.\n");
+		if (hw->aq.asq.count)
+			i40evf_shutdown_adminq(hw); /* ignore error */
+		adapter->state = __I40EVF_FAILED;
+		return; /* do not reschedule */
+	}
+	schedule_delayed_work(&adapter->init_task, HZ * 3);
+	return;
+}
+
+/**
+ * i40evf_shutdown - Shutdown the device in preparation for a reboot
+ * @pdev: pci device structure
+ **/
+static void i40evf_shutdown(struct pci_dev *pdev)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+
+	netif_device_detach(netdev);
+
+	if (netif_running(netdev))
+		i40evf_close(netdev);
+
+#ifdef CONFIG_PM
+	pci_save_state(pdev);
+
+#endif
+	pci_disable_device(pdev);
+}
+
+/**
+ * i40evf_probe - Device Initialization Routine
+ * @pdev: PCI device information struct
+ * @ent: entry in i40evf_pci_tbl
+ *
+ * Returns 0 on success, negative on failure
+ *
+ * i40evf_probe initializes an adapter identified by a pci_dev structure.
+ * The OS initialization, configuring of the adapter private structure,
+ * and a hardware reset occur.
+ **/
+static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	struct net_device *netdev;
+	struct i40evf_adapter *adapter = NULL;
+	struct i40e_hw *hw = NULL;
+	int err, pci_using_dac;
+
+	err = pci_enable_device(pdev);
+	if (err)
+		return err;
+
+	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
+		pci_using_dac = true;
+		/* coherent mask for the same size will always succeed if
+		 * dma_set_mask does
+		 */
+		dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
+	} else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
+		pci_using_dac = false;
+		dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+	} else {
+		err = -EIO;
+		dev_err(&pdev->dev, "%s: DMA configuration failed: %d\n",
+			__func__, err);
+		goto err_dma;
+	}
+
+	err = pci_request_regions(pdev, i40evf_driver_name);
+	if (err) {
+		dev_err(&pdev->dev,
+			"pci_request_regions failed 0x%x\n", err);
+		goto err_pci_reg;
+	}
+
+	pci_enable_pcie_error_reporting(pdev);
+
+	pci_set_master(pdev);
+
+	netdev = alloc_etherdev_mq(sizeof(struct i40evf_adapter),
+				   MAX_TX_QUEUES);
+	if (!netdev) {
+		err = -ENOMEM;
+		goto err_alloc_etherdev;
+	}
+
+	SET_NETDEV_DEV(netdev, &pdev->dev);
+
+	pci_set_drvdata(pdev, netdev);
+	adapter = netdev_priv(netdev);
+	if (pci_using_dac)
+		netdev->features |= NETIF_F_HIGHDMA;
+
+	adapter->netdev = netdev;
+	adapter->pdev = pdev;
+
+	hw = &adapter->hw;
+	hw->back = adapter;
+
+	adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
+	adapter->state = __I40EVF_STARTUP;
+
+	/* Call save state here because it relies on the adapter struct. */
+	pci_save_state(pdev);
+
+	hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
+			      pci_resource_len(pdev, 0));
+	if (!hw->hw_addr) {
+		err = -EIO;
+		goto err_ioremap;
+	}
+	hw->vendor_id = pdev->vendor;
+	hw->device_id = pdev->device;
+	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
+	hw->subsystem_vendor_id = pdev->subsystem_vendor;
+	hw->subsystem_device_id = pdev->subsystem_device;
+	hw->bus.device = PCI_SLOT(pdev->devfn);
+	hw->bus.func = PCI_FUNC(pdev->devfn);
+
+	INIT_WORK(&adapter->reset_task, i40evf_reset_task);
+	INIT_WORK(&adapter->adminq_task, i40evf_adminq_task);
+	INIT_WORK(&adapter->watchdog_task, i40evf_watchdog_task);
+	INIT_DELAYED_WORK(&adapter->init_task, i40evf_init_task);
+	schedule_delayed_work(&adapter->init_task, 10);
+
+	return 0;
+
+err_ioremap:
+	free_netdev(netdev);
+err_alloc_etherdev:
+	pci_release_regions(pdev);
+err_pci_reg:
+err_dma:
+	pci_disable_device(pdev);
+	return err;
+}
+
+#ifdef CONFIG_PM
+/**
+ * i40evf_suspend - Power management suspend routine
+ * @pdev: PCI device information struct
+ * @state: unused
+ *
+ * Called when the system (VM) is entering sleep/suspend.
+ **/
+static int i40evf_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct i40evf_adapter *adapter = netdev_priv(netdev);
+	int retval = 0;
+
+	netif_device_detach(netdev);
+
+	if (netif_running(netdev)) {
+		rtnl_lock();
+		i40evf_down(adapter);
+		rtnl_unlock();
+	}
+	i40evf_free_misc_irq(adapter);
+	i40evf_reset_interrupt_capability(adapter);
+
+	retval = pci_save_state(pdev);
+	if (retval)
+		return retval;
+
+	pci_disable_device(pdev);
+
+	return 0;
+}
+
+/**
+ * i40evf_resume - Power management resume routine
+ * @pdev: PCI device information struct
+ *
+ * Called when the system (VM) is resumed from sleep/suspend.
+ **/
+static int i40evf_resume(struct pci_dev *pdev)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct i40evf_adapter *adapter = netdev_priv(netdev);
+	int err;
+
+	pci_set_power_state(pdev, PCI_D0);
+	pci_restore_state(pdev);
+	/* pci_restore_state clears dev->state_saved so call
+	 * pci_save_state to restore it.
+	 */
+	pci_save_state(pdev);
+
+	err = pci_enable_device_mem(pdev);
+	if (err) {
+		dev_err(&pdev->dev, "Cannot enable PCI device from suspend.\n");
+		return err;
+	}
+	pci_set_master(pdev);
+
+	rtnl_lock();
+	err = i40evf_set_interrupt_capability(adapter);
+	if (err) {
+		dev_err(&pdev->dev, "Cannot enable MSI-X interrupts.\n");
+		return err;
+	}
+	err = i40evf_request_misc_irq(adapter);
+	rtnl_unlock();
+	if (err) {
+		dev_err(&pdev->dev, "Cannot get interrupt vector.\n");
+		return err;
+	}
+
+	schedule_work(&adapter->reset_task);
+
+	netif_device_attach(netdev);
+
+	return err;
+}
+
+#endif /* CONFIG_PM */
+/**
+ * i40evf_remove - Device Removal Routine
+ * @pdev: PCI device information struct
+ *
+ * i40evf_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device.  This could be caused by a
+ * Hot-Plug event, or because the driver is going to be removed from
+ * memory.
+ **/
+static void i40evf_remove(struct pci_dev *pdev)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct i40evf_adapter *adapter = netdev_priv(netdev);
+	struct i40e_hw *hw = &adapter->hw;
+
+	cancel_delayed_work_sync(&adapter->init_task);
+
+	if (adapter->netdev_registered) {
+		unregister_netdev(netdev);
+		adapter->netdev_registered = false;
+	}
+	adapter->state = __I40EVF_REMOVE;
+
+	if (adapter->num_msix_vectors) {
+		i40evf_misc_irq_disable(adapter);
+		del_timer_sync(&adapter->watchdog_timer);
+
+		flush_scheduled_work();
+
+		i40evf_free_misc_irq(adapter);
+
+		i40evf_reset_interrupt_capability(adapter);
+	}
+
+	if (hw->aq.asq.count)
+		i40evf_shutdown_adminq(hw);
+
+	iounmap(hw->hw_addr);
+	pci_release_regions(pdev);
+
+	i40evf_free_queues(adapter);
+	kfree(adapter->vf_res);
+
+	free_netdev(netdev);
+
+	pci_disable_pcie_error_reporting(pdev);
+
+	pci_disable_device(pdev);
+}
+
+static struct pci_driver i40evf_driver = {
+	.name     = i40evf_driver_name,
+	.id_table = i40evf_pci_tbl,
+	.probe    = i40evf_probe,
+	.remove   = i40evf_remove,
+#ifdef CONFIG_PM
+	.suspend  = i40evf_suspend,
+	.resume   = i40evf_resume,
+#endif
+	.shutdown = i40evf_shutdown,
+};
+
+/**
+ * i40evf_init_module - Driver Registration Routine
+ *
+ * i40evf_init_module is the first routine called when the driver is
+ * loaded. All it does is register with the PCI subsystem.
+ **/
+static int __init i40evf_init_module(void)
+{
+	int ret;
+
+	pr_info("i40evf: %s - version %s\n", i40evf_driver_string,
+	       i40evf_driver_version);
+
+	pr_info("%s\n", i40evf_copyright);
+
+	ret = pci_register_driver(&i40evf_driver);
+	return ret;
+}
+
+module_init(i40evf_init_module);
+
+/**
+ * i40evf_exit_module - Driver Exit Cleanup Routine
+ *
+ * i40evf_exit_module is called just before the driver is removed
+ * from memory.
+ **/
+static void __exit i40evf_exit_module(void)
+{
+	pci_unregister_driver(&i40evf_driver);
+}
+
+module_exit(i40evf_exit_module);
+
+/* i40evf_main.c */
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
new file mode 100644
index 000000000000..e6978d79e62b
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
@@ -0,0 +1,772 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#include "i40evf.h"
+#include "i40e_prototype.h"
+
+/* busy wait delay in msec */
+#define I40EVF_BUSY_WAIT_DELAY 10
+#define I40EVF_BUSY_WAIT_COUNT 50
+
+/**
+ * i40evf_send_pf_msg
+ * @adapter: adapter structure
+ * @op: virtual channel opcode
+ * @msg: pointer to message buffer
+ * @len: message length
+ *
+ * Send message to PF and print status if failure.
+ **/
+static int i40evf_send_pf_msg(struct i40evf_adapter *adapter,
+			      enum i40e_virtchnl_ops op, u8 *msg, u16 len)
+{
+	struct i40e_hw *hw = &adapter->hw;
+	i40e_status err;
+
+	err = i40e_aq_send_msg_to_pf(hw, op, 0, msg, len, NULL);
+	if (err)
+		dev_err(&adapter->pdev->dev, "Unable to send opcode %d to PF, error %d, aq status %d\n",
+			op, err, hw->aq.asq_last_status);
+	return err;
+}
+
+/**
+ * i40evf_send_api_ver
+ * @adapter: adapter structure
+ *
+ * Send API version admin queue message to the PF. The reply is not checked
+ * in this function. Returns 0 if the message was successfully
+ * sent, or one of the I40E_ADMIN_QUEUE_ERROR_ statuses if not.
+ **/
+int i40evf_send_api_ver(struct i40evf_adapter *adapter)
+{
+	struct i40e_virtchnl_version_info vvi;
+
+	vvi.major = I40E_VIRTCHNL_VERSION_MAJOR;
+	vvi.minor = I40E_VIRTCHNL_VERSION_MINOR;
+
+	return i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_VERSION, (u8 *)&vvi,
+				  sizeof(vvi));
+}
+
+/**
+ * i40evf_verify_api_ver
+ * @adapter: adapter structure
+ *
+ * Compare API versions with the PF. Must be called after admin queue is
+ * initialized. Returns 0 if API versions match, -EIO if
+ * they do not, or I40E_ERR_ADMIN_QUEUE_NO_WORK if the admin queue is empty.
+ **/
+int i40evf_verify_api_ver(struct i40evf_adapter *adapter)
+{
+	struct i40e_virtchnl_version_info *pf_vvi;
+	struct i40e_hw *hw = &adapter->hw;
+	struct i40e_arq_event_info event;
+	i40e_status err;
+
+	event.msg_size = I40EVF_MAX_AQ_BUF_SIZE;
+	event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL);
+	if (!event.msg_buf) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	err = i40evf_clean_arq_element(hw, &event, NULL);
+	if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK)
+		goto out_alloc;
+
+	err = (i40e_status)le32_to_cpu(event.desc.cookie_low);
+	if (err) {
+		err = -EIO;
+		goto out_alloc;
+	}
+
+	if ((enum i40e_virtchnl_ops)le32_to_cpu(event.desc.cookie_high) !=
+	    I40E_VIRTCHNL_OP_VERSION) {
+		err = -EIO;
+		goto out_alloc;
+	}
+
+	pf_vvi = (struct i40e_virtchnl_version_info *)event.msg_buf;
+	if ((pf_vvi->major != I40E_VIRTCHNL_VERSION_MAJOR) ||
+	    (pf_vvi->minor != I40E_VIRTCHNL_VERSION_MINOR))
+		err = -EIO;
+
+out_alloc:
+	kfree(event.msg_buf);
+out:
+	return err;
+}
+
+/**
+ * i40evf_send_vf_config_msg
+ * @adapter: adapter structure
+ *
+ * Send VF configuration request admin queue message to the PF. The reply
+ * is not checked in this function. Returns 0 if the message was
+ * successfully sent, or one of the I40E_ADMIN_QUEUE_ERROR_ statuses if not.
+ **/
+int i40evf_send_vf_config_msg(struct i40evf_adapter *adapter)
+{
+	return i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
+				  NULL, 0);
+}
+
+/**
+ * i40evf_get_vf_config
+ * @adapter: private adapter structure
+ *
+ * Get VF configuration from PF and populate hw structure. Must be called
+ * after admin queue is initialized. If the admin queue is empty this returns
+ * I40E_ERR_ADMIN_QUEUE_NO_WORK so the caller can retry; otherwise the PF's
+ * response is copied into adapter->vf_res for further processing.
+ **/
+int i40evf_get_vf_config(struct i40evf_adapter *adapter)
+{
+	struct i40e_hw *hw = &adapter->hw;
+	struct i40e_arq_event_info event;
+	u16 len;
+	i40e_status err;
+
+	len =  sizeof(struct i40e_virtchnl_vf_resource) +
+		I40E_MAX_VF_VSI * sizeof(struct i40e_virtchnl_vsi_resource);
+	event.msg_size = len;
+	event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL);
+	if (!event.msg_buf) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	err = i40evf_clean_arq_element(hw, &event, NULL);
+	if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK)
+		goto out_alloc;
+
+	err = (i40e_status)le32_to_cpu(event.desc.cookie_low);
+	if (err) {
+		dev_err(&adapter->pdev->dev,
+			"%s: Error returned from PF, %d, %d\n", __func__,
+			le32_to_cpu(event.desc.cookie_high),
+			le32_to_cpu(event.desc.cookie_low));
+		err = -EIO;
+		goto out_alloc;
+	}
+
+	if ((enum i40e_virtchnl_ops)le32_to_cpu(event.desc.cookie_high) !=
+	    I40E_VIRTCHNL_OP_GET_VF_RESOURCES) {
+		dev_err(&adapter->pdev->dev,
+			"%s: Invalid response from PF, %d, %d\n", __func__,
+			le32_to_cpu(event.desc.cookie_high),
+			le32_to_cpu(event.desc.cookie_low));
+		err = -EIO;
+		goto out_alloc;
+	}
+	memcpy(adapter->vf_res, event.msg_buf, min(event.msg_size, len));
+
+	i40e_vf_parse_hw_config(hw, adapter->vf_res);
+out_alloc:
+	kfree(event.msg_buf);
+out:
+	return err;
+}
+
+/**
+ * i40evf_configure_queues
+ * @adapter: adapter structure
+ *
+ * Request that the PF set up our (previously allocated) queues.
+ **/
+void i40evf_configure_queues(struct i40evf_adapter *adapter)
+{
+	struct i40e_virtchnl_vsi_queue_config_info *vqci;
+	struct i40e_virtchnl_queue_pair_info *vqpi;
+	int pairs = adapter->vsi_res->num_queue_pairs;
+	int i, len;
+
+	if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+		/* bail because we already have a command pending */
+		dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
+			__func__, adapter->current_op);
+		return;
+	}
+	adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES;
+	len = sizeof(struct i40e_virtchnl_vsi_queue_config_info) +
+		       (sizeof(struct i40e_virtchnl_queue_pair_info) * pairs);
+	vqci = kzalloc(len, GFP_ATOMIC);
+	if (!vqci) {
+		dev_err(&adapter->pdev->dev, "%s: unable to allocate memory\n",
+			__func__);
+		return;
+	}
+	vqci->vsi_id = adapter->vsi_res->vsi_id;
+	vqci->num_queue_pairs = pairs;
+	vqpi = vqci->qpair;
+	/* Size check is not needed here - HW max is 16 queue pairs, and we
+	 * can fit info for 31 of them into the AQ buffer before it overflows.
+	 */
+	for (i = 0; i < pairs; i++) {
+		vqpi->txq.vsi_id = vqci->vsi_id;
+		vqpi->txq.queue_id = i;
+		vqpi->txq.ring_len = adapter->tx_rings[i]->count;
+		vqpi->txq.dma_ring_addr = adapter->tx_rings[i]->dma;
+
+		vqpi->rxq.vsi_id = vqci->vsi_id;
+		vqpi->rxq.queue_id = i;
+		vqpi->rxq.ring_len = adapter->rx_rings[i]->count;
+		vqpi->rxq.dma_ring_addr = adapter->rx_rings[i]->dma;
+		vqpi->rxq.max_pkt_size = adapter->netdev->mtu
+					+ ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN;
+		vqpi->rxq.databuffer_size = adapter->rx_rings[i]->rx_buf_len;
+		vqpi++;
+	}
+
+	i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
+			   (u8 *)vqci, len);
+	kfree(vqci);
+	adapter->aq_pending |= I40EVF_FLAG_AQ_CONFIGURE_QUEUES;
+	adapter->aq_required &= ~I40EVF_FLAG_AQ_CONFIGURE_QUEUES;
+}
+
+/**
+ * i40evf_enable_queues
+ * @adapter: adapter structure
+ *
+ * Request that the PF enable all of our queues.
+ **/
+void i40evf_enable_queues(struct i40evf_adapter *adapter)
+{
+	struct i40e_virtchnl_queue_select vqs;
+
+	if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+		/* bail because we already have a command pending */
+		dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
+			__func__, adapter->current_op);
+		return;
+	}
+	adapter->current_op = I40E_VIRTCHNL_OP_ENABLE_QUEUES;
+	vqs.vsi_id = adapter->vsi_res->vsi_id;
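+	/* one bit per queue pair: (1 << n) - 1 sets the n lowest bits */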
+	vqs.tx_queues = (1 << adapter->vsi_res->num_queue_pairs) - 1;
+	vqs.rx_queues = vqs.tx_queues;
+	i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
+			   (u8 *)&vqs, sizeof(vqs));
+	adapter->aq_pending |= I40EVF_FLAG_AQ_ENABLE_QUEUES;
+	adapter->aq_required &= ~I40EVF_FLAG_AQ_ENABLE_QUEUES;
+}
+
+/**
+ * i40evf_disable_queues
+ * @adapter: adapter structure
+ *
+ * Request that the PF disable all of our queues.
+ **/
+void i40evf_disable_queues(struct i40evf_adapter *adapter)
+{
+	struct i40e_virtchnl_queue_select vqs;
+
+	if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+		/* bail because we already have a command pending */
+		dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
+			__func__, adapter->current_op);
+		return;
+	}
+	adapter->current_op = I40E_VIRTCHNL_OP_DISABLE_QUEUES;
+	vqs.vsi_id = adapter->vsi_res->vsi_id;
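+	/* same per-queue-pair bitmask as in i40evf_enable_queues() */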
+	vqs.tx_queues = (1 << adapter->vsi_res->num_queue_pairs) - 1;
+	vqs.rx_queues = vqs.tx_queues;
+	i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
+			   (u8 *)&vqs, sizeof(vqs));
+	adapter->aq_pending |= I40EVF_FLAG_AQ_DISABLE_QUEUES;
+	adapter->aq_required &= ~I40EVF_FLAG_AQ_DISABLE_QUEUES;
+}
+
+/**
+ * i40evf_map_queues
+ * @adapter: adapter structure
+ *
+ * Request that the PF map queues to interrupt vectors. Misc causes, including
+ * admin queue, are always mapped to vector 0.
+ **/
+void i40evf_map_queues(struct i40evf_adapter *adapter)
+{
+	struct i40e_virtchnl_irq_map_info *vimi;
+	int v_idx, q_vectors, len;
+	struct i40e_q_vector *q_vector;
+
+	if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+		/* bail because we already have a command pending */
+		dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
+			__func__, adapter->current_op);
+		return;
+	}
+	adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP;
+
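+	/* queue vectors: all MSI-X vectors except the non-queue (admin) one */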
+	q_vectors = adapter->num_msix_vectors - NONQ_VECS;
+
+	len = sizeof(struct i40e_virtchnl_irq_map_info) +
+	      (adapter->num_msix_vectors *
+		sizeof(struct i40e_virtchnl_vector_map));
+	vimi = kzalloc(len, GFP_ATOMIC);
+	if (!vimi) {
+		dev_err(&adapter->pdev->dev, "%s: unable to allocate memory\n",
+			__func__);
+		return;
+	}
+
+	vimi->num_vectors = adapter->num_msix_vectors;
+	/* Queue vectors first */
+	for (v_idx = 0; v_idx < q_vectors; v_idx++) {
+		q_vector = adapter->q_vector[v_idx];
+		vimi->vecmap[v_idx].vsi_id = adapter->vsi_res->vsi_id;
+		vimi->vecmap[v_idx].vector_id = v_idx + NONQ_VECS;
+		vimi->vecmap[v_idx].txq_map = q_vector->ring_mask;
+		vimi->vecmap[v_idx].rxq_map = q_vector->ring_mask;
+	}
+	/* Misc vector last - this is only for AdminQ messages */
+	vimi->vecmap[v_idx].vsi_id = adapter->vsi_res->vsi_id;
+	vimi->vecmap[v_idx].vector_id = 0;
+	vimi->vecmap[v_idx].txq_map = 0;
+	vimi->vecmap[v_idx].rxq_map = 0;
+
+	i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
+			   (u8 *)vimi, len);
+	kfree(vimi);
+	adapter->aq_pending |= I40EVF_FLAG_AQ_MAP_VECTORS;
+	adapter->aq_required &= ~I40EVF_FLAG_AQ_MAP_VECTORS;
+}
+
+/**
+ * i40evf_add_ether_addrs
+ * @adapter: adapter structure
+ *
+ * Request that the PF add the MAC address filters in our list that are
+ * marked for addition.
+ **/
+void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
+{
+	struct i40e_virtchnl_ether_addr_list *veal;
+	int len, i = 0, count = 0;
+	struct i40evf_mac_filter *f;
+
+	if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+		/* bail because we already have a command pending */
+		dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
+			__func__, adapter->current_op);
+		return;
+	}
+	list_for_each_entry(f, &adapter->mac_filter_list, list) {
+		if (f->add)
+			count++;
+	}
+	if (!count) {
+		adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER;
+		return;
+	}
+	adapter->current_op = I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS;
+
+	len = sizeof(struct i40e_virtchnl_ether_addr_list) +
+	      (count * sizeof(struct i40e_virtchnl_ether_addr));
+	if (len > I40EVF_MAX_AQ_BUF_SIZE) {
+		dev_warn(&adapter->pdev->dev, "%s: Too many MAC address changes in one request.\n",
+			__func__);
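+		/* clamp count to what fits in a single AQ buffer */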
+		count = (I40EVF_MAX_AQ_BUF_SIZE -
+			 sizeof(struct i40e_virtchnl_ether_addr_list)) /
+			sizeof(struct i40e_virtchnl_ether_addr);
+		len = I40EVF_MAX_AQ_BUF_SIZE;
+	}
+
+	veal = kzalloc(len, GFP_ATOMIC);
+	if (!veal) {
+		dev_err(&adapter->pdev->dev, "%s: unable to allocate memory\n",
+			__func__);
+		return;
+	}
+	veal->vsi_id = adapter->vsi_res->vsi_id;
+	veal->num_elements = count;
+	list_for_each_entry(f, &adapter->mac_filter_list, list) {
+		if (f->add) {
+			memcpy(veal->list[i].addr, f->macaddr, ETH_ALEN);
+			i++;
+			f->add = false;
+		}
+	}
+	i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
+			   (u8 *)veal, len);
+	kfree(veal);
+	adapter->aq_pending |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;
+	adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER;
+
+}
+
+/**
+ * i40evf_del_ether_addrs
+ * @adapter: adapter structure
+ *
+ * Request that the PF remove the MAC address filters in our list that are
+ * marked for removal.
+ **/
+void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
+{
+	struct i40e_virtchnl_ether_addr_list *veal;
+	struct i40evf_mac_filter *f, *ftmp;
+	int len, i = 0, count = 0;
+
+	if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+		/* bail because we already have a command pending */
+		dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
+			__func__, adapter->current_op);
+		return;
+	}
+	list_for_each_entry(f, &adapter->mac_filter_list, list) {
+		if (f->remove)
+			count++;
+	}
+	if (!count) {
+		adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER;
+		return;
+	}
+	adapter->current_op = I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS;
+
+	len = sizeof(struct i40e_virtchnl_ether_addr_list) +
+	      (count * sizeof(struct i40e_virtchnl_ether_addr));
+	if (len > I40EVF_MAX_AQ_BUF_SIZE) {
+		dev_warn(&adapter->pdev->dev, "%s: Too many MAC address changes in one request.\n",
+			__func__);
+		count = (I40EVF_MAX_AQ_BUF_SIZE -
+			 sizeof(struct i40e_virtchnl_ether_addr_list)) /
+			sizeof(struct i40e_virtchnl_ether_addr);
+		len = I40EVF_MAX_AQ_BUF_SIZE;
+	}
+	veal = kzalloc(len, GFP_ATOMIC);
+	if (!veal) {
+		dev_err(&adapter->pdev->dev, "%s: unable to allocate memory\n",
+			__func__);
+		return;
+	}
+	veal->vsi_id = adapter->vsi_res->vsi_id;
+	veal->num_elements = count;
+	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
+		if (f->remove) {
+			memcpy(veal->list[i].addr, f->macaddr, ETH_ALEN);
+			i++;
+			list_del(&f->list);
+			kfree(f);
+		}
+	}
+	i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
+			   (u8 *)veal, len);
+	kfree(veal);
+	adapter->aq_pending |= I40EVF_FLAG_AQ_DEL_MAC_FILTER;
+	adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER;
+}
+
+/**
+ * i40evf_add_vlans
+ * @adapter: adapter structure
+ *
+ * Request that the PF add to our VSI the VLAN filters in our list that are
+ * marked for addition.
+ **/
+void i40evf_add_vlans(struct i40evf_adapter *adapter)
+{
+	struct i40e_virtchnl_vlan_filter_list *vvfl;
+	int len, i = 0, count = 0;
+	struct i40evf_vlan_filter *f;
+
+	if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+		/* bail because we already have a command pending */
+		dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
+			__func__, adapter->current_op);
+		return;
+	}
+
+	list_for_each_entry(f, &adapter->vlan_filter_list, list) {
+		if (f->add)
+			count++;
+	}
+	if (!count) {
+		adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
+		return;
+	}
+	adapter->current_op = I40E_VIRTCHNL_OP_ADD_VLAN;
+
+	len = sizeof(struct i40e_virtchnl_vlan_filter_list) +
+	      (count * sizeof(u16));
+	if (len > I40EVF_MAX_AQ_BUF_SIZE) {
+		dev_warn(&adapter->pdev->dev, "%s: Too many VLAN changes in one request.\n",
+			__func__);
+		count = (I40EVF_MAX_AQ_BUF_SIZE -
+			 sizeof(struct i40e_virtchnl_vlan_filter_list)) /
+			sizeof(u16);
+		len = I40EVF_MAX_AQ_BUF_SIZE;
+	}
+	vvfl = kzalloc(len, GFP_ATOMIC);
+	if (!vvfl) {
+		dev_err(&adapter->pdev->dev, "%s: unable to allocate memory\n",
+			__func__);
+		return;
+	}
+	vvfl->vsi_id = adapter->vsi_res->vsi_id;
+	vvfl->num_elements = count;
+	list_for_each_entry(f, &adapter->vlan_filter_list, list) {
+		if (f->add) {
+			vvfl->vlan_id[i] = f->vlan;
+			i++;
+			f->add = false;
+		}
+	}
+	i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len);
+	kfree(vvfl);
+	adapter->aq_pending |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
+	adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
+}
+
+/**
+ * i40evf_del_vlans
+ * @adapter: adapter structure
+ *
+ * Request that the PF remove from our VSI the VLAN filters in our list that
+ * are marked for removal.
+ **/
+void i40evf_del_vlans(struct i40evf_adapter *adapter)
+{
+	struct i40e_virtchnl_vlan_filter_list *vvfl;
+	struct i40evf_vlan_filter *f, *ftmp;
+	int len, i = 0, count = 0;
+
+	if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+		/* bail because we already have a command pending */
+		dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
+			__func__, adapter->current_op);
+		return;
+	}
+
+	list_for_each_entry(f, &adapter->vlan_filter_list, list) {
+		if (f->remove)
+			count++;
+	}
+	if (!count) {
+		adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
+		return;
+	}
+	adapter->current_op = I40E_VIRTCHNL_OP_DEL_VLAN;
+
+	len = sizeof(struct i40e_virtchnl_vlan_filter_list) +
+	      (count * sizeof(u16));
+	if (len > I40EVF_MAX_AQ_BUF_SIZE) {
+		dev_warn(&adapter->pdev->dev, "%s: Too many VLAN changes in one request.\n",
+			__func__);
+		count = (I40EVF_MAX_AQ_BUF_SIZE -
+			 sizeof(struct i40e_virtchnl_vlan_filter_list)) /
+			sizeof(u16);
+		len = I40EVF_MAX_AQ_BUF_SIZE;
+	}
+	vvfl = kzalloc(len, GFP_ATOMIC);
+	if (!vvfl) {
+		dev_err(&adapter->pdev->dev, "%s: unable to allocate memory\n",
+			__func__);
+		return;
+	}
+	vvfl->vsi_id = adapter->vsi_res->vsi_id;
+	vvfl->num_elements = count;
+	list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
+		if (f->remove) {
+			vvfl->vlan_id[i] = f->vlan;
+			i++;
+			list_del(&f->list);
+			kfree(f);
+		}
+	}
+	i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DEL_VLAN, (u8 *)vvfl, len);
+	kfree(vvfl);
+	adapter->aq_pending |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
+	adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
+}
+
+/**
+ * i40evf_set_promiscuous
+ * @adapter: adapter structure
+ * @flags: bitmask to control unicast/multicast promiscuous mode.
+ *
+ * Request that the PF enable promiscuous mode for our VSI.
+ **/
+void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags)
+{
+	struct i40e_virtchnl_promisc_info vpi;
+
+	if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+		/* bail because we already have a command pending */
+		dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
+			__func__, adapter->current_op);
+		return;
+	}
+	adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE;
+	vpi.vsi_id = adapter->vsi_res->vsi_id;
+	vpi.flags = flags;
+	i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
+			   (u8 *)&vpi, sizeof(vpi));
+}
+
+/**
+ * i40evf_request_stats
+ * @adapter: adapter structure
+ *
+ * Request VSI statistics from PF.
+ **/
+void i40evf_request_stats(struct i40evf_adapter *adapter)
+{
+	struct i40e_virtchnl_queue_select vqs;
+	if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+		/* no error message, this isn't crucial */
+		return;
+	}
+	adapter->current_op = I40E_VIRTCHNL_OP_GET_STATS;
+	vqs.vsi_id = adapter->vsi_res->vsi_id;
+	/* queue maps are ignored for this message - only the vsi is used */
+	if (i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_GET_STATS,
+			       (u8 *)&vqs, sizeof(vqs)))
+		/* if the request failed, don't lock out others */
+		adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
+}
+
+/**
+ * i40evf_virtchnl_completion
+ * @adapter: adapter structure
+ * @v_opcode: opcode sent by PF
+ * @v_retval: retval sent by PF
+ * @msg: message sent by PF
+ * @msglen: message length
+ *
+ * Asynchronous completion function for admin queue messages. Rather than busy
+ * wait, we fire off our requests and assume that no errors will be returned.
+ * This function handles the reply messages.
+ **/
+void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
+				enum i40e_virtchnl_ops v_opcode,
+				i40e_status v_retval,
+				u8 *msg, u16 msglen)
+{
+	struct net_device *netdev = adapter->netdev;
+
+	if (v_opcode == I40E_VIRTCHNL_OP_EVENT) {
+		struct i40e_virtchnl_pf_event *vpe =
+			(struct i40e_virtchnl_pf_event *)msg;
+		switch (vpe->event) {
+		case I40E_VIRTCHNL_EVENT_LINK_CHANGE:
+			adapter->link_up =
+				vpe->event_data.link_event.link_status;
+			if (adapter->link_up && !netif_carrier_ok(netdev)) {
+				dev_info(&adapter->pdev->dev, "NIC Link is Up\n");
+				netif_carrier_on(netdev);
+				netif_tx_wake_all_queues(netdev);
+			} else if (!adapter->link_up) {
+				dev_info(&adapter->pdev->dev, "NIC Link is Down\n");
+				netif_carrier_off(netdev);
+				netif_tx_stop_all_queues(netdev);
+			}
+			break;
+		case I40E_VIRTCHNL_EVENT_RESET_IMPENDING:
+			adapter->state = __I40EVF_RESETTING;
+			schedule_work(&adapter->reset_task);
+			dev_info(&adapter->pdev->dev,
+				 "%s: hardware reset pending\n", __func__);
+			break;
+		default:
+			dev_err(&adapter->pdev->dev,
+				"%s: Unknown event %d from pf\n",
+				__func__, vpe->event);
+			break;
+
+		}
+		return;
+	}
+	if (v_opcode != adapter->current_op) {
+		dev_err(&adapter->pdev->dev, "%s: Pending op is %d, received %d.\n",
+			__func__, adapter->current_op, v_opcode);
+		/* We're probably completely screwed at this point, but clear
+		 * the current op and try to carry on....
+		 */
+		adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
+		return;
+	}
+	if (v_retval) {
+		dev_err(&adapter->pdev->dev, "%s: PF returned error %d to our request %d!\n",
+			__func__, v_retval, v_opcode);
+	}
+	switch (v_opcode) {
+	case I40E_VIRTCHNL_OP_GET_STATS: {
+		struct i40e_eth_stats *stats =
+			(struct i40e_eth_stats *)msg;
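+		/* fold per-type counters into the netdev-style totals */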
+		adapter->net_stats.rx_packets = stats->rx_unicast +
+						 stats->rx_multicast +
+						 stats->rx_broadcast;
+		adapter->net_stats.tx_packets = stats->tx_unicast +
+						 stats->tx_multicast +
+						 stats->tx_broadcast;
+		adapter->net_stats.rx_bytes = stats->rx_bytes;
+		adapter->net_stats.tx_bytes = stats->tx_bytes;
+		adapter->net_stats.rx_errors = stats->rx_errors;
+		adapter->net_stats.tx_errors = stats->tx_errors;
+		adapter->net_stats.rx_dropped = stats->rx_missed;
+		adapter->net_stats.tx_dropped = stats->tx_discards;
+		adapter->current_stats = *stats;
+		}
+		break;
+	case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
+		adapter->aq_pending &= ~(I40EVF_FLAG_AQ_ADD_MAC_FILTER);
+		break;
+	case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
+		adapter->aq_pending &= ~(I40EVF_FLAG_AQ_DEL_MAC_FILTER);
+		break;
+	case I40E_VIRTCHNL_OP_ADD_VLAN:
+		adapter->aq_pending &= ~(I40EVF_FLAG_AQ_ADD_VLAN_FILTER);
+		break;
+	case I40E_VIRTCHNL_OP_DEL_VLAN:
+		adapter->aq_pending &= ~(I40EVF_FLAG_AQ_DEL_VLAN_FILTER);
+		break;
+	case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
+		adapter->aq_pending &= ~(I40EVF_FLAG_AQ_ENABLE_QUEUES);
+		/* enable transmits */
+		i40evf_irq_enable(adapter, true);
+		netif_tx_start_all_queues(adapter->netdev);
+		netif_carrier_on(adapter->netdev);
+		break;
+	case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
+		adapter->aq_pending &= ~(I40EVF_FLAG_AQ_DISABLE_QUEUES);
+		break;
+	case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
+		adapter->aq_pending &= ~(I40EVF_FLAG_AQ_CONFIGURE_QUEUES);
+		break;
+	case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
+		adapter->aq_pending &= ~(I40EVF_FLAG_AQ_MAP_VECTORS);
+		break;
+	default:
+		dev_warn(&adapter->pdev->dev, "%s: Received unexpected message %d from PF.\n",
+			__func__, v_opcode);
+		break;
+	} /* switch v_opcode */
+	adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
+}
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index 47c2d10df826..06df6928f44c 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -113,6 +113,59 @@ static bool igb_sgmii_uses_mdio_82575(struct e1000_hw *hw)
 }
 
 /**
+ *  igb_check_for_link_media_swap - Check which M88E1112 interface is linked
+ *  @hw: pointer to the HW structure
+ *
+ *  Poll the M88E1112 interfaces to see which interface achieved link.
+ **/
+static s32 igb_check_for_link_media_swap(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 data;
+	u8 port = 0;
+
+	/* Check the copper medium. */
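+	/* The M88E1112 reports the copper interface status on page 0 and
+	 * the alternate (fiber/SerDes) interface status on page 1.
+	 */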
+	ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0);
+	if (ret_val)
+		return ret_val;
+
+	ret_val = phy->ops.read_reg(hw, E1000_M88E1112_STATUS, &data);
+	if (ret_val)
+		return ret_val;
+
+	if (data & E1000_M88E1112_STATUS_LINK)
+		port = E1000_MEDIA_PORT_COPPER;
+
+	/* Check the other medium. */
+	ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 1);
+	if (ret_val)
+		return ret_val;
+
+	ret_val = phy->ops.read_reg(hw, E1000_M88E1112_STATUS, &data);
+	if (ret_val)
+		return ret_val;
+
+	/* reset page to 0 */
+	ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0);
+	if (ret_val)
+		return ret_val;
+
+	if (data & E1000_M88E1112_STATUS_LINK)
+		port = E1000_MEDIA_PORT_OTHER;
+
+	/* Determine if a swap needs to happen. */
+	if (port && (hw->dev_spec._82575.media_port != port)) {
+		hw->dev_spec._82575.media_port = port;
+		hw->dev_spec._82575.media_changed = true;
+	} else {
+		ret_val = igb_check_for_link_82575(hw);
+	}
+
+	return ret_val;
+}
+
+/**
  *  igb_init_phy_params_82575 - Init PHY func ptrs.
  *  @hw: pointer to the HW structure
  **/
@@ -189,6 +242,29 @@ static s32 igb_init_phy_params_82575(struct e1000_hw *hw)
 		else
 			phy->ops.get_cable_length = igb_get_cable_length_m88;
 		phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88;
+		/* Check if this PHY is configured for media swap. */
+		if (phy->id == M88E1112_E_PHY_ID) {
+			u16 data;
+
+			ret_val = phy->ops.write_reg(hw,
+						     E1000_M88E1112_PAGE_ADDR,
+						     2);
+			if (ret_val)
+				goto out;
+
+			ret_val = phy->ops.read_reg(hw,
+						    E1000_M88E1112_MAC_CTRL_1,
+						    &data);
+			if (ret_val)
+				goto out;
+
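+			/* The mode field shows whether the PHY auto-selects
+			 * between copper and SGMII or 1000BASE-X media.
+			 */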
+			data = (data & E1000_M88E1112_MAC_CTRL_1_MODE_MASK) >>
+			       E1000_M88E1112_MAC_CTRL_1_MODE_SHIFT;
+			if (data == E1000_M88E1112_AUTO_COPPER_SGMII ||
+			    data == E1000_M88E1112_AUTO_COPPER_BASEX)
+				hw->mac.ops.check_for_link =
+						igb_check_for_link_media_swap;
+		}
 		break;
 	case IGP03E1000_E_PHY_ID:
 		phy->type = e1000_phy_igp_3;
@@ -365,6 +441,19 @@ static s32 igb_init_mac_params_82575(struct e1000_hw *hw)
 			? igb_setup_copper_link_82575
 			: igb_setup_serdes_link_82575;
 
+	if (mac->type == e1000_82580) {
+		switch (hw->device_id) {
+		/* feature not supported on these id's */
+		case E1000_DEV_ID_DH89XXCC_SGMII:
+		case E1000_DEV_ID_DH89XXCC_SERDES:
+		case E1000_DEV_ID_DH89XXCC_BACKPLANE:
+		case E1000_DEV_ID_DH89XXCC_SFP:
+			break;
+		default:
+			hw->dev_spec._82575.mas_capable = true;
+			break;
+		}
+	}
 	return 0;
 }
 
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
index 978eca31ceda..0571b973be80 100644
--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
@@ -205,6 +205,11 @@
  */
 
 #define E1000_CONNSW_ENRGSRC             0x4
+#define E1000_CONNSW_PHYSD		0x400
+#define E1000_CONNSW_PHY_PDN		0x800
+#define E1000_CONNSW_SERDESD		0x200
+#define E1000_CONNSW_AUTOSENSE_CONF	0x2
+#define E1000_CONNSW_AUTOSENSE_EN	0x1
 #define E1000_PCS_CFG_PCS_EN             8
 #define E1000_PCS_LCTL_FLV_LINK_UP       1
 #define E1000_PCS_LCTL_FSV_100           2
@@ -532,6 +537,17 @@
 #define E1000_MDICNFG_PHY_MASK    0x03E00000
 #define E1000_MDICNFG_PHY_SHIFT   21
 
+#define E1000_MEDIA_PORT_COPPER			1
+#define E1000_MEDIA_PORT_OTHER			2
+#define E1000_M88E1112_AUTO_COPPER_SGMII	0x2
+#define E1000_M88E1112_AUTO_COPPER_BASEX	0x3
+#define E1000_M88E1112_STATUS_LINK		0x0004 /* Interface Link Bit */
+#define E1000_M88E1112_MAC_CTRL_1		0x10
+#define E1000_M88E1112_MAC_CTRL_1_MODE_MASK	0x0380 /* Mode Select */
+#define E1000_M88E1112_MAC_CTRL_1_MODE_SHIFT	7
+#define E1000_M88E1112_PAGE_ADDR		0x16
+#define E1000_M88E1112_STATUS			0x01
+
 /* PCI Express Control */
 #define E1000_GCR_CMPL_TMOUT_MASK       0x0000F000
 #define E1000_GCR_CMPL_TMOUT_10ms       0x00001000
diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h
index 2e166b22d52b..ab99e2b582a8 100644
--- a/drivers/net/ethernet/intel/igb/e1000_hw.h
+++ b/drivers/net/ethernet/intel/igb/e1000_hw.h
@@ -533,6 +533,9 @@ struct e1000_dev_spec_82575 {
 	bool clear_semaphore_once;
 	struct e1000_sfp_flags eth_flags;
 	bool module_plugged;
+	u8 media_port;
+	bool media_changed;
+	bool mas_capable;
 };
 
 struct e1000_hw {
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 5e9ed89403aa..ccf472f073dd 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -41,6 +41,7 @@
 #include <linux/if_vlan.h>
 #include <linux/i2c.h>
 #include <linux/i2c-algo-bit.h>
+#include <linux/pci.h>
 
 struct igb_adapter;
 
@@ -67,6 +68,7 @@ struct igb_adapter;
 #define IGB_MIN_ITR_USECS	10
 #define NON_Q_VECTORS		1
 #define MAX_Q_VECTORS		8
+#define MAX_MSIX_ENTRIES	10
 
 /* Transmit and receive queues */
 #define IGB_MAX_RX_QUEUES	8
@@ -127,9 +129,9 @@ struct vf_data_storage {
 #define IGB_TX_PTHRESH	((hw->mac.type == e1000_i354) ? 20 : 8)
 #define IGB_TX_HTHRESH	1
 #define IGB_RX_WTHRESH	((hw->mac.type == e1000_82576 && \
-			  adapter->msix_entries) ? 1 : 4)
+			  (adapter->flags & IGB_FLAG_HAS_MSIX)) ? 1 : 4)
 #define IGB_TX_WTHRESH	((hw->mac.type == e1000_82576 && \
-			  adapter->msix_entries) ? 1 : 16)
+			  (adapter->flags & IGB_FLAG_HAS_MSIX)) ? 1 : 16)
 
 /* this is the size past which hardware will drop packets when setting LPE=0 */
 #define MAXIMUM_ETHERNET_VLAN_SIZE 1522
@@ -337,8 +339,10 @@ struct hwmon_attr {
 	};
 
 struct hwmon_buff {
-	struct device *device;
-	struct hwmon_attr *hwmon_list;
+	struct attribute_group group;
+	const struct attribute_group *groups[2];
+	struct attribute *attrs[E1000_MAX_SENSORS * 4 + 1];
+	struct hwmon_attr hwmon_list[E1000_MAX_SENSORS * 4];
 	unsigned int n_hwmon;
 	};
 #endif
@@ -355,7 +359,7 @@ struct igb_adapter {
 	unsigned int flags;
 
 	unsigned int num_q_vectors;
-	struct msix_entry *msix_entries;
+	struct msix_entry msix_entries[MAX_MSIX_ENTRIES];
 
 	/* Interrupt Throttle Rate */
 	u32 rx_itr_setting;
@@ -440,7 +444,7 @@ struct igb_adapter {
 
 	char fw_version[32];
 #ifdef CONFIG_IGB_HWMON
-	struct hwmon_buff igb_hwmon_buff;
+	struct hwmon_buff *igb_hwmon_buff;
 	bool ets;
 #endif
 	struct i2c_algo_bit_data i2c_algo;
@@ -450,6 +454,8 @@ struct igb_adapter {
 	u8 rss_indir_tbl[IGB_RETA_SIZE];
 
 	unsigned long link_check_timeout;
+	int copper_tries;
+	struct e1000_info ei;
 };
 
 #define IGB_FLAG_HAS_MSI		(1 << 0)
@@ -462,6 +468,16 @@ struct igb_adapter {
 #define IGB_FLAG_RSS_FIELD_IPV6_UDP	(1 << 7)
 #define IGB_FLAG_WOL_SUPPORTED		(1 << 8)
 #define IGB_FLAG_NEED_LINK_UPDATE	(1 << 9)
+#define IGB_FLAG_MEDIA_RESET		(1 << 10)
+#define IGB_FLAG_MAS_CAPABLE		(1 << 11)
+#define IGB_FLAG_MAS_ENABLE		(1 << 12)
+#define IGB_FLAG_HAS_MSIX		(1 << 13)
+
+/* Media Auto Sense */
+#define IGB_MAS_ENABLE_0		0X0001
+#define IGB_MAS_ENABLE_1		0X0002
+#define IGB_MAS_ENABLE_2		0X0004
+#define IGB_MAS_ENABLE_3		0X0008
 
 /* DMA Coalescing defines */
 #define IGB_MIN_TXPBSIZE	20408
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index c3143da497c8..1df02378de69 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -1386,7 +1386,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
 	*data = 0;
 
 	/* Hook up test interrupt handler just for this test */
-	if (adapter->msix_entries) {
+	if (adapter->flags & IGB_FLAG_HAS_MSIX) {
 		if (request_irq(adapter->msix_entries[0].vector,
 		                igb_test_intr, 0, netdev->name, adapter)) {
 			*data = 1;
@@ -1519,7 +1519,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
 	msleep(10);
 
 	/* Unhook test interrupt handler */
-	if (adapter->msix_entries)
+	if (adapter->flags & IGB_FLAG_HAS_MSIX)
 		free_irq(adapter->msix_entries[0].vector, adapter);
 	else
 		free_irq(irq, adapter);
@@ -1983,6 +1983,10 @@ static void igb_diag_test(struct net_device *netdev,
 	bool if_running = netif_running(netdev);
 
 	set_bit(__IGB_TESTING, &adapter->state);
+
+	/* can't do offline tests on media switching devices */
+	if (adapter->hw.dev_spec._82575.mas_capable)
+		eth_test->flags &= ~ETH_TEST_FL_OFFLINE;
 	if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
 		/* Offline tests */
 
@@ -2929,7 +2933,7 @@ static void igb_get_channels(struct net_device *netdev,
 	ch->max_combined = igb_max_channels(adapter);
 
 	/* Report info for other vector */
-	if (adapter->msix_entries) {
+	if (adapter->flags & IGB_FLAG_HAS_MSIX) {
 		ch->max_other = NON_Q_VECTORS;
 		ch->other_count = NON_Q_VECTORS;
 	}
diff --git a/drivers/net/ethernet/intel/igb/igb_hwmon.c b/drivers/net/ethernet/intel/igb/igb_hwmon.c
index 58f1ce967aeb..e0af5bc61613 100644
--- a/drivers/net/ethernet/intel/igb/igb_hwmon.c
+++ b/drivers/net/ethernet/intel/igb/igb_hwmon.c
@@ -117,29 +117,29 @@ static int igb_add_hwmon_attr(struct igb_adapter *adapter,
 	unsigned int n_attr;
 	struct hwmon_attr *igb_attr;
 
-	n_attr = adapter->igb_hwmon_buff.n_hwmon;
-	igb_attr = &adapter->igb_hwmon_buff.hwmon_list[n_attr];
+	n_attr = adapter->igb_hwmon_buff->n_hwmon;
+	igb_attr = &adapter->igb_hwmon_buff->hwmon_list[n_attr];
 
 	switch (type) {
 	case IGB_HWMON_TYPE_LOC:
 		igb_attr->dev_attr.show = igb_hwmon_show_location;
 		snprintf(igb_attr->name, sizeof(igb_attr->name),
-			 "temp%u_label", offset);
+			 "temp%u_label", offset + 1);
 		break;
 	case IGB_HWMON_TYPE_TEMP:
 		igb_attr->dev_attr.show = igb_hwmon_show_temp;
 		snprintf(igb_attr->name, sizeof(igb_attr->name),
-			 "temp%u_input", offset);
+			 "temp%u_input", offset + 1);
 		break;
 	case IGB_HWMON_TYPE_CAUTION:
 		igb_attr->dev_attr.show = igb_hwmon_show_cautionthresh;
 		snprintf(igb_attr->name, sizeof(igb_attr->name),
-			 "temp%u_max", offset);
+			 "temp%u_max", offset + 1);
 		break;
 	case IGB_HWMON_TYPE_MAX:
 		igb_attr->dev_attr.show = igb_hwmon_show_maxopthresh;
 		snprintf(igb_attr->name, sizeof(igb_attr->name),
-			 "temp%u_crit", offset);
+			 "temp%u_crit", offset + 1);
 		break;
 	default:
 		rc = -EPERM;
@@ -154,30 +154,16 @@ static int igb_add_hwmon_attr(struct igb_adapter *adapter,
 	igb_attr->dev_attr.attr.mode = S_IRUGO;
 	igb_attr->dev_attr.attr.name = igb_attr->name;
 	sysfs_attr_init(&igb_attr->dev_attr.attr);
-	rc = device_create_file(&adapter->pdev->dev,
-				&igb_attr->dev_attr);
-	if (rc == 0)
-		++adapter->igb_hwmon_buff.n_hwmon;
 
-	return rc;
+	adapter->igb_hwmon_buff->attrs[n_attr] = &igb_attr->dev_attr.attr;
+
+	++adapter->igb_hwmon_buff->n_hwmon;
+
+	return 0;
 }
 
 static void igb_sysfs_del_adapter(struct igb_adapter *adapter)
 {
-	int i;
-
-	if (adapter == NULL)
-		return;
-
-	for (i = 0; i < adapter->igb_hwmon_buff.n_hwmon; i++) {
-		device_remove_file(&adapter->pdev->dev,
-			   &adapter->igb_hwmon_buff.hwmon_list[i].dev_attr);
-	}
-
-	kfree(adapter->igb_hwmon_buff.hwmon_list);
-
-	if (adapter->igb_hwmon_buff.device)
-		hwmon_device_unregister(adapter->igb_hwmon_buff.device);
 }
 
 /* called from igb_main.c */
@@ -189,11 +175,11 @@ void igb_sysfs_exit(struct igb_adapter *adapter)
 /* called from igb_main.c */
 int igb_sysfs_init(struct igb_adapter *adapter)
 {
-	struct hwmon_buff *igb_hwmon = &adapter->igb_hwmon_buff;
+	struct hwmon_buff *igb_hwmon;
+	struct i2c_client *client;
+	struct device *hwmon_dev;
 	unsigned int i;
-	int n_attrs;
 	int rc = 0;
-	struct i2c_client *client = NULL;
 
 	/* If this method isn't defined we don't support thermals */
 	if (adapter->hw.mac.ops.init_thermal_sensor_thresh == NULL)
@@ -201,34 +187,16 @@ int igb_sysfs_init(struct igb_adapter *adapter)
 
 	/* Don't create thermal hwmon interface if no sensors present */
 	rc = (adapter->hw.mac.ops.init_thermal_sensor_thresh(&adapter->hw));
-		if (rc)
-			goto exit;
-
-	/* init i2c_client */
-	client = i2c_new_device(&adapter->i2c_adap, &i350_sensor_info);
-	if (client == NULL) {
-		dev_info(&adapter->pdev->dev,
-			"Failed to create new i2c device..\n");
+	if (rc)
 		goto exit;
-	}
-	adapter->i2c_client = client;
 
-	/* Allocation space for max attributes
-	 * max num sensors * values (loc, temp, max, caution)
-	 */
-	n_attrs = E1000_MAX_SENSORS * 4;
-	igb_hwmon->hwmon_list = kcalloc(n_attrs, sizeof(struct hwmon_attr),
-					GFP_KERNEL);
-	if (!igb_hwmon->hwmon_list) {
+	igb_hwmon = devm_kzalloc(&adapter->pdev->dev, sizeof(*igb_hwmon),
+				 GFP_KERNEL);
+	if (!igb_hwmon) {
 		rc = -ENOMEM;
-		goto err;
-	}
-
-	igb_hwmon->device = hwmon_device_register(&adapter->pdev->dev);
-	if (IS_ERR(igb_hwmon->device)) {
-		rc = PTR_ERR(igb_hwmon->device);
-		goto err;
+		goto exit;
 	}
+	adapter->igb_hwmon_buff = igb_hwmon;
 
 	for (i = 0; i < E1000_MAX_SENSORS; i++) {
 
@@ -240,11 +208,39 @@ int igb_sysfs_init(struct igb_adapter *adapter)
 
 		/* Bail if any hwmon attr struct fails to initialize */
 		rc = igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_CAUTION);
-		rc |= igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_LOC);
-		rc |= igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_TEMP);
-		rc |= igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_MAX);
 		if (rc)
-			goto err;
+			goto exit;
+		rc = igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_LOC);
+		if (rc)
+			goto exit;
+		rc = igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_TEMP);
+		if (rc)
+			goto exit;
+		rc = igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_MAX);
+		if (rc)
+			goto exit;
+	}
+
+	/* init i2c_client */
+	client = i2c_new_device(&adapter->i2c_adap, &i350_sensor_info);
+	if (client == NULL) {
+		dev_info(&adapter->pdev->dev,
+			 "Failed to create new i2c device.\n");
+		rc = -ENODEV;
+		goto exit;
+	}
+	adapter->i2c_client = client;
+
+	igb_hwmon->groups[0] = &igb_hwmon->group;
+	igb_hwmon->group.attrs = igb_hwmon->attrs;
+
+	hwmon_dev = devm_hwmon_device_register_with_groups(&adapter->pdev->dev,
+							   client->name,
+							   igb_hwmon,
+							   igb_hwmon->groups);
+	if (IS_ERR(hwmon_dev)) {
+		rc = PTR_ERR(hwmon_dev);
+		goto err;
 	}
 
 	goto exit;
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 025e5f4b7481..46d31a49f5ea 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -803,7 +803,7 @@ static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
 			msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
 		if (tx_queue > IGB_N0_QUEUE)
 			msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
-		if (!adapter->msix_entries && msix_vector == 0)
+		if (!(adapter->flags & IGB_FLAG_HAS_MSIX) && msix_vector == 0)
 			msixbm |= E1000_EIMS_OTHER;
 		array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
 		q_vector->eims_value = msixbm;
@@ -983,43 +983,58 @@ err_out:
 	return err;
 }
 
-static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
-{
-	if (adapter->msix_entries) {
-		pci_disable_msix(adapter->pdev);
-		kfree(adapter->msix_entries);
-		adapter->msix_entries = NULL;
-	} else if (adapter->flags & IGB_FLAG_HAS_MSI) {
-		pci_disable_msi(adapter->pdev);
-	}
-}
-
 /**
  *  igb_free_q_vector - Free memory allocated for specific interrupt vector
  *  @adapter: board private structure to initialize
  *  @v_idx: Index of vector to be freed
  *
- *  This function frees the memory allocated to the q_vector.  In addition if
- *  NAPI is enabled it will delete any references to the NAPI struct prior
- *  to freeing the q_vector.
+ *  This function frees the memory allocated to the q_vector.
  **/
 static void igb_free_q_vector(struct igb_adapter *adapter, int v_idx)
 {
 	struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
 
+	adapter->q_vector[v_idx] = NULL;
+
+	/* igb_get_stats64() might access the rings on this vector,
+	 * we must wait a grace period before freeing it.
+	 */
+	kfree_rcu(q_vector, rcu);
+}
+
+/**
+ *  igb_reset_q_vector - Reset config for interrupt vector
+ *  @adapter: board private structure to initialize
+ *  @v_idx: Index of vector to be reset
+ *
+ *  If NAPI is enabled it will delete any references to the
+ *  NAPI struct. This is preparation for igb_free_q_vector.
+ **/
+static void igb_reset_q_vector(struct igb_adapter *adapter, int v_idx)
+{
+	struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
+
 	if (q_vector->tx.ring)
 		adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL;
 
 	if (q_vector->rx.ring)
 		adapter->tx_ring[q_vector->rx.ring->queue_index] = NULL;
 
-	adapter->q_vector[v_idx] = NULL;
 	netif_napi_del(&q_vector->napi);
 
-	/* igb_get_stats64() might access the rings on this vector,
-	 * we must wait a grace period before freeing it.
-	 */
-	kfree_rcu(q_vector, rcu);
+}
+
+static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
+{
+	int v_idx = adapter->num_q_vectors;
+
+	if (adapter->flags & IGB_FLAG_HAS_MSIX)
+		pci_disable_msix(adapter->pdev);
+	else if (adapter->flags & IGB_FLAG_HAS_MSI)
+		pci_disable_msi(adapter->pdev);
+
+	while (v_idx--)
+		igb_reset_q_vector(adapter, v_idx);
 }
 
 /**
@@ -1038,8 +1053,10 @@ static void igb_free_q_vectors(struct igb_adapter *adapter)
 	adapter->num_rx_queues = 0;
 	adapter->num_q_vectors = 0;
 
-	while (v_idx--)
+	while (v_idx--) {
+		igb_reset_q_vector(adapter, v_idx);
 		igb_free_q_vector(adapter, v_idx);
+	}
 }
 
 /**
@@ -1070,6 +1087,7 @@ static void igb_set_interrupt_capability(struct igb_adapter *adapter, bool msix)
 
 	if (!msix)
 		goto msi_only;
+	adapter->flags |= IGB_FLAG_HAS_MSIX;
 
 	/* Number of supported queues. */
 	adapter->num_rx_queues = adapter->rss_queues;
@@ -1090,12 +1108,6 @@ static void igb_set_interrupt_capability(struct igb_adapter *adapter, bool msix)
 
 	/* add 1 vector for link status interrupts */
 	numvecs++;
-	adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
-					GFP_KERNEL);
-
-	if (!adapter->msix_entries)
-		goto msi_only;
-
 	for (i = 0; i < numvecs; i++)
 		adapter->msix_entries[i].entry = i;
 
@@ -1172,7 +1184,9 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter,
 	       (sizeof(struct igb_ring) * ring_count);
 
 	/* allocate q_vector and rings */
-	q_vector = kzalloc(size, GFP_KERNEL);
+	q_vector = adapter->q_vector[v_idx];
+	if (!q_vector)
+		q_vector = kzalloc(size, GFP_KERNEL);
 	if (!q_vector)
 		return -ENOMEM;
 
@@ -1370,7 +1384,7 @@ static int igb_request_irq(struct igb_adapter *adapter)
 	struct pci_dev *pdev = adapter->pdev;
 	int err = 0;
 
-	if (adapter->msix_entries) {
+	if (adapter->flags & IGB_FLAG_HAS_MSIX) {
 		err = igb_request_msix(adapter);
 		if (!err)
 			goto request_done;
@@ -1414,7 +1428,7 @@ request_done:
 
 static void igb_free_irq(struct igb_adapter *adapter)
 {
-	if (adapter->msix_entries) {
+	if (adapter->flags & IGB_FLAG_HAS_MSIX) {
 		int vector = 0, i;
 
 		free_irq(adapter->msix_entries[vector++].vector, adapter);
@@ -1439,7 +1453,7 @@ static void igb_irq_disable(struct igb_adapter *adapter)
 	 * mapped into these registers and so clearing the bits can cause
 	 * issues on the VF drivers so we only need to clear what we set
 	 */
-	if (adapter->msix_entries) {
+	if (adapter->flags & IGB_FLAG_HAS_MSIX) {
 		u32 regval = rd32(E1000_EIAM);
 		wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask);
 		wr32(E1000_EIMC, adapter->eims_enable_mask);
@@ -1450,7 +1464,7 @@ static void igb_irq_disable(struct igb_adapter *adapter)
 	wr32(E1000_IAM, 0);
 	wr32(E1000_IMC, ~0);
 	wrfl();
-	if (adapter->msix_entries) {
+	if (adapter->flags & IGB_FLAG_HAS_MSIX) {
 		int i;
 		for (i = 0; i < adapter->num_q_vectors; i++)
 			synchronize_irq(adapter->msix_entries[i].vector);
@@ -1467,7 +1481,7 @@ static void igb_irq_enable(struct igb_adapter *adapter)
 {
 	struct e1000_hw *hw = &adapter->hw;
 
-	if (adapter->msix_entries) {
+	if (adapter->flags & IGB_FLAG_HAS_MSIX) {
 		u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC | E1000_IMS_DRSTA;
 		u32 regval = rd32(E1000_EIAC);
 		wr32(E1000_EIAC, regval | adapter->eims_enable_mask);
@@ -1607,6 +1621,73 @@ static void igb_power_down_link(struct igb_adapter *adapter)
 }
 
 /**
+ * igb_check_swap_media - detect and switch function for Media Auto Sense
+ * @adapter: address of the board private structure
+ **/
+static void igb_check_swap_media(struct igb_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u32 ctrl_ext, connsw;
+	bool swap_now = false;
+
+	ctrl_ext = rd32(E1000_CTRL_EXT);
+	connsw = rd32(E1000_CONNSW);
+
+	/* We need to perform a live swap if the current media is copper and
+	 * there is fiber/SerDes media to switch to.
+	 */
+
+	if ((hw->phy.media_type == e1000_media_type_copper) &&
+	    (!(connsw & E1000_CONNSW_AUTOSENSE_EN))) {
+		swap_now = true;
+	} else if (!(connsw & E1000_CONNSW_SERDESD)) {
+		/* copper signal takes time to appear */
+		if (adapter->copper_tries < 4) {
+			adapter->copper_tries++;
+			connsw |= E1000_CONNSW_AUTOSENSE_CONF;
+			wr32(E1000_CONNSW, connsw);
+			return;
+		} else {
+			adapter->copper_tries = 0;
+			if ((connsw & E1000_CONNSW_PHYSD) &&
+			    (!(connsw & E1000_CONNSW_PHY_PDN))) {
+				swap_now = true;
+				connsw &= ~E1000_CONNSW_AUTOSENSE_CONF;
+				wr32(E1000_CONNSW, connsw);
+			}
+		}
+	}
+
+	if (!swap_now)
+		return;
+
+	switch (hw->phy.media_type) {
+	case e1000_media_type_copper:
+		netdev_info(adapter->netdev,
+			"MAS: changing media to fiber/serdes\n");
+		ctrl_ext |=
+			E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
+		adapter->flags |= IGB_FLAG_MEDIA_RESET;
+		adapter->copper_tries = 0;
+		break;
+	case e1000_media_type_internal_serdes:
+	case e1000_media_type_fiber:
+		netdev_info(adapter->netdev,
+			"MAS: changing media to copper\n");
+		ctrl_ext &=
+			~E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
+		adapter->flags |= IGB_FLAG_MEDIA_RESET;
+		break;
+	default:
+		/* shouldn't get here during regular operation */
+		netdev_err(adapter->netdev,
+			"AMS: Invalid media type found, returning\n");
+			"MAS: Invalid media type found, returning\n");
+	}
+	wr32(E1000_CTRL_EXT, ctrl_ext);
+}
+
+/**
  *  igb_up - Open the interface and prepare it to handle traffic
  *  @adapter: board private structure
  **/
@@ -1623,7 +1704,7 @@ int igb_up(struct igb_adapter *adapter)
 	for (i = 0; i < adapter->num_q_vectors; i++)
 		napi_enable(&(adapter->q_vector[i]->napi));
 
-	if (adapter->msix_entries)
+	if (adapter->flags & IGB_FLAG_HAS_MSIX)
 		igb_configure_msix(adapter);
 	else
 		igb_assign_vector(adapter->q_vector[0], 0);
@@ -1719,6 +1800,37 @@ void igb_reinit_locked(struct igb_adapter *adapter)
 	clear_bit(__IGB_RESETTING, &adapter->state);
 }
 
+/**
+ *  igb_enable_mas - Media Autosense re-enable after swap
+ *  @adapter: adapter struct
+ **/
+static s32 igb_enable_mas(struct igb_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u32 connsw;
+	s32 ret_val = 0;
+
+	connsw = rd32(E1000_CONNSW);
+	if (!(hw->phy.media_type == e1000_media_type_copper))
+		return ret_val;
+
+	/* configure for SerDes media detect */
+	if (!(connsw & E1000_CONNSW_SERDESD)) {
+		connsw |= E1000_CONNSW_ENRGSRC;
+		connsw |= E1000_CONNSW_AUTOSENSE_EN;
+		wr32(E1000_CONNSW, connsw);
+		wrfl();
+	} else if (connsw & E1000_CONNSW_SERDESD) {
+		/* already SerDes, no need to enable anything */
+		return ret_val;
+	} else {
+		netdev_info(adapter->netdev,
+			"MAS: Unable to configure feature, disabling..\n");
+		adapter->flags &= ~IGB_FLAG_MAS_ENABLE;
+	}
+	return ret_val;
+}
+
 void igb_reset(struct igb_adapter *adapter)
 {
 	struct pci_dev *pdev = adapter->pdev;
@@ -1830,6 +1942,16 @@ void igb_reset(struct igb_adapter *adapter)
 	hw->mac.ops.reset_hw(hw);
 	wr32(E1000_WUC, 0);
 
+	if (adapter->flags & IGB_FLAG_MEDIA_RESET) {
+		/* need to resetup here after media swap */
+		adapter->ei.get_invariants(hw);
+		adapter->flags &= ~IGB_FLAG_MEDIA_RESET;
+	}
+	if (adapter->flags & IGB_FLAG_MAS_ENABLE) {
+		if (igb_enable_mas(adapter))
+			dev_err(&pdev->dev,
+				"Error enabling Media Auto Sense\n");
+	}
 	if (hw->mac.ops.init_hw(hw))
 		dev_err(&pdev->dev, "Hardware Error\n");
 
@@ -1976,6 +2098,58 @@ void igb_set_fw_version(struct igb_adapter *adapter)
 }
 
 /**
+ * igb_init_mas - init Media Autosense feature if enabled in the NVM
+ * @adapter: adapter struct
+ **/
+static void igb_init_mas(struct igb_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u16 eeprom_data;
+
+	hw->nvm.ops.read(hw, NVM_COMPAT, 1, &eeprom_data);
+	switch (hw->bus.func) {
+	case E1000_FUNC_0:
+		if (eeprom_data & IGB_MAS_ENABLE_0) {
+			adapter->flags |= IGB_FLAG_MAS_ENABLE;
+			netdev_info(adapter->netdev,
+				"MAS: Enabling Media Autosense for port %d\n",
+				hw->bus.func);
+		}
+		break;
+	case E1000_FUNC_1:
+		if (eeprom_data & IGB_MAS_ENABLE_1) {
+			adapter->flags |= IGB_FLAG_MAS_ENABLE;
+			netdev_info(adapter->netdev,
+				"MAS: Enabling Media Autosense for port %d\n",
+				hw->bus.func);
+		}
+		break;
+	case E1000_FUNC_2:
+		if (eeprom_data & IGB_MAS_ENABLE_2) {
+			adapter->flags |= IGB_FLAG_MAS_ENABLE;
+			netdev_info(adapter->netdev,
+				"MAS: Enabling Media Autosense for port %d\n",
+				hw->bus.func);
+		}
+		break;
+	case E1000_FUNC_3:
+		if (eeprom_data & IGB_MAS_ENABLE_3) {
+			adapter->flags |= IGB_FLAG_MAS_ENABLE;
+			netdev_info(adapter->netdev,
+				"MAS: Enabling Media Autosense for port %d\n",
+				hw->bus.func);
+		}
+		break;
+	default:
+		/* Shouldn't get here */
+		netdev_err(adapter->netdev,
+			"MAS: Invalid port configuration, returning\n");
+		break;
+	}
+}
+
+/**
  *  igb_init_i2c - Init I2C interface
  *  @adapter: pointer to adapter structure
  **/
@@ -2022,7 +2196,6 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	s32 ret_val;
 	static int global_quad_port_a; /* global quad port a indication */
 	const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
-	unsigned long mmio_start, mmio_len;
 	int err, pci_using_dac;
 	u8 part_str[E1000_PBANUM_LENGTH];
 
@@ -2079,11 +2252,8 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	hw->back = adapter;
 	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
 
-	mmio_start = pci_resource_start(pdev, 0);
-	mmio_len = pci_resource_len(pdev, 0);
-
 	err = -EIO;
-	hw->hw_addr = ioremap(mmio_start, mmio_len);
+	hw->hw_addr = pci_iomap(pdev, 0, 0);
 	if (!hw->hw_addr)
 		goto err_ioremap;
 
@@ -2093,8 +2263,8 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
 
-	netdev->mem_start = mmio_start;
-	netdev->mem_end = mmio_start + mmio_len;
+	netdev->mem_start = pci_resource_start(pdev, 0);
+	netdev->mem_end = pci_resource_end(pdev, 0);
 
 	/* PCI config space info */
 	hw->vendor_id = pdev->vendor;
@@ -2350,6 +2520,11 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		adapter->ets = false;
 	}
 #endif
+	/* Check if Media Autosense is enabled */
+	adapter->ei = *ei;
+	if (hw->dev_spec._82575.mas_capable)
+		igb_init_mas(adapter);
+
 	/* do hw tstamp init after resetting */
 	igb_ptp_init(adapter);
 
@@ -2382,7 +2557,7 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	dev_info(&pdev->dev, "%s: PBA No: %s\n", netdev->name, part_str);
 	dev_info(&pdev->dev,
 		"Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
-		adapter->msix_entries ? "MSI-X" :
+		(adapter->flags & IGB_FLAG_HAS_MSIX) ? "MSI-X" :
 		(adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
 		adapter->num_rx_queues, adapter->num_tx_queues);
 	switch (hw->mac.type) {
@@ -2470,7 +2645,7 @@ static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs)
 	int err = 0;
 	int i;
 
-	if (!adapter->msix_entries || num_vfs > 7) {
+	if (!(adapter->flags & IGB_FLAG_HAS_MSIX) || num_vfs > 7) {
 		err = -EPERM;
 		goto out;
 	}
@@ -3935,6 +4110,7 @@ static void igb_watchdog_task(struct work_struct *work)
 	struct net_device *netdev = adapter->netdev;
 	u32 link;
 	int i;
+	u32 connsw;
 
 	link = igb_has_link(adapter);
 
@@ -3945,7 +4121,21 @@ static void igb_watchdog_task(struct work_struct *work)
 			link = false;
 	}
 
+	/* Force link down if we have fiber to swap to */
+	if (adapter->flags & IGB_FLAG_MAS_ENABLE) {
+		if (hw->phy.media_type == e1000_media_type_copper) {
+			connsw = rd32(E1000_CONNSW);
+			if (!(connsw & E1000_CONNSW_AUTOSENSE_EN))
+				link = 0;
+		}
+	}
 	if (link) {
+		/* Perform a reset if the media type changed. */
+		if (hw->dev_spec._82575.media_changed) {
+			hw->dev_spec._82575.media_changed = false;
+			adapter->flags |= IGB_FLAG_MEDIA_RESET;
+			igb_reset(adapter);
+		}
 		/* Cancel scheduled suspend requests. */
 		pm_runtime_resume(netdev->dev.parent);
 
@@ -4026,8 +4216,27 @@ static void igb_watchdog_task(struct work_struct *work)
 				mod_timer(&adapter->phy_info_timer,
 					  round_jiffies(jiffies + 2 * HZ));
 
+			/* link is down, time to check for alternate media */
+			if (adapter->flags & IGB_FLAG_MAS_ENABLE) {
+				igb_check_swap_media(adapter);
+				if (adapter->flags & IGB_FLAG_MEDIA_RESET) {
+					schedule_work(&adapter->reset_task);
+					/* return immediately */
+					return;
+				}
+			}
 			pm_schedule_suspend(netdev->dev.parent,
 					    MSEC_PER_SEC * 5);
+
+		/* also check for alternate media here */
+		} else if (!netif_carrier_ok(netdev) &&
+			   (adapter->flags & IGB_FLAG_MAS_ENABLE)) {
+			igb_check_swap_media(adapter);
+			if (adapter->flags & IGB_FLAG_MEDIA_RESET) {
+				schedule_work(&adapter->reset_task);
+				/* return immediately */
+				return;
+			}
 		}
 	}
 
@@ -4056,7 +4265,7 @@ static void igb_watchdog_task(struct work_struct *work)
 	}
 
 	/* Cause software interrupt to ensure Rx ring is cleaned */
-	if (adapter->msix_entries) {
+	if (adapter->flags & IGB_FLAG_HAS_MSIX) {
 		u32 eics = 0;
 		for (i = 0; i < adapter->num_q_vectors; i++)
 			eics |= adapter->q_vector[i]->eims_value;
@@ -5977,7 +6186,7 @@ static void igb_ring_irq_enable(struct igb_q_vector *q_vector)
 	}
 
 	if (!test_bit(__IGB_DOWN, &adapter->state)) {
-		if (adapter->msix_entries)
+		if (adapter->flags & IGB_FLAG_HAS_MSIX)
 			wr32(E1000_EIMS, q_vector->eims_value);
 		else
 			igb_irq_enable(adapter);
@@ -7344,7 +7553,7 @@ static void igb_netpoll(struct net_device *netdev)
 
 	for (i = 0; i < adapter->num_q_vectors; i++) {
 		q_vector = adapter->q_vector[i];
-		if (adapter->msix_entries)
+		if (adapter->flags & IGB_FLAG_HAS_MSIX)
 			wr32(E1000_EIMC, q_vector->eims_value);
 		else
 			igb_irq_disable(adapter);
@@ -7842,7 +8051,7 @@ int igb_reinit_queues(struct igb_adapter *adapter)
 	if (netif_running(netdev))
 		igb_close(netdev);
 
-	igb_clear_interrupt_scheme(adapter);
+	igb_reset_interrupt_capability(adapter);
 
 	if (igb_init_interrupt_scheme(adapter, true)) {
 		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 04bf22e5ee31..675435fc2e53 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -1745,7 +1745,7 @@ static int igbvf_set_mac(struct net_device *netdev, void *p)
 
 	hw->mac.ops.rar_set(hw, hw->mac.addr, 0);
 
-	if (memcmp(addr->sa_data, hw->mac.addr, 6))
+	if (!ether_addr_equal(addr->sa_data, hw->mac.addr))
 		return -EADDRNOTAVAIL;
 
 	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb.h b/drivers/net/ethernet/intel/ixgb/ixgb.h
index 2224cc2edf13..1180cd59b570 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb.h
+++ b/drivers/net/ethernet/intel/ixgb/ixgb.h
@@ -33,7 +33,6 @@
 #include <linux/module.h>
 #include <linux/types.h>
 #include <asm/byteorder.h>
-#include <linux/init.h>
 #include <linux/mm.h>
 #include <linux/errno.h>
 #include <linux/ioport.h>
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index f38fc0a343a2..0186ea2969fe 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -424,9 +424,10 @@ static inline bool ixgbe_qv_lock_napi(struct ixgbe_q_vector *q_vector)
 #ifdef BP_EXTENDED_STATS
 		q_vector->tx.ring->stats.yields++;
 #endif
-	} else
+	} else {
 		/* we don't care if someone yielded */
 		q_vector->state = IXGBE_QV_STATE_NAPI;
+	}
 	spin_unlock_bh(&q_vector->lock);
 	return rc;
 }
@@ -458,9 +459,10 @@ static inline bool ixgbe_qv_lock_poll(struct ixgbe_q_vector *q_vector)
 #ifdef BP_EXTENDED_STATS
 		q_vector->rx.ring->stats.yields++;
 #endif
-	} else
+	} else {
 		/* preserve yield marks */
 		q_vector->state |= IXGBE_QV_STATE_POLL;
+	}
 	spin_unlock_bh(&q_vector->lock);
 	return rc;
 }
@@ -552,8 +554,10 @@ struct hwmon_attr {
 };
 
 struct hwmon_buff {
-	struct device *device;
-	struct hwmon_attr *hwmon_list;
+	struct attribute_group group;
+	const struct attribute_group *groups[2];
+	struct attribute *attrs[IXGBE_MAX_SENSORS * 4 + 1];
+	struct hwmon_attr hwmon_list[IXGBE_MAX_SENSORS * 4];
 	unsigned int n_hwmon;
 };
 #endif /* CONFIG_IXGBE_HWMON */
@@ -583,6 +587,11 @@ static inline u16 ixgbe_desc_unused(struct ixgbe_ring *ring)
 	return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
 }
 
+static inline void ixgbe_write_tail(struct ixgbe_ring *ring, u32 value)
+{
+	writel(value, ring->tail);
+}
+
 #define IXGBE_RX_DESC(R, i)	    \
 	(&(((union ixgbe_adv_rx_desc *)((R)->desc))[i]))
 #define IXGBE_TX_DESC(R, i)	    \
@@ -740,6 +749,7 @@ struct ixgbe_adapter {
 #ifdef IXGBE_FCOE
 	struct ixgbe_fcoe fcoe;
 #endif /* IXGBE_FCOE */
+	u8 __iomem *io_addr; /* Mainly for iounmap use */
 	u32 wol;
 
 	u16 bd_number;
@@ -775,7 +785,7 @@ struct ixgbe_adapter {
 	u32 vferr_refcount;
 	struct kobject *info_kobj;
 #ifdef CONFIG_IXGBE_HWMON
-	struct hwmon_buff ixgbe_hwmon_buff;
+	struct hwmon_buff *ixgbe_hwmon_buff;
 #endif /* CONFIG_IXGBE_HWMON */
 #ifdef CONFIG_DEBUG_FS
 	struct dentry *ixgbe_dbg_adapter;
@@ -796,6 +806,7 @@ enum ixgbe_state_t {
 	__IXGBE_TESTING,
 	__IXGBE_RESETTING,
 	__IXGBE_DOWN,
+	__IXGBE_REMOVING,
 	__IXGBE_SERVICE_SCHED,
 	__IXGBE_IN_SFP_INIT,
 	__IXGBE_PTP_RUNNING,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index 007a0083a636..edda6814108c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -626,7 +626,7 @@ static void ixgbe_set_fiber_fixed_speed(struct ixgbe_hw *hw,
 		goto out;
 	}
 
-	eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) & rs;
+	eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;
 
 	status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
 					    IXGBE_I2C_EEPROM_DEV_ADDR2,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
index d259dc76604e..f2e3919750ec 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
@@ -124,24 +124,65 @@ s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw);
 s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw);
 s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw);
 
-#define IXGBE_WRITE_REG(a, reg, value) writel((value), ((a)->hw_addr + (reg)))
+#define IXGBE_FAILED_READ_REG 0xffffffffU
 
-#ifndef writeq
-#define writeq(val, addr) writel((u32) (val), addr); \
-    writel((u32) (val >> 32), (addr + 4));
-#endif
+static inline bool ixgbe_removed(void __iomem *addr)
+{
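+	/* a NULL register mapping means the adapter has been removed */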
+	return unlikely(!addr);
+}
 
-#define IXGBE_WRITE_REG64(a, reg, value) writeq((value), ((a)->hw_addr + (reg)))
+void ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg);
 
-#define IXGBE_READ_REG(a, reg) readl((a)->hw_addr + (reg))
+static inline void ixgbe_write_reg(struct ixgbe_hw *hw, u32 reg, u32 value)
+{
+	u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
 
-#define IXGBE_WRITE_REG_ARRAY(a, reg, offset, value) (\
-    writel((value), ((a)->hw_addr + (reg) + ((offset) << 2))))
+	if (ixgbe_removed(reg_addr))
+		return;
+	writel(value, reg_addr + reg);
+}
+#define IXGBE_WRITE_REG(a, reg, value) ixgbe_write_reg((a), (reg), (value))
 
-#define IXGBE_READ_REG_ARRAY(a, reg, offset) (\
-    readl((a)->hw_addr + (reg) + ((offset) << 2)))
+#ifndef writeq
+#define writeq writeq
+static inline void writeq(u64 val, void __iomem *addr)
+{
+	writel((u32)val, addr);
+	writel((u32)(val >> 32), addr + 4);
+}
+#endif
 
-#define IXGBE_WRITE_FLUSH(a) IXGBE_READ_REG(a, IXGBE_STATUS)
+static inline void ixgbe_write_reg64(struct ixgbe_hw *hw, u32 reg, u64 value)
+{
+	u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
+
+	if (ixgbe_removed(reg_addr))
+		return;
+	writeq(value, reg_addr + reg);
+}
+#define IXGBE_WRITE_REG64(a, reg, value) ixgbe_write_reg64((a), (reg), (value))
+
+static inline u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg)
+{
+	u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
+	u32 value;
+
+	if (ixgbe_removed(reg_addr))
+		return IXGBE_FAILED_READ_REG;
+	value = readl(reg_addr + reg);
+	if (unlikely(value == IXGBE_FAILED_READ_REG))
+		ixgbe_check_remove(hw, reg);
+	return value;
+}
+#define IXGBE_READ_REG(a, reg) ixgbe_read_reg((a), (reg))
+
+#define IXGBE_WRITE_REG_ARRAY(a, reg, offset, value) \
+		ixgbe_write_reg((a), (reg) + ((offset) << 2), (value))
+
+#define IXGBE_READ_REG_ARRAY(a, reg, offset) \
+		ixgbe_read_reg((a), (reg) + ((offset) << 2))
+
+#define IXGBE_WRITE_FLUSH(a) ixgbe_read_reg((a), IXGBE_STATUS)
 
 #define ixgbe_hw_to_netdev(hw) (((struct ixgbe_adapter *)(hw)->back)->netdev)
 
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 4e7c9b098b58..043307024c4a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -1342,61 +1342,61 @@ static bool reg_pattern_test(struct ixgbe_adapter *adapter, u64 *data, int reg,
 	static const u32 test_pattern[] = {
 		0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
 
+	if (ixgbe_removed(adapter->hw.hw_addr)) {
+		*data = 1;
+		return 1;
+	}
 	for (pat = 0; pat < ARRAY_SIZE(test_pattern); pat++) {
-		before = readl(adapter->hw.hw_addr + reg);
-		writel((test_pattern[pat] & write),
-		       (adapter->hw.hw_addr + reg));
-		val = readl(adapter->hw.hw_addr + reg);
+		before = ixgbe_read_reg(&adapter->hw, reg);
+		ixgbe_write_reg(&adapter->hw, reg, test_pattern[pat] & write);
+		val = ixgbe_read_reg(&adapter->hw, reg);
 		if (val != (test_pattern[pat] & write & mask)) {
 			e_err(drv, "pattern test reg %04X failed: got "
 			      "0x%08X expected 0x%08X\n",
 			      reg, val, (test_pattern[pat] & write & mask));
 			*data = reg;
-			writel(before, adapter->hw.hw_addr + reg);
-			return 1;
+			ixgbe_write_reg(&adapter->hw, reg, before);
+			return true;
 		}
-		writel(before, adapter->hw.hw_addr + reg);
+		ixgbe_write_reg(&adapter->hw, reg, before);
 	}
-	return 0;
+	return false;
 }
 
 static bool reg_set_and_check(struct ixgbe_adapter *adapter, u64 *data, int reg,
 			      u32 mask, u32 write)
 {
 	u32 val, before;
-	before = readl(adapter->hw.hw_addr + reg);
-	writel((write & mask), (adapter->hw.hw_addr + reg));
-	val = readl(adapter->hw.hw_addr + reg);
+
+	if (ixgbe_removed(adapter->hw.hw_addr)) {
+		*data = 1;
+		return 1;
+	}
+	before = ixgbe_read_reg(&adapter->hw, reg);
+	ixgbe_write_reg(&adapter->hw, reg, write & mask);
+	val = ixgbe_read_reg(&adapter->hw, reg);
 	if ((write & mask) != (val & mask)) {
 		e_err(drv, "set/check reg %04X test failed: got 0x%08X "
 		      "expected 0x%08X\n", reg, (val & mask), (write & mask));
 		*data = reg;
-		writel(before, (adapter->hw.hw_addr + reg));
-		return 1;
+		ixgbe_write_reg(&adapter->hw, reg, before);
+		return true;
 	}
-	writel(before, (adapter->hw.hw_addr + reg));
-	return 0;
+	ixgbe_write_reg(&adapter->hw, reg, before);
+	return false;
 }
 
-#define REG_PATTERN_TEST(reg, mask, write)				      \
-	do {								      \
-		if (reg_pattern_test(adapter, data, reg, mask, write))	      \
-			return 1;					      \
-	} while (0)							      \
-
-
-#define REG_SET_AND_CHECK(reg, mask, write)				      \
-	do {								      \
-		if (reg_set_and_check(adapter, data, reg, mask, write))	      \
-			return 1;					      \
-	} while (0)							      \
-
 static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
 {
 	const struct ixgbe_reg_test *test;
 	u32 value, before, after;
 	u32 i, toggle;
 
+	if (ixgbe_removed(adapter->hw.hw_addr)) {
+		e_err(drv, "Adapter removed - register test blocked\n");
+		*data = 1;
+		return 1;
+	}
 	switch (adapter->hw.mac.type) {
 	case ixgbe_mac_82598EB:
 		toggle = 0x7FFFF3FF;
@@ -1419,10 +1419,10 @@ static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
 	 * tests.  Some bits are read-only, some toggle, and some
 	 * are writeable on newer MACs.
 	 */
-	before = IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS);
-	value = (IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS) & toggle);
-	IXGBE_WRITE_REG(&adapter->hw, IXGBE_STATUS, toggle);
-	after = IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS) & toggle;
+	before = ixgbe_read_reg(&adapter->hw, IXGBE_STATUS);
+	value = (ixgbe_read_reg(&adapter->hw, IXGBE_STATUS) & toggle);
+	ixgbe_write_reg(&adapter->hw, IXGBE_STATUS, toggle);
+	after = ixgbe_read_reg(&adapter->hw, IXGBE_STATUS) & toggle;
 	if (value != after) {
 		e_err(drv, "failed STATUS register test got: 0x%08X "
 		      "expected: 0x%08X\n", after, value);
@@ -1430,7 +1430,7 @@ static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
 		return 1;
 	}
 	/* restore previous status */
-	IXGBE_WRITE_REG(&adapter->hw, IXGBE_STATUS, before);
+	ixgbe_write_reg(&adapter->hw, IXGBE_STATUS, before);
 
 	/*
 	 * Perform the remainder of the register test, looping through
@@ -1438,38 +1438,47 @@ static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
 	 */
 	while (test->reg) {
 		for (i = 0; i < test->array_len; i++) {
+			bool b = false;
+
 			switch (test->test_type) {
 			case PATTERN_TEST:
-				REG_PATTERN_TEST(test->reg + (i * 0x40),
-						 test->mask,
-						 test->write);
+				b = reg_pattern_test(adapter, data,
+						     test->reg + (i * 0x40),
+						     test->mask,
+						     test->write);
 				break;
 			case SET_READ_TEST:
-				REG_SET_AND_CHECK(test->reg + (i * 0x40),
-						  test->mask,
-						  test->write);
+				b = reg_set_and_check(adapter, data,
+						      test->reg + (i * 0x40),
+						      test->mask,
+						      test->write);
 				break;
 			case WRITE_NO_TEST:
-				writel(test->write,
-				       (adapter->hw.hw_addr + test->reg)
-				       + (i * 0x40));
+				ixgbe_write_reg(&adapter->hw,
+						test->reg + (i * 0x40),
+						test->write);
 				break;
 			case TABLE32_TEST:
-				REG_PATTERN_TEST(test->reg + (i * 4),
-						 test->mask,
-						 test->write);
+				b = reg_pattern_test(adapter, data,
+						     test->reg + (i * 4),
+						     test->mask,
+						     test->write);
 				break;
 			case TABLE64_TEST_LO:
-				REG_PATTERN_TEST(test->reg + (i * 8),
-						 test->mask,
-						 test->write);
+				b = reg_pattern_test(adapter, data,
+						     test->reg + (i * 8),
+						     test->mask,
+						     test->write);
 				break;
 			case TABLE64_TEST_HI:
-				REG_PATTERN_TEST((test->reg + 4) + (i * 8),
-						 test->mask,
-						 test->write);
+				b = reg_pattern_test(adapter, data,
+						     (test->reg + 4) + (i * 8),
+						     test->mask,
+						     test->write);
 				break;
 			}
+			if (b)
+				return 1;
 		}
 		test++;
 	}
@@ -1954,6 +1963,15 @@ static void ixgbe_diag_test(struct net_device *netdev,
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 	bool if_running = netif_running(netdev);
 
+	if (ixgbe_removed(adapter->hw.hw_addr)) {
+		e_err(hw, "Adapter removed - test blocked\n");
+		data[0] = 1;
+		data[1] = 1;
+		data[2] = 1;
+		data[3] = 1;
+		eth_test->flags |= ETH_TEST_FL_FAILED;
+		return;
+	}
 	set_bit(__IXGBE_TESTING, &adapter->state);
 	if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
 		struct ixgbe_hw *hw = &adapter->hw;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 5bcc870f8367..6d4ada72dfd0 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -64,7 +64,7 @@ char ixgbe_default_device_descr[] =
 static char ixgbe_default_device_descr[] =
 			      "Intel(R) 10 Gigabit Network Connection";
 #endif
-#define DRV_VERSION "3.15.1-k"
+#define DRV_VERSION "3.19.1-k"
 const char ixgbe_driver_version[] = DRV_VERSION;
 static const char ixgbe_copyright[] =
 				"Copyright (c) 1999-2013 Intel Corporation.";
@@ -278,10 +278,41 @@ static void ixgbe_check_minimum_link(struct ixgbe_adapter *adapter,
 static void ixgbe_service_event_schedule(struct ixgbe_adapter *adapter)
 {
 	if (!test_bit(__IXGBE_DOWN, &adapter->state) &&
+	    !test_bit(__IXGBE_REMOVING, &adapter->state) &&
 	    !test_and_set_bit(__IXGBE_SERVICE_SCHED, &adapter->state))
 		schedule_work(&adapter->service_task);
 }
 
+static void ixgbe_remove_adapter(struct ixgbe_hw *hw)
+{
+	struct ixgbe_adapter *adapter = hw->back;
+
+	if (!hw->hw_addr)
+		return;
+	hw->hw_addr = NULL;
+	e_dev_err("Adapter removed\n");
+	ixgbe_service_event_schedule(adapter);
+}
+
+void ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg)
+{
+	u32 value;
+
+	/* The following check not only optimizes slightly by avoiding a
+	 * read of the status register when the register just read was
+	 * itself the status register and returned IXGBE_FAILED_READ_REG,
+	 * but also blocks any potential recursion.
+	 */
+	if (reg == IXGBE_STATUS) {
+		ixgbe_remove_adapter(hw);
+		return;
+	}
+	value = ixgbe_read_reg(hw, IXGBE_STATUS);
+	if (value == IXGBE_FAILED_READ_REG)
+		ixgbe_remove_adapter(hw);
+}
+
 static void ixgbe_service_event_complete(struct ixgbe_adapter *adapter)
 {
 	BUG_ON(!test_bit(__IXGBE_SERVICE_SCHED, &adapter->state));
@@ -1314,7 +1345,7 @@ static inline void ixgbe_release_rx_desc(struct ixgbe_ring *rx_ring, u32 val)
 	 * such as IA-64).
 	 */
 	wmb();
-	writel(val, rx_ring->tail);
+	ixgbe_write_tail(rx_ring, val);
 }
 
 static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
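The wmb() before the tail update above is the usual producer-side ordering rule: descriptor contents must be globally visible before the doorbell write that hands them to hardware; routing the write through ixgbe_write_tail() additionally respects removal. A small stand-alone model of the same sequence, with a C11 release fence standing in for wmb() (struct and field names are illustrative):

/* Stand-alone model of "fill descriptors, barrier, bump tail". The release
 * fence stands in for wmb(); struct and field names are illustrative.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 8

struct rx_desc { uint64_t addr; uint32_t len; uint32_t status; };

struct rx_ring {
	struct rx_desc desc[RING_SIZE];
	_Atomic uint32_t tail;		/* the real code writes this via writel() */
};

static void release_rx_desc(struct rx_ring *ring, uint32_t val)
{
	/* make the descriptor contents visible before the doorbell */
	atomic_thread_fence(memory_order_release);
	atomic_store_explicit(&ring->tail, val, memory_order_relaxed);
}

int main(void)
{
	struct rx_ring ring = { .tail = 0 };

	ring.desc[0].addr = 0x1000;	/* pretend this is a DMA-mapped buffer */
	ring.desc[0].len = 2048;
	release_rx_desc(&ring, 1);	/* hardware may now fetch desc[0] */

	printf("tail = %u\n", atomic_load(&ring.tail));
	return 0;
}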
@@ -2969,7 +3000,7 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
 			ring->count * sizeof(union ixgbe_adv_tx_desc));
 	IXGBE_WRITE_REG(hw, IXGBE_TDH(reg_idx), 0);
 	IXGBE_WRITE_REG(hw, IXGBE_TDT(reg_idx), 0);
-	ring->tail = hw->hw_addr + IXGBE_TDT(reg_idx);
+	ring->tail = adapter->io_addr + IXGBE_TDT(reg_idx);
 
 	/*
 	 * set WTHRESH to encourage burst writeback, it should not be set
@@ -3308,6 +3339,8 @@ static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
 	u32 rxdctl;
 	u8 reg_idx = ring->reg_idx;
 
+	if (ixgbe_removed(hw->hw_addr))
+		return;
 	/* RXDCTL.EN will return 0 on 82598 if link is down, so skip it */
 	if (hw->mac.type == ixgbe_mac_82598EB &&
 	    !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
@@ -3332,6 +3365,8 @@ void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter,
 	u32 rxdctl;
 	u8 reg_idx = ring->reg_idx;
 
+	if (ixgbe_removed(hw->hw_addr))
+		return;
 	rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
 	rxdctl &= ~IXGBE_RXDCTL_ENABLE;
 
@@ -3372,7 +3407,7 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
 			ring->count * sizeof(union ixgbe_adv_rx_desc));
 	IXGBE_WRITE_REG(hw, IXGBE_RDH(reg_idx), 0);
 	IXGBE_WRITE_REG(hw, IXGBE_RDT(reg_idx), 0);
-	ring->tail = hw->hw_addr + IXGBE_RDT(reg_idx);
+	ring->tail = adapter->io_addr + IXGBE_RDT(reg_idx);
 
 	ixgbe_configure_srrctl(adapter, ring);
 	ixgbe_configure_rscctl(adapter, ring);
@@ -4572,6 +4607,7 @@ static void ixgbe_up_complete(struct ixgbe_adapter *adapter)
 	if (hw->mac.ops.enable_tx_laser)
 		hw->mac.ops.enable_tx_laser(hw);
 
+	smp_mb__before_clear_bit();
 	clear_bit(__IXGBE_DOWN, &adapter->state);
 	ixgbe_napi_enable_all(adapter);
 
@@ -4656,6 +4692,8 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
 	struct ixgbe_hw *hw = &adapter->hw;
 	int err;
 
+	if (ixgbe_removed(hw->hw_addr))
+		return;
 	/* lock SFP init bit to prevent race conditions with the watchdog */
 	while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
 		usleep_range(1000, 2000);
@@ -4783,7 +4821,8 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
 	int i;
 
 	/* signal that we are down to the interrupt handler */
-	set_bit(__IXGBE_DOWN, &adapter->state);
+	if (test_and_set_bit(__IXGBE_DOWN, &adapter->state))
+		return; /* do nothing if already down */
 
 	/* disable receives */
 	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
@@ -5028,7 +5067,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
 
 	/* assign number of SR-IOV VFs */
 	if (hw->mac.type != ixgbe_mac_82598EB) {
-		if (max_vfs > 63) {
+		if (max_vfs > IXGBE_MAX_VFS_DRV_LIMIT) {
 			adapter->num_vfs = 0;
 			e_dev_warn("max_vfs parameter out of range. Not assigning any SR-IOV VFs\n");
 		} else {
@@ -5874,8 +5913,9 @@ static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter)
 	u64 eics = 0;
 	int i;
 
-	/* If we're down or resetting, just bail */
+	/* If we're down, removing or resetting, just bail */
 	if (test_bit(__IXGBE_DOWN, &adapter->state) ||
+	    test_bit(__IXGBE_REMOVING, &adapter->state) ||
 	    test_bit(__IXGBE_RESETTING, &adapter->state))
 		return;
 
@@ -6122,8 +6162,9 @@ static void ixgbe_spoof_check(struct ixgbe_adapter *adapter)
  **/
 static void ixgbe_watchdog_subtask(struct ixgbe_adapter *adapter)
 {
-	/* if interface is down do nothing */
+	/* if interface is down, removing or resetting, do nothing */
 	if (test_bit(__IXGBE_DOWN, &adapter->state) ||
+	    test_bit(__IXGBE_REMOVING, &adapter->state) ||
 	    test_bit(__IXGBE_RESETTING, &adapter->state))
 		return;
 
@@ -6341,8 +6382,9 @@ static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter)
 
 	adapter->flags2 &= ~IXGBE_FLAG2_RESET_REQUESTED;
 
-	/* If we're already down or resetting, just bail */
+	/* If we're already down, removing or resetting, just bail */
 	if (test_bit(__IXGBE_DOWN, &adapter->state) ||
+	    test_bit(__IXGBE_REMOVING, &adapter->state) ||
 	    test_bit(__IXGBE_RESETTING, &adapter->state))
 		return;
 
@@ -6350,7 +6392,9 @@ static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter)
 	netdev_err(adapter->netdev, "Reset adapter\n");
 	adapter->tx_timeout_count++;
 
+	rtnl_lock();
 	ixgbe_reinit_locked(adapter);
+	rtnl_unlock();
 }
 
 /**
@@ -6362,6 +6406,15 @@ static void ixgbe_service_task(struct work_struct *work)
 	struct ixgbe_adapter *adapter = container_of(work,
 						     struct ixgbe_adapter,
 						     service_task);
+	if (ixgbe_removed(adapter->hw.hw_addr)) {
+		if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
+			rtnl_lock();
+			ixgbe_down(adapter);
+			rtnl_unlock();
+		}
+		ixgbe_service_event_complete(adapter);
+		return;
+	}
 	ixgbe_reset_subtask(adapter);
 	ixgbe_sfp_detection_subtask(adapter);
 	ixgbe_sfp_link_config_subtask(adapter);
@@ -6693,7 +6746,7 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
 	tx_ring->next_to_use = i;
 
 	/* notify HW of packet */
-	writel(i, tx_ring->tail);
+	ixgbe_write_tail(tx_ring, i);
 
 	return;
 dma_error:
@@ -7874,6 +7927,7 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
 			      pci_resource_len(pdev, 0));
+	adapter->io_addr = hw->hw_addr;
 	if (!hw->hw_addr) {
 		err = -EIO;
 		goto err_ioremap;
@@ -7965,8 +8019,8 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	/* Mailbox */
 	ixgbe_init_mbx_params_pf(hw);
 	memcpy(&hw->mbx.ops, ii->mbx_ops, sizeof(hw->mbx.ops));
+	pci_sriov_set_totalvfs(pdev, IXGBE_MAX_VFS_DRV_LIMIT);
 	ixgbe_enable_sriov(adapter);
-	pci_sriov_set_totalvfs(pdev, 63);
 skip_sriov:
 
 #endif
@@ -8182,7 +8236,7 @@ err_register:
 err_sw_init:
 	ixgbe_disable_sriov(adapter);
 	adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;
-	iounmap(hw->hw_addr);
+	iounmap(adapter->io_addr);
 err_ioremap:
 	free_netdev(netdev);
 err_alloc_etherdev:
@@ -8210,7 +8264,7 @@ static void ixgbe_remove(struct pci_dev *pdev)
 
 	ixgbe_dbg_adapter_exit(adapter);
 
-	set_bit(__IXGBE_DOWN, &adapter->state);
+	set_bit(__IXGBE_REMOVING, &adapter->state);
 	cancel_work_sync(&adapter->service_task);
 
 
@@ -8249,7 +8303,7 @@ static void ixgbe_remove(struct pci_dev *pdev)
 	kfree(adapter->ixgbe_ieee_ets);
 
 #endif
-	iounmap(adapter->hw.hw_addr);
+	iounmap(adapter->io_addr);
 	pci_release_selected_regions(pdev, pci_select_bars(pdev,
 				     IORESOURCE_MEM));
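The adapter->io_addr copy introduced in this patch exists because hw->hw_addr can be cleared at any time by the surprise-removal path, while iounmap() still needs the original mapping during teardown. A toy model of that split, with malloc()/free() standing in for ioremap()/iounmap() and all names purely illustrative:

/* Toy model of why the mapping is remembered twice: the "live" pointer can
 * be invalidated on surprise removal, while teardown still needs the
 * original for iounmap(). malloc()/free() stand in for ioremap()/iounmap().
 */
#include <stdio.h>
#include <stdlib.h>

struct fake_hw      { unsigned char *hw_addr; };
struct fake_adapter { struct fake_hw hw; unsigned char *io_addr; };

static void probe(struct fake_adapter *a)
{
	a->hw.hw_addr = malloc(4096);	/* stands in for ioremap() */
	a->io_addr = a->hw.hw_addr;	/* keep a copy for teardown */
}

static void surprise_removal(struct fake_adapter *a)
{
	a->hw.hw_addr = NULL;		/* register accessors now bail out */
}

static void remove_device(struct fake_adapter *a)
{
	free(a->io_addr);		/* stands in for iounmap(adapter->io_addr) */
	a->io_addr = NULL;
}

int main(void)
{
	struct fake_adapter a = { { NULL }, NULL };

	probe(&a);
	surprise_removal(&a);		/* hw_addr is gone ... */
	remove_device(&a);		/* ... but the mapping can still be released */
	printf("done\n");
	return 0;
}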
 
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
index d4a64e665398..cc3101afd29f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
@@ -27,8 +27,7 @@
 
 #include <linux/pci.h>
 #include <linux/delay.h>
-#include "ixgbe_type.h"
-#include "ixgbe_common.h"
+#include "ixgbe.h"
 #include "ixgbe_mbx.h"
 
 /**
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index 39217e5ff7dc..132557c318f8 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -29,7 +29,7 @@
 #include <linux/delay.h>
 #include <linux/sched.h>
 
-#include "ixgbe_common.h"
+#include "ixgbe.h"
 #include "ixgbe_phy.h"
 
 static void ixgbe_i2c_start(struct ixgbe_hw *hw);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 72084f70adbb..dff0977876f7 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -148,7 +148,7 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter)
 		 * physical function.  If the user requests greater than
 		 * 63 VFs then it is an error - reset to default of zero.
 		 */
-		adapter->num_vfs = min_t(unsigned int, adapter->num_vfs, 63);
+		adapter->num_vfs = min_t(unsigned int, adapter->num_vfs, IXGBE_MAX_VFS_DRV_LIMIT);
 
 		err = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
 		if (err) {
@@ -257,7 +257,7 @@ static int ixgbe_pci_sriov_enable(struct pci_dev *dev, int num_vfs)
 	 * PF.  The PCI bus driver already checks for other values out of
 	 * range.
 	 */
-	if (num_vfs > 63) {
+	if (num_vfs > IXGBE_MAX_VFS_DRV_LIMIT) {
 		err = -EPERM;
 		goto err_out;
 	}
@@ -631,11 +631,14 @@ int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask)
 
 static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
 {
+	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
 	struct ixgbe_hw *hw = &adapter->hw;
 	unsigned char *vf_mac = adapter->vfinfo[vf].vf_mac_addresses;
 	u32 reg, reg_offset, vf_shift;
 	u32 msgbuf[4] = {0, 0, 0, 0};
 	u8 *addr = (u8 *)(&msgbuf[1]);
+	u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
+	int i;
 
 	e_info(probe, "VF Reset msg received from vf %d\n", vf);
 
@@ -654,6 +657,17 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
 	reg |= 1 << vf_shift;
 	IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg);
 
+	/* force drop enable for all VF Rx queues */
+	for (i = vf * q_per_pool; i < ((vf + 1) * q_per_pool); i++) {
+		/* flush previous write */
+		IXGBE_WRITE_FLUSH(hw);
+
+		/* indicate to hardware that we want to set drop enable */
+		reg = IXGBE_QDE_WRITE | IXGBE_QDE_ENABLE;
+		reg |= i <<  IXGBE_QDE_IDX_SHIFT;
+		IXGBE_WRITE_REG(hw, IXGBE_QDE, reg);
+	}
+
 	/* enable receive for vf */
 	reg = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
 	reg |= 1 << vf_shift;
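The drop-enable loop above programs one queue at a time by packing the queue index into the IXGBE_QDE index field and setting IXGBE_QDE_WRITE as the commit bit. The values it ends up writing can be recomputed stand-alone from the #defines this patch adds (q_per_pool and vf below are arbitrary example inputs):

/* Recompute the QDE values written by the loop above, using the #defines
 * this patch adds to ixgbe_type.h; q_per_pool = 2 and vf = 3 are arbitrary
 * example inputs.
 */
#include <stdint.h>
#include <stdio.h>

#define IXGBE_QDE_ENABLE	0x00000001
#define IXGBE_QDE_IDX_SHIFT	8
#define IXGBE_QDE_WRITE		0x00010000

int main(void)
{
	unsigned int q_per_pool = 2, vf = 3, i;

	for (i = vf * q_per_pool; i < (vf + 1) * q_per_pool; i++) {
		uint32_t reg = IXGBE_QDE_WRITE | IXGBE_QDE_ENABLE |
			       (i << IXGBE_QDE_IDX_SHIFT);

		printf("queue %u -> QDE = 0x%08x\n", i, reg);
	}
	return 0;
}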
@@ -684,6 +698,15 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
 	reg |= (1 << vf_shift);
 	IXGBE_WRITE_REG(hw, IXGBE_VMECM(reg_offset), reg);
 
+	/*
+	 * Reset the VF's TDWBAL and TDWBAH registers,
+	 * which are not cleared by an FLR
+	 */
+	for (i = 0; i < q_per_pool; i++) {
+		IXGBE_WRITE_REG(hw, IXGBE_PVFTDWBAHn(q_per_pool, vf, i), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_PVFTDWBALn(q_per_pool, vf, i), 0);
+	}
+
 	/* reply to reset with ack and vf mac address */
 	msgbuf[0] = IXGBE_VF_RESET;
 	if (!is_zero_ether_addr(vf_mac)) {
@@ -717,8 +740,7 @@ static int ixgbe_set_vf_mac_addr(struct ixgbe_adapter *adapter,
 	}
 
 	if (adapter->vfinfo[vf].pf_set_mac &&
-	    memcmp(adapter->vfinfo[vf].vf_mac_addresses, new_mac,
-		   ETH_ALEN)) {
+	    !ether_addr_equal(adapter->vfinfo[vf].vf_mac_addresses, new_mac)) {
 		e_warn(drv,
 		       "VF %d attempted to override administratively set MAC address\n"
 		       "Reload the VF driver to resume operations\n",
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
index 4713f9fc7f46..8bd29190514e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
@@ -28,6 +28,11 @@
 #ifndef _IXGBE_SRIOV_H_
 #define _IXGBE_SRIOV_H_
 
+/*  The ixgbe driver limits the max number of VFs that can be enabled to
+ *  63 (IXGBE_MAX_VF_FUNCTIONS - 1)
+ */
+#define IXGBE_MAX_VFS_DRV_LIMIT  (IXGBE_MAX_VF_FUNCTIONS - 1)
+
 void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter);
 void ixgbe_msg_task(struct ixgbe_adapter *adapter);
 int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c
index d118def16f35..e74ae3682733 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c
@@ -111,29 +111,29 @@ static int ixgbe_add_hwmon_attr(struct ixgbe_adapter *adapter,
 	unsigned int n_attr;
 	struct hwmon_attr *ixgbe_attr;
 
-	n_attr = adapter->ixgbe_hwmon_buff.n_hwmon;
-	ixgbe_attr = &adapter->ixgbe_hwmon_buff.hwmon_list[n_attr];
+	n_attr = adapter->ixgbe_hwmon_buff->n_hwmon;
+	ixgbe_attr = &adapter->ixgbe_hwmon_buff->hwmon_list[n_attr];
 
 	switch (type) {
 	case IXGBE_HWMON_TYPE_LOC:
 		ixgbe_attr->dev_attr.show = ixgbe_hwmon_show_location;
 		snprintf(ixgbe_attr->name, sizeof(ixgbe_attr->name),
-			 "temp%u_label", offset);
+			 "temp%u_label", offset + 1);
 		break;
 	case IXGBE_HWMON_TYPE_TEMP:
 		ixgbe_attr->dev_attr.show = ixgbe_hwmon_show_temp;
 		snprintf(ixgbe_attr->name, sizeof(ixgbe_attr->name),
-			 "temp%u_input", offset);
+			 "temp%u_input", offset + 1);
 		break;
 	case IXGBE_HWMON_TYPE_CAUTION:
 		ixgbe_attr->dev_attr.show = ixgbe_hwmon_show_cautionthresh;
 		snprintf(ixgbe_attr->name, sizeof(ixgbe_attr->name),
-			 "temp%u_max", offset);
+			 "temp%u_max", offset + 1);
 		break;
 	case IXGBE_HWMON_TYPE_MAX:
 		ixgbe_attr->dev_attr.show = ixgbe_hwmon_show_maxopthresh;
 		snprintf(ixgbe_attr->name, sizeof(ixgbe_attr->name),
-			 "temp%u_crit", offset);
+			 "temp%u_crit", offset + 1);
 		break;
 	default:
 		rc = -EPERM;
@@ -147,32 +147,17 @@ static int ixgbe_add_hwmon_attr(struct ixgbe_adapter *adapter,
 	ixgbe_attr->dev_attr.store = NULL;
 	ixgbe_attr->dev_attr.attr.mode = S_IRUGO;
 	ixgbe_attr->dev_attr.attr.name = ixgbe_attr->name;
+	sysfs_attr_init(&ixgbe_attr->dev_attr.attr);
 
-	rc = device_create_file(&adapter->pdev->dev,
-				&ixgbe_attr->dev_attr);
+	adapter->ixgbe_hwmon_buff->attrs[n_attr] = &ixgbe_attr->dev_attr.attr;
 
-	if (rc == 0)
-		++adapter->ixgbe_hwmon_buff.n_hwmon;
+	++adapter->ixgbe_hwmon_buff->n_hwmon;
 
-	return rc;
+	return 0;
 }
 
 static void ixgbe_sysfs_del_adapter(struct ixgbe_adapter *adapter)
 {
-	int i;
-
-	if (adapter == NULL)
-		return;
-
-	for (i = 0; i < adapter->ixgbe_hwmon_buff.n_hwmon; i++) {
-		device_remove_file(&adapter->pdev->dev,
-			   &adapter->ixgbe_hwmon_buff.hwmon_list[i].dev_attr);
-	}
-
-	kfree(adapter->ixgbe_hwmon_buff.hwmon_list);
-
-	if (adapter->ixgbe_hwmon_buff.device)
-		hwmon_device_unregister(adapter->ixgbe_hwmon_buff.device);
 }
 
 /* called from ixgbe_main.c */
@@ -184,9 +169,9 @@ void ixgbe_sysfs_exit(struct ixgbe_adapter *adapter)
 /* called from ixgbe_main.c */
 int ixgbe_sysfs_init(struct ixgbe_adapter *adapter)
 {
-	struct hwmon_buff *ixgbe_hwmon = &adapter->ixgbe_hwmon_buff;
+	struct hwmon_buff *ixgbe_hwmon;
+	struct device *hwmon_dev;
 	unsigned int i;
-	int n_attrs;
 	int rc = 0;
 
 	/* If this method isn't defined we don't support thermals */
@@ -198,23 +183,13 @@ int ixgbe_sysfs_init(struct ixgbe_adapter *adapter)
 	if (adapter->hw.mac.ops.init_thermal_sensor_thresh(&adapter->hw))
 		goto exit;
 
-	/*
-	 * Allocation space for max attributs
-	 * max num sensors * values (loc, temp, max, caution)
-	 */
-	n_attrs = IXGBE_MAX_SENSORS * 4;
-	ixgbe_hwmon->hwmon_list = kcalloc(n_attrs, sizeof(struct hwmon_attr),
-					  GFP_KERNEL);
-	if (!ixgbe_hwmon->hwmon_list) {
+	ixgbe_hwmon = devm_kzalloc(&adapter->pdev->dev, sizeof(*ixgbe_hwmon),
+				   GFP_KERNEL);
+	if (ixgbe_hwmon == NULL) {
 		rc = -ENOMEM;
-		goto err;
-	}
-
-	ixgbe_hwmon->device = hwmon_device_register(&adapter->pdev->dev);
-	if (IS_ERR(ixgbe_hwmon->device)) {
-		rc = PTR_ERR(ixgbe_hwmon->device);
-		goto err;
+		goto exit;
 	}
+	adapter->ixgbe_hwmon_buff = ixgbe_hwmon;
 
 	for (i = 0; i < IXGBE_MAX_SENSORS; i++) {
 		/*
@@ -226,17 +201,28 @@ int ixgbe_sysfs_init(struct ixgbe_adapter *adapter)
 
 		/* Bail if any hwmon attr struct fails to initialize */
 		rc = ixgbe_add_hwmon_attr(adapter, i, IXGBE_HWMON_TYPE_CAUTION);
-		rc |= ixgbe_add_hwmon_attr(adapter, i, IXGBE_HWMON_TYPE_LOC);
-		rc |= ixgbe_add_hwmon_attr(adapter, i, IXGBE_HWMON_TYPE_TEMP);
-		rc |= ixgbe_add_hwmon_attr(adapter, i, IXGBE_HWMON_TYPE_MAX);
 		if (rc)
-			goto err;
+			goto exit;
+		rc = ixgbe_add_hwmon_attr(adapter, i, IXGBE_HWMON_TYPE_LOC);
+		if (rc)
+			goto exit;
+		rc = ixgbe_add_hwmon_attr(adapter, i, IXGBE_HWMON_TYPE_TEMP);
+		if (rc)
+			goto exit;
+		rc = ixgbe_add_hwmon_attr(adapter, i, IXGBE_HWMON_TYPE_MAX);
+		if (rc)
+			goto exit;
 	}
 
-	goto exit;
+	ixgbe_hwmon->groups[0] = &ixgbe_hwmon->group;
+	ixgbe_hwmon->group.attrs = ixgbe_hwmon->attrs;
 
-err:
-	ixgbe_sysfs_del_adapter(adapter);
+	hwmon_dev = devm_hwmon_device_register_with_groups(&adapter->pdev->dev,
+							   "ixgbe",
+							   ixgbe_hwmon,
+							   ixgbe_hwmon->groups);
+	if (IS_ERR(hwmon_dev))
+		rc = PTR_ERR(hwmon_dev);
 exit:
 	return rc;
 }
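The offset + 1 change in the attribute names follows the hwmon sysfs convention that channels are numbered from 1 (temp1_input, temp1_label, ...). A tiny stand-alone snippet reproducing the generated names; the sensor count of 3 is assumed here purely for the example:

/* Reproduce the attribute names generated after the "offset + 1" change.
 * The sensor count is an assumption for illustration only.
 */
#include <stdio.h>

int main(void)
{
	static const char *suffix[] = { "label", "input", "max", "crit" };
	char name[32];
	unsigned int offset, s, n_sensors = 3;

	for (offset = 0; offset < n_sensors; offset++)
		for (s = 0; s < 4; s++) {
			snprintf(name, sizeof(name), "temp%u_%s",
				 offset + 1, suffix[s]);
			printf("%s\n", name);	/* temp1_label, temp1_input, ... */
		}
	return 0;
}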
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 7c19e969576f..0d39cfc4a3bf 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -1980,9 +1980,10 @@ enum {
 #define IXGBE_FWSM_TS_ENABLED	0x1
 
 /* Queue Drop Enable */
-#define IXGBE_QDE_ENABLE     0x00000001
-#define IXGBE_QDE_IDX_MASK   0x00007F00
-#define IXGBE_QDE_IDX_SHIFT           8
+#define IXGBE_QDE_ENABLE	0x00000001
+#define IXGBE_QDE_IDX_MASK	0x00007F00
+#define IXGBE_QDE_IDX_SHIFT	8
+#define IXGBE_QDE_WRITE		0x00010000
 
 #define IXGBE_TXD_POPTS_IXSM 0x01       /* Insert IP checksum */
 #define IXGBE_TXD_POPTS_TXSM 0x02       /* Insert TCP/UDP checksum */
@@ -2173,6 +2174,14 @@ enum {
 #define IXGBE_MBVFICR(_i)		(0x00710 + ((_i) * 4))
 #define IXGBE_VFLRE(_i)		((((_i) & 1) ? 0x001C0 : 0x00600))
 #define IXGBE_VFLREC(_i)		(0x00700 + ((_i) * 4))
+/* Translated register #defines */
+#define IXGBE_PVFTDWBAL(P)	(0x06038 + (0x40 * (P)))
+#define IXGBE_PVFTDWBAH(P)	(0x0603C + (0x40 * (P)))
+
+#define IXGBE_PVFTDWBALn(q_per_pool, vf_number, vf_q_index) \
+		(IXGBE_PVFTDWBAL((q_per_pool)*(vf_number) + (vf_q_index)))
+#define IXGBE_PVFTDWBAHn(q_per_pool, vf_number, vf_q_index) \
+		(IXGBE_PVFTDWBAH((q_per_pool)*(vf_number) + (vf_q_index)))
 
 enum ixgbe_fdir_pballoc_type {
 	IXGBE_FDIR_PBALLOC_NONE = 0,
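The translated-register macros above flatten a (VF number, per-pool queue index) pair into the PF's register space, with consecutive pool queues 0x40 apart. A quick stand-alone check of that arithmetic using the macro as added (q_per_pool = 4 is only an example value):

/* Evaluate the translated-register arithmetic for a few (vf, queue) pairs
 * using the macros exactly as added above; q_per_pool = 4 is an example.
 */
#include <stdio.h>

#define IXGBE_PVFTDWBAL(P)	(0x06038 + (0x40 * (P)))
#define IXGBE_PVFTDWBALn(q_per_pool, vf_number, vf_q_index) \
		(IXGBE_PVFTDWBAL((q_per_pool) * (vf_number) + (vf_q_index)))

int main(void)
{
	unsigned int q_per_pool = 4, vf, q;

	for (vf = 0; vf < 2; vf++)
		for (q = 0; q < q_per_pool; q++)
			printf("vf %u queue %u -> TDWBAL at 0x%05x\n", vf, q,
			       (unsigned int)IXGBE_PVFTDWBALn(q_per_pool, vf, q));
	return 0;
}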
diff --git a/drivers/net/ethernet/intel/ixgbevf/defines.h b/drivers/net/ethernet/intel/ixgbevf/defines.h
index 3147795bd135..05e4f32d84f7 100644
--- a/drivers/net/ethernet/intel/ixgbevf/defines.h
+++ b/drivers/net/ethernet/intel/ixgbevf/defines.h
@@ -183,6 +183,7 @@ typedef u32 ixgbe_link_speed;
 #define IXGBE_TXD_CMD_DEXT   0x20000000 /* Descriptor extension (0 = legacy) */
 #define IXGBE_TXD_CMD_VLE    0x40000000 /* Add VLAN tag */
 #define IXGBE_TXD_STAT_DD    0x00000001 /* Descriptor Done */
+#define IXGBE_TXD_CMD	     (IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS)
 
 /* Transmit Descriptor - Advanced */
 union ixgbe_adv_tx_desc {
@@ -277,4 +278,21 @@ struct ixgbe_adv_tx_context_desc {
 #define IXGBE_ERR_RESET_FAILED                  -2
 #define IXGBE_ERR_INVALID_ARGUMENT              -3
 
+/* Transmit Config masks */
+#define IXGBE_TXDCTL_ENABLE		0x02000000 /* Ena specific Tx Queue */
+#define IXGBE_TXDCTL_SWFLSH		0x04000000 /* Tx Desc. wr-bk flushing */
+#define IXGBE_TXDCTL_WTHRESH_SHIFT	16	   /* shift to WTHRESH bits */
+
+#define IXGBE_DCA_RXCTRL_DESC_DCA_EN	(1 << 5)  /* Rx Desc enable */
+#define IXGBE_DCA_RXCTRL_HEAD_DCA_EN	(1 << 6)  /* Rx Desc header ena */
+#define IXGBE_DCA_RXCTRL_DATA_DCA_EN	(1 << 7)  /* Rx Desc payload ena */
+#define IXGBE_DCA_RXCTRL_DESC_RRO_EN	(1 << 9)  /* Rx rd Desc Relax Order */
+#define IXGBE_DCA_RXCTRL_DATA_WRO_EN	(1 << 13) /* Rx wr data Relax Order */
+#define IXGBE_DCA_RXCTRL_HEAD_WRO_EN	(1 << 15) /* Rx wr header RO */
+
+#define IXGBE_DCA_TXCTRL_DESC_DCA_EN	(1 << 5)  /* DCA Tx Desc enable */
+#define IXGBE_DCA_TXCTRL_DESC_RRO_EN	(1 << 9)  /* Tx rd Desc Relax Order */
+#define IXGBE_DCA_TXCTRL_DESC_WRO_EN	(1 << 11) /* Tx Desc writeback RO bit */
+#define IXGBE_DCA_TXCTRL_DATA_RRO_EN	(1 << 13) /* Tx rd data Relax Order */
+
 #endif /* _IXGBEVF_DEFINES_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index 54d9acef9c4e..f68b78c732a8 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -77,11 +77,11 @@ static const struct ixgbe_stats ixgbe_gstrings_stats[] = {
 	{"tx_bytes", IXGBEVF_STAT(stats.vfgotc, stats.base_vfgotc,
 				  stats.saved_reset_vfgotc)},
 	{"tx_busy", IXGBEVF_ZSTAT(tx_busy)},
+	{"tx_restart_queue", IXGBEVF_ZSTAT(restart_queue)},
+	{"tx_timeout_count", IXGBEVF_ZSTAT(tx_timeout_count)},
 	{"multicast", IXGBEVF_STAT(stats.vfmprc, stats.base_vfmprc,
 				   stats.saved_reset_vfmprc)},
-	{"rx_csum_offload_good", IXGBEVF_ZSTAT(hw_csum_rx_good)},
 	{"rx_csum_offload_errors", IXGBEVF_ZSTAT(hw_csum_rx_error)},
-	{"tx_csum_offload_ctxt", IXGBEVF_ZSTAT(hw_csum_tx_good)},
 #ifdef BP_EXTENDED_STATS
 	{"rx_bp_poll_yield", IXGBEVF_ZSTAT(bp_rx_yields)},
 	{"rx_bp_cleaned", IXGBEVF_ZSTAT(bp_rx_cleaned)},
@@ -286,9 +286,9 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
 
 	if (!netif_running(adapter->netdev)) {
 		for (i = 0; i < adapter->num_tx_queues; i++)
-			adapter->tx_ring[i].count = new_tx_count;
+			adapter->tx_ring[i]->count = new_tx_count;
 		for (i = 0; i < adapter->num_rx_queues; i++)
-			adapter->rx_ring[i].count = new_rx_count;
+			adapter->rx_ring[i]->count = new_rx_count;
 		adapter->tx_ring_count = new_tx_count;
 		adapter->rx_ring_count = new_rx_count;
 		goto clear_reset;
@@ -303,20 +303,20 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
 
 		for (i = 0; i < adapter->num_tx_queues; i++) {
 			/* clone ring and setup updated count */
-			tx_ring[i] = adapter->tx_ring[i];
+			tx_ring[i] = *adapter->tx_ring[i];
 			tx_ring[i].count = new_tx_count;
-			err = ixgbevf_setup_tx_resources(adapter, &tx_ring[i]);
-			if (!err)
-				continue;
-			while (i) {
-				i--;
-				ixgbevf_free_tx_resources(adapter, &tx_ring[i]);
-			}
+			err = ixgbevf_setup_tx_resources(&tx_ring[i]);
+			if (err) {
+				while (i) {
+					i--;
+					ixgbevf_free_tx_resources(&tx_ring[i]);
+				}
 
-			vfree(tx_ring);
-			tx_ring = NULL;
+				vfree(tx_ring);
+				tx_ring = NULL;
 
-			goto clear_reset;
+				goto clear_reset;
+			}
 		}
 	}
 
@@ -329,20 +329,20 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
 
 		for (i = 0; i < adapter->num_rx_queues; i++) {
 			/* clone ring and setup updated count */
-			rx_ring[i] = adapter->rx_ring[i];
+			rx_ring[i] = *adapter->rx_ring[i];
 			rx_ring[i].count = new_rx_count;
-			err = ixgbevf_setup_rx_resources(adapter, &rx_ring[i]);
-			if (!err)
-				continue;
-			while (i) {
-				i--;
-				ixgbevf_free_rx_resources(adapter, &rx_ring[i]);
-			}
+			err = ixgbevf_setup_rx_resources(&rx_ring[i]);
+			if (err) {
+				while (i) {
+					i--;
+					ixgbevf_free_rx_resources(&rx_ring[i]);
+				}
 
-			vfree(rx_ring);
-			rx_ring = NULL;
+				vfree(rx_ring);
+				rx_ring = NULL;
 
-			goto clear_reset;
+				goto clear_reset;
+			}
 		}
 	}
 
@@ -352,9 +352,8 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
 	/* Tx */
 	if (tx_ring) {
 		for (i = 0; i < adapter->num_tx_queues; i++) {
-			ixgbevf_free_tx_resources(adapter,
-						  &adapter->tx_ring[i]);
-			adapter->tx_ring[i] = tx_ring[i];
+			ixgbevf_free_tx_resources(adapter->tx_ring[i]);
+			*adapter->tx_ring[i] = tx_ring[i];
 		}
 		adapter->tx_ring_count = new_tx_count;
 
@@ -365,9 +364,8 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
 	/* Rx */
 	if (rx_ring) {
 		for (i = 0; i < adapter->num_rx_queues; i++) {
-			ixgbevf_free_rx_resources(adapter,
-						  &adapter->rx_ring[i]);
-			adapter->rx_ring[i] = rx_ring[i];
+			ixgbevf_free_rx_resources(adapter->rx_ring[i]);
+			*adapter->rx_ring[i] = rx_ring[i];
 		}
 		adapter->rx_ring_count = new_rx_count;
 
@@ -382,7 +380,7 @@ clear_reset:
 	/* free Tx resources if Rx error is encountered */
 	if (tx_ring) {
 		for (i = 0; i < adapter->num_tx_queues; i++)
-			ixgbevf_free_tx_resources(adapter, &tx_ring[i]);
+			ixgbevf_free_tx_resources(&tx_ring[i]);
 		vfree(tx_ring);
 	}
 
@@ -413,15 +411,15 @@ static void ixgbevf_get_ethtool_stats(struct net_device *netdev,
 	    tx_yields = 0, tx_cleaned = 0, tx_missed = 0;
 
 	for (i = 0; i < adapter->num_rx_queues; i++) {
-		rx_yields += adapter->rx_ring[i].bp_yields;
-		rx_cleaned += adapter->rx_ring[i].bp_cleaned;
-		rx_yields += adapter->rx_ring[i].bp_yields;
+		rx_yields += adapter->rx_ring[i]->stats.yields;
+		rx_cleaned += adapter->rx_ring[i]->stats.cleaned;
+		rx_yields += adapter->rx_ring[i]->stats.yields;
 	}
 
 	for (i = 0; i < adapter->num_tx_queues; i++) {
-		tx_yields += adapter->tx_ring[i].bp_yields;
-		tx_cleaned += adapter->tx_ring[i].bp_cleaned;
-		tx_yields += adapter->tx_ring[i].bp_yields;
+		tx_yields += adapter->tx_ring[i]->stats.yields;
+		tx_cleaned += adapter->tx_ring[i]->stats.cleaned;
+		tx_yields += adapter->tx_ring[i]->stats.yields;
 	}
 
 	adapter->bp_rx_yields = rx_yields;
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index 8971e2d0a984..54829326bb09 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -46,12 +46,15 @@
 /* wrapper around a pointer to a socket buffer,
  * so a DMA handle can be stored along with the buffer */
 struct ixgbevf_tx_buffer {
-	struct sk_buff *skb;
-	dma_addr_t dma;
-	unsigned long time_stamp;
 	union ixgbe_adv_tx_desc *next_to_watch;
-	u16 length;
-	u16 mapped_as_page;
+	unsigned long time_stamp;
+	struct sk_buff *skb;
+	unsigned int bytecount;
+	unsigned short gso_segs;
+	__be16 protocol;
+	DEFINE_DMA_UNMAP_ADDR(dma);
+	DEFINE_DMA_UNMAP_LEN(len);
+	u32 tx_flags;
 };
 
 struct ixgbevf_rx_buffer {
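The reworked ixgbevf_tx_buffer above replaces the explicit dma/length/mapped_as_page fields with the DEFINE_DMA_UNMAP_ADDR()/DEFINE_DMA_UNMAP_LEN() helpers, so the unmap bookkeeping only costs space on configurations that need it. A simplified user-space model of that idea (the stand-in macros only mimic the kernel ones):

/* Simplified model of DEFINE_DMA_UNMAP_ADDR()/LEN(): the fields and
 * accessors only exist when unmap state is really needed (in the kernel,
 * CONFIG_NEED_DMA_MAP_STATE). These macros mimic the real ones for
 * illustration only.
 */
#include <stdint.h>
#include <stdio.h>

#define NEED_DMA_MAP_STATE 1

#if NEED_DMA_MAP_STATE
#define DEFINE_DMA_UNMAP_ADDR(name)	uint64_t name
#define DEFINE_DMA_UNMAP_LEN(name)	uint32_t name
#define dma_unmap_addr_set(p, f, v)	((p)->f = (v))
#define dma_unmap_len_set(p, f, v)	((p)->f = (v))
#define dma_unmap_addr(p, f)		((p)->f)
#define dma_unmap_len(p, f)		((p)->f)
#else
/* with map state disabled the fields vanish and the accessors are no-ops */
#endif

struct tx_buffer {
	DEFINE_DMA_UNMAP_ADDR(dma);
	DEFINE_DMA_UNMAP_LEN(len);
};

int main(void)
{
	struct tx_buffer buf;

	dma_unmap_addr_set(&buf, dma, 0xdead1000ULL);	/* recorded at map time */
	dma_unmap_len_set(&buf, len, 1514);
	printf("unmap dma=0x%llx len=%u\n",
	       (unsigned long long)dma_unmap_addr(&buf, dma),
	       dma_unmap_len(&buf, len));
	return 0;
}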
@@ -59,6 +62,29 @@ struct ixgbevf_rx_buffer {
 	dma_addr_t dma;
 };
 
+struct ixgbevf_stats {
+	u64 packets;
+	u64 bytes;
+#ifdef BP_EXTENDED_STATS
+	u64 yields;
+	u64 misses;
+	u64 cleaned;
+#endif
+};
+
+struct ixgbevf_tx_queue_stats {
+	u64 restart_queue;
+	u64 tx_busy;
+	u64 tx_done_old;
+};
+
+struct ixgbevf_rx_queue_stats {
+	u64 non_eop_descs;
+	u64 alloc_rx_page_failed;
+	u64 alloc_rx_buff_failed;
+	u64 csum_err;
+};
+
 struct ixgbevf_ring {
 	struct ixgbevf_ring *next;
 	struct net_device *netdev;
@@ -70,31 +96,27 @@ struct ixgbevf_ring {
 	unsigned int next_to_use;
 	unsigned int next_to_clean;
 
-	int queue_index; /* needed for multiqueue queue management */
 	union {
 		struct ixgbevf_tx_buffer *tx_buffer_info;
 		struct ixgbevf_rx_buffer *rx_buffer_info;
 	};
 
-	u64			total_bytes;
-	u64			total_packets;
-	struct u64_stats_sync	syncp;
-	u64 hw_csum_rx_error;
-	u64 hw_csum_rx_good;
-#ifdef BP_EXTENDED_STATS
-	u64 bp_yields;
-	u64 bp_misses;
-	u64 bp_cleaned;
-#endif
+	struct ixgbevf_stats stats;
+	struct u64_stats_sync syncp;
+	union {
+		struct ixgbevf_tx_queue_stats tx_stats;
+		struct ixgbevf_rx_queue_stats rx_stats;
+	};
 
-	u16 head;
-	u16 tail;
+	u64 hw_csum_rx_error;
+	u8 __iomem *tail;
 
 	u16 reg_idx; /* holds the special value that gets the hardware register
 		      * offset associated with this ring, which is different
 		      * for DCB and RSS modes */
 
 	u16 rx_buf_len;
+	int queue_index; /* needed for multiqueue queue management */
 };
 
 /* How many Rx Buffers do we bundle into one write to the hardware ? */
@@ -125,8 +147,6 @@ struct ixgbevf_ring {
 #define IXGBE_TX_FLAGS_VLAN		(u32)(1 << 1)
 #define IXGBE_TX_FLAGS_TSO		(u32)(1 << 2)
 #define IXGBE_TX_FLAGS_IPV4		(u32)(1 << 3)
-#define IXGBE_TX_FLAGS_FCOE		(u32)(1 << 4)
-#define IXGBE_TX_FLAGS_FSO		(u32)(1 << 5)
 #define IXGBE_TX_FLAGS_VLAN_MASK	0xffff0000
 #define IXGBE_TX_FLAGS_VLAN_PRIO_MASK	0x0000e000
 #define IXGBE_TX_FLAGS_VLAN_SHIFT	16
@@ -188,7 +208,7 @@ static inline bool ixgbevf_qv_lock_napi(struct ixgbevf_q_vector *q_vector)
 		q_vector->state |= IXGBEVF_QV_STATE_NAPI_YIELD;
 		rc = false;
 #ifdef BP_EXTENDED_STATS
-		q_vector->tx.ring->bp_yields++;
+		q_vector->tx.ring->stats.yields++;
 #endif
 	} else {
 		/* we don't care if someone yielded */
@@ -223,7 +243,7 @@ static inline bool ixgbevf_qv_lock_poll(struct ixgbevf_q_vector *q_vector)
 		q_vector->state |= IXGBEVF_QV_STATE_POLL_YIELD;
 		rc = false;
 #ifdef BP_EXTENDED_STATS
-		q_vector->rx.ring->bp_yields++;
+		q_vector->rx.ring->stats.yields++;
 #endif
 	} else {
 		/* preserve yield marks */
@@ -262,6 +282,7 @@ static inline bool ixgbevf_qv_disable(struct ixgbevf_q_vector *q_vector)
 	spin_lock_bh(&q_vector->lock);
 	if (q_vector->state & IXGBEVF_QV_OWNED)
 		rc = false;
+	q_vector->state |= IXGBEVF_QV_STATE_DISABLED;
 	spin_unlock_bh(&q_vector->lock);
 	return rc;
 }
@@ -315,7 +336,6 @@ static inline u16 ixgbevf_desc_unused(struct ixgbevf_ring *ring)
 struct ixgbevf_adapter {
 	struct timer_list watchdog_timer;
 	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
-	u16 bd_number;
 	struct work_struct reset_task;
 	struct ixgbevf_q_vector *q_vector[MAX_MSIX_Q_VECTORS];
 
@@ -328,25 +348,18 @@ struct ixgbevf_adapter {
 	u32 eims_other;
 
 	/* TX */
-	struct ixgbevf_ring *tx_ring;	/* One per active queue */
 	int num_tx_queues;
+	struct ixgbevf_ring *tx_ring[MAX_TX_QUEUES]; /* One per active queue */
 	u64 restart_queue;
-	u64 hw_csum_tx_good;
-	u64 lsc_int;
-	u64 hw_tso_ctxt;
-	u64 hw_tso6_ctxt;
 	u32 tx_timeout_count;
 
 	/* RX */
-	struct ixgbevf_ring *rx_ring;	/* One per active queue */
 	int num_rx_queues;
+	struct ixgbevf_ring *rx_ring[MAX_TX_QUEUES]; /* One per active queue */
 	u64 hw_csum_rx_error;
 	u64 hw_rx_no_dma_resources;
-	u64 hw_csum_rx_good;
 	u64 non_eop_descs;
 	int num_msix_vectors;
-	struct msix_entry *msix_entries;
-
 	u32 alloc_rx_page_failed;
 	u32 alloc_rx_buff_failed;
 
@@ -356,6 +369,9 @@ struct ixgbevf_adapter {
 	u32 flags;
 #define IXGBE_FLAG_IN_WATCHDOG_TASK             (u32)(1)
 #define IXGBE_FLAG_IN_NETPOLL                   (u32)(1 << 1)
+#define IXGBEVF_FLAG_QUEUE_RESET_REQUESTED	(u32)(1 << 2)
+
+	struct msix_entry *msix_entries;
 
 	/* OS defined structs */
 	struct net_device *netdev;
@@ -364,10 +380,12 @@ struct ixgbevf_adapter {
 	/* structs defined in ixgbe_vf.h */
 	struct ixgbe_hw hw;
 	u16 msg_enable;
-	struct ixgbevf_hw_stats stats;
+	u16 bd_number;
 	/* Interrupt Throttle Rate */
 	u32 eitr_param;
 
+	struct ixgbevf_hw_stats stats;
+
 	unsigned long state;
 	u64 tx_busy;
 	unsigned int tx_ring_count;
@@ -386,9 +404,9 @@ struct ixgbevf_adapter {
 	u32 link_speed;
 	bool link_up;
 
-	struct work_struct watchdog_task;
-
 	spinlock_t mbx_lock;
+
+	struct work_struct watchdog_task;
 };
 
 enum ixbgevf_state_t {
@@ -420,10 +438,10 @@ void ixgbevf_down(struct ixgbevf_adapter *adapter);
 void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter);
 void ixgbevf_reset(struct ixgbevf_adapter *adapter);
 void ixgbevf_set_ethtool_ops(struct net_device *netdev);
-int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *, struct ixgbevf_ring *);
-int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *, struct ixgbevf_ring *);
-void ixgbevf_free_rx_resources(struct ixgbevf_adapter *, struct ixgbevf_ring *);
-void ixgbevf_free_tx_resources(struct ixgbevf_adapter *, struct ixgbevf_ring *);
+int ixgbevf_setup_rx_resources(struct ixgbevf_ring *);
+int ixgbevf_setup_tx_resources(struct ixgbevf_ring *);
+void ixgbevf_free_rx_resources(struct ixgbevf_ring *);
+void ixgbevf_free_tx_resources(struct ixgbevf_ring *);
 void ixgbevf_update_stats(struct ixgbevf_adapter *adapter);
 int ethtool_ioctl(struct ifreq *ifr);
 
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 92ef4cb5a8e8..9df28985eba7 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -58,7 +58,7 @@ const char ixgbevf_driver_name[] = "ixgbevf";
 static const char ixgbevf_driver_string[] =
 	"Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";
 
-#define DRV_VERSION "2.11.3-k"
+#define DRV_VERSION "2.12.1-k"
 const char ixgbevf_driver_version[] = DRV_VERSION;
 static char ixgbevf_copyright[] =
 	"Copyright (c) 2009 - 2012 Intel Corporation.";
@@ -95,13 +95,15 @@ module_param(debug, int, 0);
 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
 
 /* forward decls */
+static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter);
 static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector);
 static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter);
 
-static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw,
-					   struct ixgbevf_ring *rx_ring,
+static inline void ixgbevf_release_rx_desc(struct ixgbevf_ring *rx_ring,
 					   u32 val)
 {
+	rx_ring->next_to_use = val;
+
 	/*
 	 * Force memory writes to complete before letting h/w
 	 * know there are new descriptors to fetch.  (Only
@@ -109,7 +111,7 @@ static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw,
 	 * such as IA-64).
 	 */
 	wmb();
-	IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rx_ring->reg_idx), val);
+	writel(val, rx_ring->tail);
 }
 
 /**
@@ -143,28 +145,25 @@ static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
 }
 
 static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_ring *tx_ring,
-					       struct ixgbevf_tx_buffer
-					       *tx_buffer_info)
-{
-	if (tx_buffer_info->dma) {
-		if (tx_buffer_info->mapped_as_page)
-			dma_unmap_page(tx_ring->dev,
-				       tx_buffer_info->dma,
-				       tx_buffer_info->length,
-				       DMA_TO_DEVICE);
-		else
+					struct ixgbevf_tx_buffer *tx_buffer)
+{
+	if (tx_buffer->skb) {
+		dev_kfree_skb_any(tx_buffer->skb);
+		if (dma_unmap_len(tx_buffer, len))
 			dma_unmap_single(tx_ring->dev,
-					 tx_buffer_info->dma,
-					 tx_buffer_info->length,
+					 dma_unmap_addr(tx_buffer, dma),
+					 dma_unmap_len(tx_buffer, len),
 					 DMA_TO_DEVICE);
-		tx_buffer_info->dma = 0;
+	} else if (dma_unmap_len(tx_buffer, len)) {
+		dma_unmap_page(tx_ring->dev,
+			       dma_unmap_addr(tx_buffer, dma),
+			       dma_unmap_len(tx_buffer, len),
+			       DMA_TO_DEVICE);
 	}
-	if (tx_buffer_info->skb) {
-		dev_kfree_skb_any(tx_buffer_info->skb);
-		tx_buffer_info->skb = NULL;
-	}
-	tx_buffer_info->time_stamp = 0;
-	/* tx_buffer_info must be completely set up in the transmit path */
+	tx_buffer->next_to_watch = NULL;
+	tx_buffer->skb = NULL;
+	dma_unmap_len_set(tx_buffer, len, 0);
+	/* tx_buffer must be completely set up in the transmit path */
 }
 
 #define IXGBE_MAX_TXD_PWR	14
@@ -185,20 +184,21 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
 				 struct ixgbevf_ring *tx_ring)
 {
 	struct ixgbevf_adapter *adapter = q_vector->adapter;
-	union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
-	struct ixgbevf_tx_buffer *tx_buffer_info;
-	unsigned int i, count = 0;
+	struct ixgbevf_tx_buffer *tx_buffer;
+	union ixgbe_adv_tx_desc *tx_desc;
 	unsigned int total_bytes = 0, total_packets = 0;
+	unsigned int budget = tx_ring->count / 2;
+	unsigned int i = tx_ring->next_to_clean;
 
 	if (test_bit(__IXGBEVF_DOWN, &adapter->state))
 		return true;
 
-	i = tx_ring->next_to_clean;
-	tx_buffer_info = &tx_ring->tx_buffer_info[i];
-	eop_desc = tx_buffer_info->next_to_watch;
+	tx_buffer = &tx_ring->tx_buffer_info[i];
+	tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
+	i -= tx_ring->count;
 
 	do {
-		bool cleaned = false;
+		union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;
 
 		/* if next_to_watch is not set then there is no work pending */
 		if (!eop_desc)
@@ -212,67 +212,90 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
 			break;
 
 		/* clear next_to_watch to prevent false hangs */
-		tx_buffer_info->next_to_watch = NULL;
+		tx_buffer->next_to_watch = NULL;
 
-		for ( ; !cleaned; count++) {
-			struct sk_buff *skb;
-			tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
-			cleaned = (tx_desc == eop_desc);
-			skb = tx_buffer_info->skb;
-
-			if (cleaned && skb) {
-				unsigned int segs, bytecount;
-
-				/* gso_segs is currently only valid for tcp */
-				segs = skb_shinfo(skb)->gso_segs ?: 1;
-				/* multiply data chunks by size of headers */
-				bytecount = ((segs - 1) * skb_headlen(skb)) +
-					    skb->len;
-				total_packets += segs;
-				total_bytes += bytecount;
-			}
+		/* update the statistics for this packet */
+		total_bytes += tx_buffer->bytecount;
+		total_packets += tx_buffer->gso_segs;
 
-			ixgbevf_unmap_and_free_tx_resource(tx_ring,
-							   tx_buffer_info);
+		/* free the skb */
+		dev_kfree_skb_any(tx_buffer->skb);
 
-			tx_desc->wb.status = 0;
+		/* unmap skb header data */
+		dma_unmap_single(tx_ring->dev,
+				 dma_unmap_addr(tx_buffer, dma),
+				 dma_unmap_len(tx_buffer, len),
+				 DMA_TO_DEVICE);
 
+		/* clear tx_buffer data */
+		tx_buffer->skb = NULL;
+		dma_unmap_len_set(tx_buffer, len, 0);
+
+		/* unmap remaining buffers */
+		while (tx_desc != eop_desc) {
+			tx_buffer++;
+			tx_desc++;
 			i++;
-			if (i == tx_ring->count)
-				i = 0;
+			if (unlikely(!i)) {
+				i -= tx_ring->count;
+				tx_buffer = tx_ring->tx_buffer_info;
+				tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
+			}
 
-			tx_buffer_info = &tx_ring->tx_buffer_info[i];
+			/* unmap any remaining paged data */
+			if (dma_unmap_len(tx_buffer, len)) {
+				dma_unmap_page(tx_ring->dev,
+					       dma_unmap_addr(tx_buffer, dma),
+					       dma_unmap_len(tx_buffer, len),
+					       DMA_TO_DEVICE);
+				dma_unmap_len_set(tx_buffer, len, 0);
+			}
+		}
+
+		/* move us one more past the eop_desc for start of next pkt */
+		tx_buffer++;
+		tx_desc++;
+		i++;
+		if (unlikely(!i)) {
+			i -= tx_ring->count;
+			tx_buffer = tx_ring->tx_buffer_info;
+			tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
 		}
 
-		eop_desc = tx_buffer_info->next_to_watch;
-	} while (count < tx_ring->count);
+		/* issue prefetch for next Tx descriptor */
+		prefetch(tx_desc);
 
+		/* update budget accounting */
+		budget--;
+	} while (likely(budget));
+
+	i += tx_ring->count;
 	tx_ring->next_to_clean = i;
+	u64_stats_update_begin(&tx_ring->syncp);
+	tx_ring->stats.bytes += total_bytes;
+	tx_ring->stats.packets += total_packets;
+	u64_stats_update_end(&tx_ring->syncp);
+	q_vector->tx.total_bytes += total_bytes;
+	q_vector->tx.total_packets += total_packets;
 
 #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
-	if (unlikely(count && netif_carrier_ok(tx_ring->netdev) &&
+	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
 		     (ixgbevf_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
 		/* Make sure that anybody stopping the queue after this
 		 * sees the new next_to_clean.
 		 */
 		smp_mb();
+
 		if (__netif_subqueue_stopped(tx_ring->netdev,
 					     tx_ring->queue_index) &&
 		    !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
 			netif_wake_subqueue(tx_ring->netdev,
 					    tx_ring->queue_index);
-			++adapter->restart_queue;
+			++tx_ring->tx_stats.restart_queue;
 		}
 	}
 
-	u64_stats_update_begin(&tx_ring->syncp);
-	tx_ring->total_bytes += total_bytes;
-	tx_ring->total_packets += total_packets;
-	u64_stats_update_end(&tx_ring->syncp);
-	q_vector->tx.total_bytes += total_bytes;
-	q_vector->tx.total_packets += total_packets;
-
-	return count < tx_ring->count;
+	return !!budget;
 }
 
 /**
@@ -341,7 +364,7 @@ static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring,
 	/* if IP and error */
 	if ((status_err & IXGBE_RXD_STAT_IPCS) &&
 	    (status_err & IXGBE_RXDADV_ERR_IPE)) {
-		ring->hw_csum_rx_error++;
+		ring->rx_stats.csum_err++;
 		return;
 	}
 
@@ -349,51 +372,46 @@ static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring,
 		return;
 
 	if (status_err & IXGBE_RXDADV_ERR_TCPE) {
-		ring->hw_csum_rx_error++;
+		ring->rx_stats.csum_err++;
 		return;
 	}
 
 	/* It must be a TCP or UDP packet with a valid checksum */
 	skb->ip_summed = CHECKSUM_UNNECESSARY;
-	ring->hw_csum_rx_good++;
 }
 
 /**
  * ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split
- * @adapter: address of board private structure
+ * @rx_ring: rx descriptor ring (for a specific queue) to setup buffers on
  **/
-static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
-				     struct ixgbevf_ring *rx_ring,
+static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring,
 				     int cleaned_count)
 {
-	struct pci_dev *pdev = adapter->pdev;
 	union ixgbe_adv_rx_desc *rx_desc;
 	struct ixgbevf_rx_buffer *bi;
 	unsigned int i = rx_ring->next_to_use;
 
-	bi = &rx_ring->rx_buffer_info[i];
-
 	while (cleaned_count--) {
 		rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
+		bi = &rx_ring->rx_buffer_info[i];
 
 		if (!bi->skb) {
 			struct sk_buff *skb;
 
 			skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
 							rx_ring->rx_buf_len);
-			if (!skb) {
-				adapter->alloc_rx_buff_failed++;
+			if (!skb)
 				goto no_buffers;
-			}
+
 			bi->skb = skb;
 
-			bi->dma = dma_map_single(&pdev->dev, skb->data,
+			bi->dma = dma_map_single(rx_ring->dev, skb->data,
 						 rx_ring->rx_buf_len,
 						 DMA_FROM_DEVICE);
-			if (dma_mapping_error(&pdev->dev, bi->dma)) {
+			if (dma_mapping_error(rx_ring->dev, bi->dma)) {
 				dev_kfree_skb(skb);
 				bi->skb = NULL;
-				dev_err(&pdev->dev, "RX DMA map failed\n");
+				dev_err(rx_ring->dev, "Rx DMA map failed\n");
 				break;
 			}
 		}
@@ -402,14 +420,12 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
 		i++;
 		if (i == rx_ring->count)
 			i = 0;
-		bi = &rx_ring->rx_buffer_info[i];
 	}
 
 no_buffers:
-	if (rx_ring->next_to_use != i) {
-		rx_ring->next_to_use = i;
-		ixgbevf_release_rx_desc(&adapter->hw, rx_ring, i);
-	}
+	rx_ring->rx_stats.alloc_rx_buff_failed++;
+	if (rx_ring->next_to_use != i)
+		ixgbevf_release_rx_desc(rx_ring, i);
 }
 
 static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
@@ -424,8 +440,6 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
 				struct ixgbevf_ring *rx_ring,
 				int budget)
 {
-	struct ixgbevf_adapter *adapter = q_vector->adapter;
-	struct pci_dev *pdev = adapter->pdev;
 	union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
 	struct ixgbevf_rx_buffer *rx_buffer_info, *next_buffer;
 	struct sk_buff *skb;
@@ -451,7 +465,7 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
 		rx_buffer_info->skb = NULL;
 
 		if (rx_buffer_info->dma) {
-			dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
+			dma_unmap_single(rx_ring->dev, rx_buffer_info->dma,
 					 rx_ring->rx_buf_len,
 					 DMA_FROM_DEVICE);
 			rx_buffer_info->dma = 0;
@@ -471,7 +485,7 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
 		if (!(staterr & IXGBE_RXD_STAT_EOP)) {
 			skb->next = next_buffer->skb;
 			IXGBE_CB(skb->next)->prev = skb;
-			adapter->non_eop_descs++;
+			rx_ring->rx_stats.non_eop_descs++;
 			goto next_desc;
 		}
 
@@ -503,7 +517,7 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
 		 * source pruning.
 		 */
 		if ((skb->pkt_type & (PACKET_BROADCAST | PACKET_MULTICAST)) &&
-		    ether_addr_equal(adapter->netdev->dev_addr,
+		    ether_addr_equal(rx_ring->netdev->dev_addr,
 				     eth_hdr(skb)->h_source)) {
 			dev_kfree_skb_irq(skb);
 			goto next_desc;
@@ -516,8 +530,7 @@ next_desc:
 
 		/* return some buffers to hardware, one at a time is too slow */
 		if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {
-			ixgbevf_alloc_rx_buffers(adapter, rx_ring,
-						 cleaned_count);
+			ixgbevf_alloc_rx_buffers(rx_ring, cleaned_count);
 			cleaned_count = 0;
 		}
 
@@ -532,11 +545,11 @@ next_desc:
 	cleaned_count = ixgbevf_desc_unused(rx_ring);
 
 	if (cleaned_count)
-		ixgbevf_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
+		ixgbevf_alloc_rx_buffers(rx_ring, cleaned_count);
 
 	u64_stats_update_begin(&rx_ring->syncp);
-	rx_ring->total_packets += total_rx_packets;
-	rx_ring->total_bytes += total_rx_bytes;
+	rx_ring->stats.packets += total_rx_packets;
+	rx_ring->stats.bytes += total_rx_bytes;
 	u64_stats_update_end(&rx_ring->syncp);
 	q_vector->rx.total_packets += total_rx_packets;
 	q_vector->rx.total_bytes += total_rx_bytes;
@@ -641,9 +654,9 @@ static int ixgbevf_busy_poll_recv(struct napi_struct *napi)
 		found = ixgbevf_clean_rx_irq(q_vector, ring, 4);
 #ifdef BP_EXTENDED_STATS
 		if (found)
-			ring->bp_cleaned += found;
+			ring->stats.cleaned += found;
 		else
-			ring->bp_misses++;
+			ring->stats.misses++;
 #endif
 		if (found)
 			break;
@@ -848,8 +861,8 @@ static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx,
 {
 	struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];
 
-	a->rx_ring[r_idx].next = q_vector->rx.ring;
-	q_vector->rx.ring = &a->rx_ring[r_idx];
+	a->rx_ring[r_idx]->next = q_vector->rx.ring;
+	q_vector->rx.ring = a->rx_ring[r_idx];
 	q_vector->rx.count++;
 }
 
@@ -858,8 +871,8 @@ static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx,
 {
 	struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];
 
-	a->tx_ring[t_idx].next = q_vector->tx.ring;
-	q_vector->tx.ring = &a->tx_ring[t_idx];
+	a->tx_ring[t_idx]->next = q_vector->tx.ring;
+	q_vector->tx.ring = a->tx_ring[t_idx];
 	q_vector->tx.count++;
 }
 
@@ -1087,6 +1100,70 @@ static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter)
 }
 
 /**
+ * ixgbevf_configure_tx_ring - Configure 82599 VF Tx ring after Reset
+ * @adapter: board private structure
+ * @ring: structure containing ring specific data
+ *
+ * Configure the Tx descriptor ring after a reset.
+ **/
+static void ixgbevf_configure_tx_ring(struct ixgbevf_adapter *adapter,
+				      struct ixgbevf_ring *ring)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	u64 tdba = ring->dma;
+	int wait_loop = 10;
+	u32 txdctl = IXGBE_TXDCTL_ENABLE;
+	u8 reg_idx = ring->reg_idx;
+
+	/* disable queue to avoid issues while updating state */
+	IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
+	IXGBE_WRITE_FLUSH(hw);
+
+	IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(reg_idx), tdba & DMA_BIT_MASK(32));
+	IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(reg_idx), tdba >> 32);
+	IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(reg_idx),
+			ring->count * sizeof(union ixgbe_adv_tx_desc));
+
+	/* disable head writeback */
+	IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAH(reg_idx), 0);
+	IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAL(reg_idx), 0);
+
+	/* enable relaxed ordering */
+	IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(reg_idx),
+			(IXGBE_DCA_TXCTRL_DESC_RRO_EN |
+			 IXGBE_DCA_TXCTRL_DATA_RRO_EN));
+
+	/* reset head and tail pointers */
+	IXGBE_WRITE_REG(hw, IXGBE_VFTDH(reg_idx), 0);
+	IXGBE_WRITE_REG(hw, IXGBE_VFTDT(reg_idx), 0);
+	ring->tail = hw->hw_addr + IXGBE_VFTDT(reg_idx);
+
+	/* reset ntu and ntc to place SW in sync with hardware */
+	ring->next_to_clean = 0;
+	ring->next_to_use = 0;
+
+	/* To avoid issues, WTHRESH + PTHRESH should always be less than
+	 * or equal to the number of on-chip descriptors, which is
+	 * currently 40.
+	 */
+	txdctl |= (8 << 16);    /* WTHRESH = 8 */
+
+	/* Setting PTHRESH to 32 improves performance */
+	txdctl |= (1 << 8) |    /* HTHRESH = 1 */
+		  32;          /* PTHRESH = 32 */
+
+	IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), txdctl);
+
+	/* poll to verify queue is enabled */
+	do {
+		usleep_range(1000, 2000);
+		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(reg_idx));
+	}  while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE));
+	if (!wait_loop)
+		pr_err("Could not enable Tx Queue %d\n", reg_idx);
+}
+
+/**
  * ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset
  * @adapter: board private structure
  *
@@ -1094,31 +1171,11 @@ static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter)
  **/
 static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter)
 {
-	u64 tdba;
-	struct ixgbe_hw *hw = &adapter->hw;
-	u32 i, j, tdlen, txctrl;
+	u32 i;
 
 	/* Setup the HW Tx Head and Tail descriptor pointers */
-	for (i = 0; i < adapter->num_tx_queues; i++) {
-		struct ixgbevf_ring *ring = &adapter->tx_ring[i];
-		j = ring->reg_idx;
-		tdba = ring->dma;
-		tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc);
-		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j),
-				(tdba & DMA_BIT_MASK(32)));
-		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32));
-		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j), tdlen);
-		IXGBE_WRITE_REG(hw, IXGBE_VFTDH(j), 0);
-		IXGBE_WRITE_REG(hw, IXGBE_VFTDT(j), 0);
-		adapter->tx_ring[i].head = IXGBE_VFTDH(j);
-		adapter->tx_ring[i].tail = IXGBE_VFTDT(j);
-		/* Disable Tx Head Writeback RO bit, since this hoses
-		 * bookkeeping if things aren't delivered in order.
-		 */
-		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j));
-		txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
-		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl);
-	}
+	for (i = 0; i < adapter->num_tx_queues; i++)
+		ixgbevf_configure_tx_ring(adapter, adapter->tx_ring[i]);
 }
 
 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT	2
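ixgbevf_configure_tx_ring() above packs the prefetch, host and write-back thresholds into a single TXDCTL value: ENABLE | WTHRESH=8 | HTHRESH=1 | PTHRESH=32. The value it programs can be recomputed stand-alone from the defines this patch adds to defines.h:

/* Recompute the TXDCTL value programmed above: ENABLE | WTHRESH=8 |
 * HTHRESH=1 | PTHRESH=32, using the defines this patch adds to defines.h.
 */
#include <stdint.h>
#include <stdio.h>

#define IXGBE_TXDCTL_ENABLE		0x02000000
#define IXGBE_TXDCTL_WTHRESH_SHIFT	16

int main(void)
{
	uint32_t txdctl = IXGBE_TXDCTL_ENABLE;

	txdctl |= 8 << IXGBE_TXDCTL_WTHRESH_SHIFT;	/* WTHRESH = 8 */
	txdctl |= (1 << 8) | 32;			/* HTHRESH = 1, PTHRESH = 32 */
	printf("TXDCTL = 0x%08x\n", txdctl);		/* expected 0x02080120 */
	return 0;
}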
@@ -1129,7 +1186,7 @@ static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
 	struct ixgbe_hw *hw = &adapter->hw;
 	u32 srrctl;
 
-	rx_ring = &adapter->rx_ring[index];
+	rx_ring = adapter->rx_ring[index];
 
 	srrctl = IXGBE_SRRCTL_DROP_EN;
 
@@ -1187,7 +1244,93 @@ static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter)
 		rx_buf_len = IXGBEVF_RXBUFFER_10K;
 
 	for (i = 0; i < adapter->num_rx_queues; i++)
-		adapter->rx_ring[i].rx_buf_len = rx_buf_len;
+		adapter->rx_ring[i]->rx_buf_len = rx_buf_len;
+}
+
+#define IXGBEVF_MAX_RX_DESC_POLL 10
+static void ixgbevf_disable_rx_queue(struct ixgbevf_adapter *adapter,
+				     struct ixgbevf_ring *ring)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
+	u32 rxdctl;
+	u8 reg_idx = ring->reg_idx;
+
+	rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
+	rxdctl &= ~IXGBE_RXDCTL_ENABLE;
+
+	/* write value back with RXDCTL.ENABLE bit cleared */
+	IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);
+
+	/* the hardware may take up to 100us to really disable the rx queue */
+	do {
+		udelay(10);
+		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
+	} while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));
+
+	if (!wait_loop)
+		pr_err("RXDCTL.ENABLE queue %d not cleared while polling\n",
+		       reg_idx);
+}
+
+static void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
+					 struct ixgbevf_ring *ring)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
+	u32 rxdctl;
+	u8 reg_idx = ring->reg_idx;
+
+	do {
+		usleep_range(1000, 2000);
+		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
+	} while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));
+
+	if (!wait_loop)
+		pr_err("RXDCTL.ENABLE queue %d not set while polling\n",
+		       reg_idx);
+}
+
+static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter,
+				      struct ixgbevf_ring *ring)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	u64 rdba = ring->dma;
+	u32 rxdctl;
+	u8 reg_idx = ring->reg_idx;
+
+	/* disable queue to avoid issues while updating state */
+	rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
+	ixgbevf_disable_rx_queue(adapter, ring);
+
+	IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(reg_idx), rdba & DMA_BIT_MASK(32));
+	IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(reg_idx), rdba >> 32);
+	IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(reg_idx),
+			ring->count * sizeof(union ixgbe_adv_rx_desc));
+
+	/* enable relaxed ordering */
+	IXGBE_WRITE_REG(hw, IXGBE_VFDCA_RXCTRL(reg_idx),
+			IXGBE_DCA_RXCTRL_DESC_RRO_EN);
+
+	/* reset head and tail pointers */
+	IXGBE_WRITE_REG(hw, IXGBE_VFRDH(reg_idx), 0);
+	IXGBE_WRITE_REG(hw, IXGBE_VFRDT(reg_idx), 0);
+	ring->tail = hw->hw_addr + IXGBE_VFRDT(reg_idx);
+
+	/* reset ntu and ntc to place SW in sync with hardware */
+	ring->next_to_clean = 0;
+	ring->next_to_use = 0;
+
+	ixgbevf_configure_srrctl(adapter, reg_idx);
+
+	/* prevent DMA from exceeding buffer space available */
+	rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK;
+	rxdctl |= ring->rx_buf_len | IXGBE_RXDCTL_RLPML_EN;
+	rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
+	IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);
+
+	ixgbevf_rx_desc_queue_enable(adapter, ring);
+	ixgbevf_alloc_rx_buffers(ring, ixgbevf_desc_unused(ring));
 }
 
 /**
@@ -1198,33 +1341,17 @@ static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter)
  **/
 static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
 {
-	u64 rdba;
-	struct ixgbe_hw *hw = &adapter->hw;
-	int i, j;
-	u32 rdlen;
+	int i;
 
 	ixgbevf_setup_psrtype(adapter);
 
 	/* set_rx_buffer_len must be called before ring initialization */
 	ixgbevf_set_rx_buffer_len(adapter);
 
-	rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
 	/* Setup the HW Rx Head and Tail Descriptor Pointers and
 	 * the Base and Length of the Rx Descriptor Ring */
-	for (i = 0; i < adapter->num_rx_queues; i++) {
-		rdba = adapter->rx_ring[i].dma;
-		j = adapter->rx_ring[i].reg_idx;
-		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j),
-				(rdba & DMA_BIT_MASK(32)));
-		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32));
-		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j), rdlen);
-		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(j), 0);
-		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(j), 0);
-		adapter->rx_ring[i].head = IXGBE_VFRDH(j);
-		adapter->rx_ring[i].tail = IXGBE_VFRDT(j);
-
-		ixgbevf_configure_srrctl(adapter, j);
-	}
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		ixgbevf_configure_rx_ring(adapter, adapter->rx_ring[i]);
 }
 
 static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev,
@@ -1366,69 +1493,54 @@ static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
 	}
 }
 
-static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
+static int ixgbevf_configure_dcb(struct ixgbevf_adapter *adapter)
 {
-	struct net_device *netdev = adapter->netdev;
-	int i;
+	struct ixgbe_hw *hw = &adapter->hw;
+	unsigned int def_q = 0;
+	unsigned int num_tcs = 0;
+	unsigned int num_rx_queues = 1;
+	int err;
 
-	ixgbevf_set_rx_mode(netdev);
+	spin_lock_bh(&adapter->mbx_lock);
 
-	ixgbevf_restore_vlan(adapter);
+	/* fetch queue configuration from the PF */
+	err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
 
-	ixgbevf_configure_tx(adapter);
-	ixgbevf_configure_rx(adapter);
-	for (i = 0; i < adapter->num_rx_queues; i++) {
-		struct ixgbevf_ring *ring = &adapter->rx_ring[i];
-		ixgbevf_alloc_rx_buffers(adapter, ring,
-					 ixgbevf_desc_unused(ring));
-	}
-}
+	spin_unlock_bh(&adapter->mbx_lock);
 
-#define IXGBEVF_MAX_RX_DESC_POLL 10
-static void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
-					 int rxr)
-{
-	struct ixgbe_hw *hw = &adapter->hw;
-	int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
-	u32 rxdctl;
-	int j = adapter->rx_ring[rxr].reg_idx;
+	if (err)
+		return err;
 
-	do {
-		usleep_range(1000, 2000);
-		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
-	} while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));
+	if (num_tcs > 1) {
+		/* update default Tx ring register index */
+		adapter->tx_ring[0]->reg_idx = def_q;
 
-	if (!wait_loop)
-		hw_dbg(hw, "RXDCTL.ENABLE queue %d not set while polling\n",
-		       rxr);
+		/* we need as many queues as traffic classes */
+		num_rx_queues = num_tcs;
+	}
+
+	/* if we have a bad config, abort and request a queue reset */
+	if (adapter->num_rx_queues != num_rx_queues) {
+		/* force mailbox timeout to prevent further messages */
+		hw->mbx.timeout = 0;
+
+		/* wait for watchdog to come around and bail us out */
+		adapter->flags |= IXGBEVF_FLAG_QUEUE_RESET_REQUESTED;
+	}
 
-	ixgbevf_release_rx_desc(&adapter->hw, &adapter->rx_ring[rxr],
-				(adapter->rx_ring[rxr].count - 1));
+	return 0;
 }
 
-static void ixgbevf_disable_rx_queue(struct ixgbevf_adapter *adapter,
-				     struct ixgbevf_ring *ring)
+static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
 {
-	struct ixgbe_hw *hw = &adapter->hw;
-	int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
-	u32 rxdctl;
-	u8 reg_idx = ring->reg_idx;
-
-	rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
-	rxdctl &= ~IXGBE_RXDCTL_ENABLE;
+	ixgbevf_configure_dcb(adapter);
 
-	/* write value back with RXDCTL.ENABLE bit cleared */
-	IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);
+	ixgbevf_set_rx_mode(adapter->netdev);
 
-	/* the hardware may take up to 100us to really disable the rx queue */
-	do {
-		udelay(10);
-		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
-	} while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));
+	ixgbevf_restore_vlan(adapter);
 
-	if (!wait_loop)
-		hw_dbg(hw, "RXDCTL.ENABLE queue %d not cleared while polling\n",
-		       reg_idx);
+	ixgbevf_configure_tx(adapter);
+	ixgbevf_configure_rx(adapter);
 }
 
 static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter)
@@ -1493,37 +1605,6 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
 {
 	struct net_device *netdev = adapter->netdev;
 	struct ixgbe_hw *hw = &adapter->hw;
-	int i, j = 0;
-	int num_rx_rings = adapter->num_rx_queues;
-	u32 txdctl, rxdctl;
-
-	for (i = 0; i < adapter->num_tx_queues; i++) {
-		j = adapter->tx_ring[i].reg_idx;
-		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
-		/* enable WTHRESH=8 descriptors, to encourage burst writeback */
-		txdctl |= (8 << 16);
-		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
-	}
-
-	for (i = 0; i < adapter->num_tx_queues; i++) {
-		j = adapter->tx_ring[i].reg_idx;
-		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
-		txdctl |= IXGBE_TXDCTL_ENABLE;
-		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
-	}
-
-	for (i = 0; i < num_rx_rings; i++) {
-		j = adapter->rx_ring[i].reg_idx;
-		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
-		rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
-		if (hw->mac.type == ixgbe_mac_X540_vf) {
-			rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK;
-			rxdctl |= ((netdev->mtu + ETH_HLEN + ETH_FCS_LEN) |
-				   IXGBE_RXDCTL_RLPML_EN);
-		}
-		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
-		ixgbevf_rx_desc_queue_enable(adapter, i);
-	}
 
 	ixgbevf_configure_msix(adapter);
 
@@ -1549,85 +1630,10 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
 	mod_timer(&adapter->watchdog_timer, jiffies);
 }
 
-static int ixgbevf_reset_queues(struct ixgbevf_adapter *adapter)
-{
-	struct ixgbe_hw *hw = &adapter->hw;
-	struct ixgbevf_ring *rx_ring;
-	unsigned int def_q = 0;
-	unsigned int num_tcs = 0;
-	unsigned int num_rx_queues = 1;
-	int err, i;
-
-	spin_lock_bh(&adapter->mbx_lock);
-
-	/* fetch queue configuration from the PF */
-	err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
-
-	spin_unlock_bh(&adapter->mbx_lock);
-
-	if (err)
-		return err;
-
-	if (num_tcs > 1) {
-		/* update default Tx ring register index */
-		adapter->tx_ring[0].reg_idx = def_q;
-
-		/* we need as many queues as traffic classes */
-		num_rx_queues = num_tcs;
-	}
-
-	/* nothing to do if we have the correct number of queues */
-	if (adapter->num_rx_queues == num_rx_queues)
-		return 0;
-
-	/* allocate new rings */
-	rx_ring = kcalloc(num_rx_queues,
-			  sizeof(struct ixgbevf_ring), GFP_KERNEL);
-	if (!rx_ring)
-		return -ENOMEM;
-
-	/* setup ring fields */
-	for (i = 0; i < num_rx_queues; i++) {
-		rx_ring[i].count = adapter->rx_ring_count;
-		rx_ring[i].queue_index = i;
-		rx_ring[i].reg_idx = i;
-		rx_ring[i].dev = &adapter->pdev->dev;
-		rx_ring[i].netdev = adapter->netdev;
-
-		/* allocate resources on the ring */
-		err = ixgbevf_setup_rx_resources(adapter, &rx_ring[i]);
-		if (err) {
-			while (i) {
-				i--;
-				ixgbevf_free_rx_resources(adapter, &rx_ring[i]);
-			}
-			kfree(rx_ring);
-			return err;
-		}
-	}
-
-	/* free the existing rings and queues */
-	ixgbevf_free_all_rx_resources(adapter);
-	adapter->num_rx_queues = 0;
-	kfree(adapter->rx_ring);
-
-	/* move new rings into position on the adapter struct */
-	adapter->rx_ring = rx_ring;
-	adapter->num_rx_queues = num_rx_queues;
-
-	/* reset ring to vector mapping */
-	ixgbevf_reset_q_vectors(adapter);
-	ixgbevf_map_rings_to_vectors(adapter);
-
-	return 0;
-}
-
 void ixgbevf_up(struct ixgbevf_adapter *adapter)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
 
-	ixgbevf_reset_queues(adapter);
-
 	ixgbevf_configure(adapter);
 
 	ixgbevf_up_complete(adapter);
@@ -1640,13 +1646,10 @@ void ixgbevf_up(struct ixgbevf_adapter *adapter)
 
 /**
  * ixgbevf_clean_rx_ring - Free Rx Buffers per Queue
- * @adapter: board private structure
  * @rx_ring: ring to free buffers from
  **/
-static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter,
-				  struct ixgbevf_ring *rx_ring)
+static void ixgbevf_clean_rx_ring(struct ixgbevf_ring *rx_ring)
 {
-	struct pci_dev *pdev = adapter->pdev;
 	unsigned long size;
 	unsigned int i;
 
@@ -1659,7 +1662,7 @@ static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter,
 
 		rx_buffer_info = &rx_ring->rx_buffer_info[i];
 		if (rx_buffer_info->dma) {
-			dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
+			dma_unmap_single(rx_ring->dev, rx_buffer_info->dma,
 					 rx_ring->rx_buf_len,
 					 DMA_FROM_DEVICE);
 			rx_buffer_info->dma = 0;
@@ -1680,23 +1683,13 @@ static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter,
 
 	/* Zero out the descriptor ring */
 	memset(rx_ring->desc, 0, rx_ring->size);
-
-	rx_ring->next_to_clean = 0;
-	rx_ring->next_to_use = 0;
-
-	if (rx_ring->head)
-		writel(0, adapter->hw.hw_addr + rx_ring->head);
-	if (rx_ring->tail)
-		writel(0, adapter->hw.hw_addr + rx_ring->tail);
 }
 
 /**
  * ixgbevf_clean_tx_ring - Free Tx Buffers
- * @adapter: board private structure
  * @tx_ring: ring to be cleaned
  **/
-static void ixgbevf_clean_tx_ring(struct ixgbevf_adapter *adapter,
-				  struct ixgbevf_ring *tx_ring)
+static void ixgbevf_clean_tx_ring(struct ixgbevf_ring *tx_ring)
 {
 	struct ixgbevf_tx_buffer *tx_buffer_info;
 	unsigned long size;
@@ -1715,14 +1708,6 @@ static void ixgbevf_clean_tx_ring(struct ixgbevf_adapter *adapter,
 	memset(tx_ring->tx_buffer_info, 0, size);
 
 	memset(tx_ring->desc, 0, tx_ring->size);
-
-	tx_ring->next_to_use = 0;
-	tx_ring->next_to_clean = 0;
-
-	if (tx_ring->head)
-		writel(0, adapter->hw.hw_addr + tx_ring->head);
-	if (tx_ring->tail)
-		writel(0, adapter->hw.hw_addr + tx_ring->tail);
 }
 
 /**
@@ -1734,7 +1719,7 @@ static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter)
 	int i;
 
 	for (i = 0; i < adapter->num_rx_queues; i++)
-		ixgbevf_clean_rx_ring(adapter, &adapter->rx_ring[i]);
+		ixgbevf_clean_rx_ring(adapter->rx_ring[i]);
 }
 
 /**
@@ -1746,22 +1731,21 @@ static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter)
 	int i;
 
 	for (i = 0; i < adapter->num_tx_queues; i++)
-		ixgbevf_clean_tx_ring(adapter, &adapter->tx_ring[i]);
+		ixgbevf_clean_tx_ring(adapter->tx_ring[i]);
 }
 
 void ixgbevf_down(struct ixgbevf_adapter *adapter)
 {
 	struct net_device *netdev = adapter->netdev;
 	struct ixgbe_hw *hw = &adapter->hw;
-	u32 txdctl;
-	int i, j;
+	int i;
 
 	/* signal that we are down to the interrupt handler */
 	set_bit(__IXGBEVF_DOWN, &adapter->state);
 
 	/* disable all enabled rx queues */
 	for (i = 0; i < adapter->num_rx_queues; i++)
-		ixgbevf_disable_rx_queue(adapter, &adapter->rx_ring[i]);
+		ixgbevf_disable_rx_queue(adapter, adapter->rx_ring[i]);
 
 	netif_tx_disable(netdev);
 
@@ -1782,10 +1766,10 @@ void ixgbevf_down(struct ixgbevf_adapter *adapter)
 
 	/* disable transmits in the hardware now that interrupts are off */
 	for (i = 0; i < adapter->num_tx_queues; i++) {
-		j = adapter->tx_ring[i].reg_idx;
-		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
-		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j),
-				(txdctl & ~IXGBE_TXDCTL_ENABLE));
+		u8 reg_idx = adapter->tx_ring[i]->reg_idx;
+
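+		/* SWFLSH with ENABLE cleared flushes any pending descriptors
+		 * and disables the queue
+		 */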
+		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx),
+				IXGBE_TXDCTL_SWFLSH);
 	}
 
 	netif_carrier_off(netdev);
@@ -1889,9 +1873,28 @@ static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
  **/
 static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
 {
+	struct ixgbe_hw *hw = &adapter->hw;
+	unsigned int def_q = 0;
+	unsigned int num_tcs = 0;
+	int err;
+
 	/* Start with base case */
 	adapter->num_rx_queues = 1;
 	adapter->num_tx_queues = 1;
+
+	spin_lock_bh(&adapter->mbx_lock);
+
+	/* fetch queue configuration from the PF */
+	err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
+
+	spin_unlock_bh(&adapter->mbx_lock);
+
+	if (err)
+		return;
+
+	/* we need as many queues as traffic classes */
+	if (num_tcs > 1)
+		adapter->num_rx_queues = num_tcs;
 }
 
 /**
@@ -1904,40 +1907,50 @@ static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
  **/
 static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter)
 {
-	int i;
+	struct ixgbevf_ring *ring;
+	int rx = 0, tx = 0;
 
-	adapter->tx_ring = kcalloc(adapter->num_tx_queues,
-				   sizeof(struct ixgbevf_ring), GFP_KERNEL);
-	if (!adapter->tx_ring)
-		goto err_tx_ring_allocation;
+	for (; tx < adapter->num_tx_queues; tx++) {
+		ring = kzalloc(sizeof(*ring), GFP_KERNEL);
+		if (!ring)
+			goto err_allocation;
 
-	adapter->rx_ring = kcalloc(adapter->num_rx_queues,
-				   sizeof(struct ixgbevf_ring), GFP_KERNEL);
-	if (!adapter->rx_ring)
-		goto err_rx_ring_allocation;
+		ring->dev = &adapter->pdev->dev;
+		ring->netdev = adapter->netdev;
+		ring->count = adapter->tx_ring_count;
+		ring->queue_index = tx;
+		ring->reg_idx = tx;
 
-	for (i = 0; i < adapter->num_tx_queues; i++) {
-		adapter->tx_ring[i].count = adapter->tx_ring_count;
-		adapter->tx_ring[i].queue_index = i;
-		/* reg_idx may be remapped later by DCB config */
-		adapter->tx_ring[i].reg_idx = i;
-		adapter->tx_ring[i].dev = &adapter->pdev->dev;
-		adapter->tx_ring[i].netdev = adapter->netdev;
+		adapter->tx_ring[tx] = ring;
 	}
 
-	for (i = 0; i < adapter->num_rx_queues; i++) {
-		adapter->rx_ring[i].count = adapter->rx_ring_count;
-		adapter->rx_ring[i].queue_index = i;
-		adapter->rx_ring[i].reg_idx = i;
-		adapter->rx_ring[i].dev = &adapter->pdev->dev;
-		adapter->rx_ring[i].netdev = adapter->netdev;
+	for (; rx < adapter->num_rx_queues; rx++) {
+		ring = kzalloc(sizeof(*ring), GFP_KERNEL);
+		if (!ring)
+			goto err_allocation;
+
+		ring->dev = &adapter->pdev->dev;
+		ring->netdev = adapter->netdev;
+
+		ring->count = adapter->rx_ring_count;
+		ring->queue_index = rx;
+		ring->reg_idx = rx;
+
+		adapter->rx_ring[rx] = ring;
 	}
 
 	return 0;
 
-err_rx_ring_allocation:
-	kfree(adapter->tx_ring);
-err_tx_ring_allocation:
+err_allocation:
+	while (tx) {
+		kfree(adapter->tx_ring[--tx]);
+		adapter->tx_ring[tx] = NULL;
+	}
+
+	while (rx) {
+		kfree(adapter->rx_ring[--rx]);
+		adapter->rx_ring[rx] = NULL;
+	}
 	return -ENOMEM;
 }
 
@@ -2128,6 +2141,17 @@ err_set_interrupt:
  **/
 static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter)
 {
+	int i;
+
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		kfree(adapter->tx_ring[i]);
+		adapter->tx_ring[i] = NULL;
+	}
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		kfree(adapter->rx_ring[i]);
+		adapter->rx_ring[i] = NULL;
+	}
+
 	adapter->num_tx_queues = 0;
 	adapter->num_rx_queues = 0;
 
@@ -2258,11 +2282,8 @@ void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
 
 	for (i = 0;  i  < adapter->num_rx_queues;  i++) {
 		adapter->hw_csum_rx_error +=
-			adapter->rx_ring[i].hw_csum_rx_error;
-		adapter->hw_csum_rx_good +=
-			adapter->rx_ring[i].hw_csum_rx_good;
-		adapter->rx_ring[i].hw_csum_rx_error = 0;
-		adapter->rx_ring[i].hw_csum_rx_good = 0;
+			adapter->rx_ring[i]->hw_csum_rx_error;
+		adapter->rx_ring[i]->hw_csum_rx_error = 0;
 	}
 }
 
@@ -2340,6 +2361,8 @@ static void ixgbevf_watchdog_task(struct work_struct *work)
 	bool link_up = adapter->link_up;
 	s32 need_reset;
 
+	ixgbevf_queue_reset_subtask(adapter);
+
 	adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;
 
 	/*
@@ -2408,22 +2431,22 @@ pf_has_reset:
 
 /**
  * ixgbevf_free_tx_resources - Free Tx Resources per Queue
- * @adapter: board private structure
  * @tx_ring: Tx descriptor ring for a specific queue
  *
  * Free all transmit software resources
  **/
-void ixgbevf_free_tx_resources(struct ixgbevf_adapter *adapter,
-			       struct ixgbevf_ring *tx_ring)
+void ixgbevf_free_tx_resources(struct ixgbevf_ring *tx_ring)
 {
-	struct pci_dev *pdev = adapter->pdev;
-
-	ixgbevf_clean_tx_ring(adapter, tx_ring);
+	ixgbevf_clean_tx_ring(tx_ring);
 
 	vfree(tx_ring->tx_buffer_info);
 	tx_ring->tx_buffer_info = NULL;
 
-	dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
+	/* if not set, then don't free */
+	if (!tx_ring->desc)
+		return;
+
+	dma_free_coherent(tx_ring->dev, tx_ring->size, tx_ring->desc,
 			  tx_ring->dma);
 
 	tx_ring->desc = NULL;
@@ -2440,23 +2463,18 @@ static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter)
 	int i;
 
 	for (i = 0; i < adapter->num_tx_queues; i++)
-		if (adapter->tx_ring[i].desc)
-			ixgbevf_free_tx_resources(adapter,
-						  &adapter->tx_ring[i]);
-
+		if (adapter->tx_ring[i]->desc)
+			ixgbevf_free_tx_resources(adapter->tx_ring[i]);
 }
 
 /**
  * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors)
- * @adapter: board private structure
  * @tx_ring:    tx descriptor ring (for a specific queue) to setup
  *
  * Return 0 on success, negative on failure
  **/
-int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *adapter,
-			       struct ixgbevf_ring *tx_ring)
+int ixgbevf_setup_tx_resources(struct ixgbevf_ring *tx_ring)
 {
-	struct pci_dev *pdev = adapter->pdev;
 	int size;
 
 	size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
@@ -2468,13 +2486,11 @@ int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *adapter,
 	tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
 	tx_ring->size = ALIGN(tx_ring->size, 4096);
 
-	tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
+	tx_ring->desc = dma_alloc_coherent(tx_ring->dev, tx_ring->size,
 					   &tx_ring->dma, GFP_KERNEL);
 	if (!tx_ring->desc)
 		goto err;
 
-	tx_ring->next_to_use = 0;
-	tx_ring->next_to_clean = 0;
 	return 0;
 
 err:
@@ -2500,7 +2516,7 @@ static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
 	int i, err = 0;
 
 	for (i = 0; i < adapter->num_tx_queues; i++) {
-		err = ixgbevf_setup_tx_resources(adapter, &adapter->tx_ring[i]);
+		err = ixgbevf_setup_tx_resources(adapter->tx_ring[i]);
 		if (!err)
 			continue;
 		hw_dbg(&adapter->hw,
@@ -2513,40 +2529,34 @@ static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
 
 /**
  * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors)
- * @adapter: board private structure
  * @rx_ring:    rx descriptor ring (for a specific queue) to setup
  *
  * Returns 0 on success, negative on failure
  **/
-int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter,
-			       struct ixgbevf_ring *rx_ring)
+int ixgbevf_setup_rx_resources(struct ixgbevf_ring *rx_ring)
 {
-	struct pci_dev *pdev = adapter->pdev;
 	int size;
 
 	size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
 	rx_ring->rx_buffer_info = vzalloc(size);
 	if (!rx_ring->rx_buffer_info)
-		goto alloc_failed;
+		goto err;
 
 	/* Round up to nearest 4K */
 	rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
 	rx_ring->size = ALIGN(rx_ring->size, 4096);
 
-	rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
+	rx_ring->desc = dma_alloc_coherent(rx_ring->dev, rx_ring->size,
 					   &rx_ring->dma, GFP_KERNEL);
 
-	if (!rx_ring->desc) {
-		vfree(rx_ring->rx_buffer_info);
-		rx_ring->rx_buffer_info = NULL;
-		goto alloc_failed;
-	}
-
-	rx_ring->next_to_clean = 0;
-	rx_ring->next_to_use = 0;
+	if (!rx_ring->desc)
+		goto err;
 
 	return 0;
-alloc_failed:
+err:
+	vfree(rx_ring->rx_buffer_info);
+	rx_ring->rx_buffer_info = NULL;
+	dev_err(rx_ring->dev, "Unable to allocate memory for the Rx descriptor ring\n");
 	return -ENOMEM;
 }
 
@@ -2565,7 +2575,7 @@ static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter)
 	int i, err = 0;
 
 	for (i = 0; i < adapter->num_rx_queues; i++) {
-		err = ixgbevf_setup_rx_resources(adapter, &adapter->rx_ring[i]);
+		err = ixgbevf_setup_rx_resources(adapter->rx_ring[i]);
 		if (!err)
 			continue;
 		hw_dbg(&adapter->hw,
@@ -2577,22 +2587,18 @@ static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter)
 
 /**
  * ixgbevf_free_rx_resources - Free Rx Resources
- * @adapter: board private structure
  * @rx_ring: ring to clean the resources from
  *
  * Free all receive software resources
  **/
-void ixgbevf_free_rx_resources(struct ixgbevf_adapter *adapter,
-			       struct ixgbevf_ring *rx_ring)
+void ixgbevf_free_rx_resources(struct ixgbevf_ring *rx_ring)
 {
-	struct pci_dev *pdev = adapter->pdev;
-
-	ixgbevf_clean_rx_ring(adapter, rx_ring);
+	ixgbevf_clean_rx_ring(rx_ring);
 
 	vfree(rx_ring->rx_buffer_info);
 	rx_ring->rx_buffer_info = NULL;
 
-	dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
+	dma_free_coherent(rx_ring->dev, rx_ring->size, rx_ring->desc,
 			  rx_ring->dma);
 
 	rx_ring->desc = NULL;
@@ -2609,66 +2615,8 @@ static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)
 	int i;
 
 	for (i = 0; i < adapter->num_rx_queues; i++)
-		if (adapter->rx_ring[i].desc)
-			ixgbevf_free_rx_resources(adapter,
-						  &adapter->rx_ring[i]);
-}
-
-static int ixgbevf_setup_queues(struct ixgbevf_adapter *adapter)
-{
-	struct ixgbe_hw *hw = &adapter->hw;
-	struct ixgbevf_ring *rx_ring;
-	unsigned int def_q = 0;
-	unsigned int num_tcs = 0;
-	unsigned int num_rx_queues = 1;
-	int err, i;
-
-	spin_lock_bh(&adapter->mbx_lock);
-
-	/* fetch queue configuration from the PF */
-	err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
-
-	spin_unlock_bh(&adapter->mbx_lock);
-
-	if (err)
-		return err;
-
-	if (num_tcs > 1) {
-		/* update default Tx ring register index */
-		adapter->tx_ring[0].reg_idx = def_q;
-
-		/* we need as many queues as traffic classes */
-		num_rx_queues = num_tcs;
-	}
-
-	/* nothing to do if we have the correct number of queues */
-	if (adapter->num_rx_queues == num_rx_queues)
-		return 0;
-
-	/* allocate new rings */
-	rx_ring = kcalloc(num_rx_queues,
-			  sizeof(struct ixgbevf_ring), GFP_KERNEL);
-	if (!rx_ring)
-		return -ENOMEM;
-
-	/* setup ring fields */
-	for (i = 0; i < num_rx_queues; i++) {
-		rx_ring[i].count = adapter->rx_ring_count;
-		rx_ring[i].queue_index = i;
-		rx_ring[i].reg_idx = i;
-		rx_ring[i].dev = &adapter->pdev->dev;
-		rx_ring[i].netdev = adapter->netdev;
-	}
-
-	/* free the existing ring and queues */
-	adapter->num_rx_queues = 0;
-	kfree(adapter->rx_ring);
-
-	/* move new rings into position on the adapter struct */
-	adapter->rx_ring = rx_ring;
-	adapter->num_rx_queues = num_rx_queues;
-
-	return 0;
+		if (adapter->rx_ring[i]->desc)
+			ixgbevf_free_rx_resources(adapter->rx_ring[i]);
 }
 
 /**
@@ -2714,11 +2662,6 @@ static int ixgbevf_open(struct net_device *netdev)
 		}
 	}
 
-	/* setup queue reg_idx and Rx queue count */
-	err = ixgbevf_setup_queues(adapter);
-	if (err)
-		goto err_setup_queues;
-
 	/* allocate transmit descriptors */
 	err = ixgbevf_setup_all_tx_resources(adapter);
 	if (err)
@@ -2756,7 +2699,6 @@ err_setup_rx:
 	ixgbevf_free_all_rx_resources(adapter);
 err_setup_tx:
 	ixgbevf_free_all_tx_resources(adapter);
-err_setup_queues:
 	ixgbevf_reset(adapter);
 
 err_setup_reset:
@@ -2788,6 +2730,34 @@ static int ixgbevf_close(struct net_device *netdev)
 	return 0;
 }
 
+static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter)
+{
+	struct net_device *dev = adapter->netdev;
+
+	if (!(adapter->flags & IXGBEVF_FLAG_QUEUE_RESET_REQUESTED))
+		return;
+
+	adapter->flags &= ~IXGBEVF_FLAG_QUEUE_RESET_REQUESTED;
+
+	/* if interface is down do nothing */
+	if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
+	    test_bit(__IXGBEVF_RESETTING, &adapter->state))
+		return;
+
+	/* Hardware has to reinitialize queues and interrupts to
+	 * match packet buffer alignment. Unfortunately, the
+	 * hardware is not flexible enough to do this dynamically.
+	 */
+	if (netif_running(dev))
+		ixgbevf_close(dev);
+
+	ixgbevf_clear_interrupt_scheme(adapter);
+	ixgbevf_init_interrupt_scheme(adapter);
+
+	if (netif_running(dev))
+		ixgbevf_open(dev);
+}
+
 static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring,
 				u32 vlan_macip_lens, u32 type_tucmd,
 				u32 mss_l4len_idx)
@@ -2810,8 +2780,10 @@ static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring,
 }
 
 static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
-		       struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
+		       struct ixgbevf_tx_buffer *first,
+		       u8 *hdr_len)
 {
+	struct sk_buff *skb = first->skb;
 	u32 vlan_macip_lens, type_tucmd;
 	u32 mss_l4len_idx, l4len;
 
@@ -2836,12 +2808,17 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
 							 IPPROTO_TCP,
 							 0);
 		type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
+		first->tx_flags |= IXGBE_TX_FLAGS_TSO |
+				   IXGBE_TX_FLAGS_CSUM |
+				   IXGBE_TX_FLAGS_IPV4;
 	} else if (skb_is_gso_v6(skb)) {
 		ipv6_hdr(skb)->payload_len = 0;
 		tcp_hdr(skb)->check =
 		    ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
 				     &ipv6_hdr(skb)->daddr,
 				     0, IPPROTO_TCP, 0);
+		first->tx_flags |= IXGBE_TX_FLAGS_TSO |
+				   IXGBE_TX_FLAGS_CSUM;
 	}
 
 	/* compute header lengths */
@@ -2849,6 +2826,10 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
 	l4len = tcp_hdrlen(skb);
 	*hdr_len = skb_transport_offset(skb) + l4len;
 
+	/* update gso size and bytecount with header size */
+	first->gso_segs = skb_shinfo(skb)->gso_segs;
+	first->bytecount += (first->gso_segs - 1) * *hdr_len;
+
 	/* mss_l4len_id: use 1 as index for TSO */
 	mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
 	mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
@@ -2857,7 +2838,7 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
 	/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
 	vlan_macip_lens = skb_network_header_len(skb);
 	vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
-	vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
+	vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
 
 	ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
 			    type_tucmd, mss_l4len_idx);
@@ -2865,9 +2846,10 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
 	return 1;
 }
 
-static bool ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
-			    struct sk_buff *skb, u32 tx_flags)
+static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
+			    struct ixgbevf_tx_buffer *first)
 {
+	struct sk_buff *skb = first->skb;
 	u32 vlan_macip_lens = 0;
 	u32 mss_l4len_idx = 0;
 	u32 type_tucmd = 0;
@@ -2888,7 +2870,7 @@ static bool ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
 			if (unlikely(net_ratelimit())) {
 				dev_warn(tx_ring->dev,
 				 "partial checksum but proto=%x!\n",
-				 skb->protocol);
+				 first->protocol);
 			}
 			break;
 		}
@@ -2916,184 +2898,190 @@ static bool ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
 			}
 			break;
 		}
+
+		/* update TX checksum flag */
+		first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
 	}
 
 	/* vlan_macip_lens: MACLEN, VLAN tag */
 	vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
-	vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
+	vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
 
 	ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
 			    type_tucmd, mss_l4len_idx);
-
-	return (skb->ip_summed == CHECKSUM_PARTIAL);
 }
 
-static int ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
-			  struct sk_buff *skb, u32 tx_flags)
+static __le32 ixgbevf_tx_cmd_type(u32 tx_flags)
 {
-	struct ixgbevf_tx_buffer *tx_buffer_info;
-	unsigned int len;
-	unsigned int total = skb->len;
-	unsigned int offset = 0, size;
-	int count = 0;
-	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
-	unsigned int f;
-	int i;
+	/* set type for advanced descriptor with frame checksum insertion */
+	__le32 cmd_type = cpu_to_le32(IXGBE_ADVTXD_DTYP_DATA |
+				      IXGBE_ADVTXD_DCMD_IFCS |
+				      IXGBE_ADVTXD_DCMD_DEXT);
 
-	i = tx_ring->next_to_use;
+	/* set HW vlan bit if vlan is present */
+	if (tx_flags & IXGBE_TX_FLAGS_VLAN)
+		cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_VLE);
 
-	len = min(skb_headlen(skb), total);
-	while (len) {
-		tx_buffer_info = &tx_ring->tx_buffer_info[i];
-		size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);
-
-		tx_buffer_info->length = size;
-		tx_buffer_info->mapped_as_page = false;
-		tx_buffer_info->dma = dma_map_single(tx_ring->dev,
-						     skb->data + offset,
-						     size, DMA_TO_DEVICE);
-		if (dma_mapping_error(tx_ring->dev, tx_buffer_info->dma))
-			goto dma_error;
+	/* set segmentation enable bits for TSO/FSO */
+	if (tx_flags & IXGBE_TX_FLAGS_TSO)
+		cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_TSE);
 
-		len -= size;
-		total -= size;
-		offset += size;
-		count++;
-		i++;
-		if (i == tx_ring->count)
-			i = 0;
-	}
+	return cmd_type;
+}
 
-	for (f = 0; f < nr_frags; f++) {
-		const struct skb_frag_struct *frag;
+static void ixgbevf_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc,
+				     u32 tx_flags, unsigned int paylen)
+{
+	__le32 olinfo_status = cpu_to_le32(paylen << IXGBE_ADVTXD_PAYLEN_SHIFT);
 
-		frag = &skb_shinfo(skb)->frags[f];
-		len = min((unsigned int)skb_frag_size(frag), total);
-		offset = 0;
+	/* enable L4 checksum for TSO and TX checksum offload */
+	if (tx_flags & IXGBE_TX_FLAGS_CSUM)
+		olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_TXSM);
 
-		while (len) {
-			tx_buffer_info = &tx_ring->tx_buffer_info[i];
-			size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);
+	/* enable IPv4 checksum for TSO */
+	if (tx_flags & IXGBE_TX_FLAGS_IPV4)
+		olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IXSM);
 
-			tx_buffer_info->length = size;
-			tx_buffer_info->dma =
-				skb_frag_dma_map(tx_ring->dev, frag,
-						 offset, size, DMA_TO_DEVICE);
-			if (dma_mapping_error(tx_ring->dev,
-					      tx_buffer_info->dma))
-				goto dma_error;
-			tx_buffer_info->mapped_as_page = true;
+	/* use index 1 context for TSO/FSO/FCOE */
+	if (tx_flags & IXGBE_TX_FLAGS_TSO)
+		olinfo_status |= cpu_to_le32(1 << IXGBE_ADVTXD_IDX_SHIFT);
 
-			len -= size;
-			total -= size;
-			offset += size;
-			count++;
-			i++;
-			if (i == tx_ring->count)
-				i = 0;
-		}
-		if (total == 0)
-			break;
-	}
+	/* Check Context must be set if Tx switch is enabled, which it
+	 * always is for the case where virtual functions are running
+	 */
+	olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_CC);
 
-	if (i == 0)
-		i = tx_ring->count - 1;
-	else
-		i = i - 1;
-	tx_ring->tx_buffer_info[i].skb = skb;
+	tx_desc->read.olinfo_status = olinfo_status;
+}
 
-	return count;
+static void ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
+			   struct ixgbevf_tx_buffer *first,
+			   const u8 hdr_len)
+{
+	dma_addr_t dma;
+	struct sk_buff *skb = first->skb;
+	struct ixgbevf_tx_buffer *tx_buffer;
+	union ixgbe_adv_tx_desc *tx_desc;
+	struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
+	unsigned int data_len = skb->data_len;
+	unsigned int size = skb_headlen(skb);
+	unsigned int paylen = skb->len - hdr_len;
+	u32 tx_flags = first->tx_flags;
+	__le32 cmd_type;
+	u16 i = tx_ring->next_to_use;
 
-dma_error:
-	dev_err(tx_ring->dev, "TX DMA map failed\n");
+	tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
 
-	/* clear timestamp and dma mappings for failed tx_buffer_info map */
-	tx_buffer_info->dma = 0;
-	count--;
+	ixgbevf_tx_olinfo_status(tx_desc, tx_flags, paylen);
+	cmd_type = ixgbevf_tx_cmd_type(tx_flags);
 
-	/* clear timestamp and dma mappings for remaining portion of packet */
-	while (count >= 0) {
-		count--;
-		i--;
-		if (i < 0)
-			i += tx_ring->count;
-		tx_buffer_info = &tx_ring->tx_buffer_info[i];
-		ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
-	}
+	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
+	if (dma_mapping_error(tx_ring->dev, dma))
+		goto dma_error;
 
-	return count;
-}
+	/* record length, and DMA address */
+	dma_unmap_len_set(first, len, size);
+	dma_unmap_addr_set(first, dma, dma);
 
-static void ixgbevf_tx_queue(struct ixgbevf_ring *tx_ring, int tx_flags,
-			     int count, unsigned int first, u32 paylen,
-			     u8 hdr_len)
-{
-	union ixgbe_adv_tx_desc *tx_desc = NULL;
-	struct ixgbevf_tx_buffer *tx_buffer_info;
-	u32 olinfo_status = 0, cmd_type_len = 0;
-	unsigned int i;
+	tx_desc->read.buffer_addr = cpu_to_le64(dma);
 
-	u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS;
+	for (;;) {
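+		/* a single buffer can exceed IXGBE_MAX_DATA_PER_TXD, so split
+		 * it across as many descriptors as needed
+		 */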
+		while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) {
+			tx_desc->read.cmd_type_len =
+				cmd_type | cpu_to_le32(IXGBE_MAX_DATA_PER_TXD);
 
-	cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA;
+			i++;
+			tx_desc++;
+			if (i == tx_ring->count) {
+				tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
+				i = 0;
+			}
 
-	cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;
+			dma += IXGBE_MAX_DATA_PER_TXD;
+			size -= IXGBE_MAX_DATA_PER_TXD;
 
-	if (tx_flags & IXGBE_TX_FLAGS_VLAN)
-		cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
+			tx_desc->read.buffer_addr = cpu_to_le64(dma);
+			tx_desc->read.olinfo_status = 0;
+		}
 
-	if (tx_flags & IXGBE_TX_FLAGS_CSUM)
-		olinfo_status |= IXGBE_ADVTXD_POPTS_TXSM;
+		if (likely(!data_len))
+			break;
 
-	if (tx_flags & IXGBE_TX_FLAGS_TSO) {
-		cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
+		tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size);
 
-		/* use index 1 context for tso */
-		olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
-		if (tx_flags & IXGBE_TX_FLAGS_IPV4)
-			olinfo_status |= IXGBE_ADVTXD_POPTS_IXSM;
-	}
+		i++;
+		tx_desc++;
+		if (i == tx_ring->count) {
+			tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
+			i = 0;
+		}
 
-	/*
-	 * Check Context must be set if Tx switch is enabled, which it
-	 * always is for case where virtual functions are running
-	 */
-	olinfo_status |= IXGBE_ADVTXD_CC;
+		size = skb_frag_size(frag);
+		data_len -= size;
 
-	olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);
+		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
+				       DMA_TO_DEVICE);
+		if (dma_mapping_error(tx_ring->dev, dma))
+			goto dma_error;
 
-	i = tx_ring->next_to_use;
-	while (count--) {
-		tx_buffer_info = &tx_ring->tx_buffer_info[i];
-		tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
-		tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
-		tx_desc->read.cmd_type_len =
-			cpu_to_le32(cmd_type_len | tx_buffer_info->length);
-		tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
-		i++;
-		if (i == tx_ring->count)
-			i = 0;
+		tx_buffer = &tx_ring->tx_buffer_info[i];
+		dma_unmap_len_set(tx_buffer, len, size);
+		dma_unmap_addr_set(tx_buffer, dma, dma);
+
+		tx_desc->read.buffer_addr = cpu_to_le64(dma);
+		tx_desc->read.olinfo_status = 0;
+
+		frag++;
 	}
 
-	tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);
+	/* write last descriptor with RS and EOP bits */
+	cmd_type |= cpu_to_le32(size) | cpu_to_le32(IXGBE_TXD_CMD);
+	tx_desc->read.cmd_type_len = cmd_type;
 
-	tx_ring->tx_buffer_info[first].time_stamp = jiffies;
+	/* set the timestamp */
+	first->time_stamp = jiffies;
 
-	/* Force memory writes to complete before letting h/w
-	 * know there are new descriptors to fetch.  (Only
-	 * applicable for weak-ordered memory model archs,
-	 * such as IA-64).
+	/* Force memory writes to complete before letting h/w know there
+	 * are new descriptors to fetch.  (Only applicable for weak-ordered
+	 * memory model archs, such as IA-64).
+	 *
+	 * We also need this memory barrier (wmb) to make certain all of the
+	 * status bits have been updated before next_to_watch is written.
 	 */
 	wmb();
 
-	tx_ring->tx_buffer_info[first].next_to_watch = tx_desc;
+	/* set next_to_watch value indicating a packet is present */
+	first->next_to_watch = tx_desc;
+
+	i++;
+	if (i == tx_ring->count)
+		i = 0;
+
+	tx_ring->next_to_use = i;
+
+	/* notify HW of packet */
+	writel(i, tx_ring->tail);
+
+	return;
+dma_error:
+	dev_err(tx_ring->dev, "TX DMA map failed\n");
+
+	/* clear dma mappings for failed tx_buffer_info map */
+	for (;;) {
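+		/* walk backwards through the ring, freeing each mapping made
+		 * for this packet until the first buffer is reached
+		 */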
+		tx_buffer = &tx_ring->tx_buffer_info[i];
+		ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer);
+		if (tx_buffer == first)
+			break;
+		if (i == 0)
+			i = tx_ring->count;
+		i--;
+	}
+
 	tx_ring->next_to_use = i;
 }
 
 static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
 {
-	struct ixgbevf_adapter *adapter = netdev_priv(tx_ring->netdev);
-
 	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
 	/* Herbert's original patch had:
 	 *  smp_mb__after_netif_stop_queue();
@@ -3107,7 +3095,8 @@ static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
 
 	/* A reprieve! - use start_queue because it doesn't call schedule */
 	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
-	++adapter->restart_queue;
+	++tx_ring->tx_stats.restart_queue;
+
 	return 0;
 }
 
@@ -3121,22 +3110,23 @@ static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
 static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 {
 	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+	struct ixgbevf_tx_buffer *first;
 	struct ixgbevf_ring *tx_ring;
-	unsigned int first;
-	unsigned int tx_flags = 0;
-	u8 hdr_len = 0;
-	int r_idx = 0, tso;
+	int tso;
+	u32 tx_flags = 0;
 	u16 count = TXD_USE_COUNT(skb_headlen(skb));
 #if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
 	unsigned short f;
 #endif
+	u8 hdr_len = 0;
 	u8 *dst_mac = skb_header_pointer(skb, 0, 0, NULL);
+
 	if (!dst_mac || is_link_local_ether_addr(dst_mac)) {
 		dev_kfree_skb(skb);
 		return NETDEV_TX_OK;
 	}
 
-	tx_ring = &adapter->tx_ring[r_idx];
+	tx_ring = adapter->tx_ring[skb->queue_mapping];
 
 	/*
 	 * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
@@ -3152,38 +3142,41 @@ static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 	count += skb_shinfo(skb)->nr_frags;
 #endif
 	if (ixgbevf_maybe_stop_tx(tx_ring, count + 3)) {
-		adapter->tx_busy++;
+		tx_ring->tx_stats.tx_busy++;
 		return NETDEV_TX_BUSY;
 	}
 
+	/* record the location of the first descriptor for this packet */
+	first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
+	first->skb = skb;
+	first->bytecount = skb->len;
+	first->gso_segs = 1;
+
 	if (vlan_tx_tag_present(skb)) {
 		tx_flags |= vlan_tx_tag_get(skb);
 		tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
 		tx_flags |= IXGBE_TX_FLAGS_VLAN;
 	}
 
-	first = tx_ring->next_to_use;
+	/* record initial flags and protocol */
+	first->tx_flags = tx_flags;
+	first->protocol = vlan_get_protocol(skb);
 
-	if (skb->protocol == htons(ETH_P_IP))
-		tx_flags |= IXGBE_TX_FLAGS_IPV4;
-	tso = ixgbevf_tso(tx_ring, skb, tx_flags, &hdr_len);
-	if (tso < 0) {
-		dev_kfree_skb_any(skb);
-		return NETDEV_TX_OK;
-	}
+	tso = ixgbevf_tso(tx_ring, first, &hdr_len);
+	if (tso < 0)
+		goto out_drop;
+	else
+		ixgbevf_tx_csum(tx_ring, first);
 
-	if (tso)
-		tx_flags |= IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_CSUM;
-	else if (ixgbevf_tx_csum(tx_ring, skb, tx_flags))
-		tx_flags |= IXGBE_TX_FLAGS_CSUM;
+	ixgbevf_tx_map(tx_ring, first, hdr_len);
 
-	ixgbevf_tx_queue(tx_ring, tx_flags,
-			 ixgbevf_tx_map(tx_ring, skb, tx_flags),
-			 first, skb->len, hdr_len);
+	ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED);
 
-	writel(tx_ring->next_to_use, adapter->hw.hw_addr + tx_ring->tail);
+	return NETDEV_TX_OK;
 
-	ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED);
+out_drop:
+	dev_kfree_skb_any(first->skb);
+	first->skb = NULL;
 
 	return NETDEV_TX_OK;
 }
@@ -3289,8 +3282,8 @@ static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state)
 #ifdef CONFIG_PM
 static int ixgbevf_resume(struct pci_dev *pdev)
 {
-	struct ixgbevf_adapter *adapter = pci_get_drvdata(pdev);
-	struct net_device *netdev = adapter->netdev;
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
 	u32 err;
 
 	pci_set_power_state(pdev, PCI_D0);
@@ -3349,22 +3342,22 @@ static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
 	stats->multicast = adapter->stats.vfmprc - adapter->stats.base_vfmprc;
 
 	for (i = 0; i < adapter->num_rx_queues; i++) {
-		ring = &adapter->rx_ring[i];
+		ring = adapter->rx_ring[i];
 		do {
 			start = u64_stats_fetch_begin_bh(&ring->syncp);
-			bytes = ring->total_bytes;
-			packets = ring->total_packets;
+			bytes = ring->stats.bytes;
+			packets = ring->stats.packets;
 		} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
 		stats->rx_bytes += bytes;
 		stats->rx_packets += packets;
 	}
 
 	for (i = 0; i < adapter->num_tx_queues; i++) {
-		ring = &adapter->tx_ring[i];
+		ring = adapter->tx_ring[i];
 		do {
 			start = u64_stats_fetch_begin_bh(&ring->syncp);
-			bytes = ring->total_bytes;
-			packets = ring->total_packets;
+			bytes = ring->stats.bytes;
+			packets = ring->stats.packets;
 		} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
 		stats->tx_bytes += bytes;
 		stats->tx_packets += packets;
@@ -3595,9 +3588,6 @@ static void ixgbevf_remove(struct pci_dev *pdev)
 
 	hw_dbg(&adapter->hw, "Remove complete\n");
 
-	kfree(adapter->tx_ring);
-	kfree(adapter->rx_ring);
-
 	free_netdev(netdev);
 
 	pci_disable_device(pdev);
diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c
index 4a5e3b0f712e..d74f5f4e5782 100644
--- a/drivers/net/ethernet/korina.c
+++ b/drivers/net/ethernet/korina.c
@@ -39,7 +39,6 @@
 #include <linux/ctype.h>
 #include <linux/types.h>
 #include <linux/interrupt.h>
-#include <linux/init.h>
 #include <linux/ioport.h>
 #include <linux/in.h>
 #include <linux/slab.h>
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
index ec94a20d7099..8f9266c64c75 100644
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -9,8 +9,7 @@
  *   GNU General Public License for more details.
  *
  *   You should have received a copy of the GNU General Public License
- *   along with this program; if not, write to the Free Software
- *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
+ *   along with this program; if not, see <http://www.gnu.org/licenses/>.
  *
  *   Copyright (C) 2011 John Crispin <blogic@openwrt.org>
  */
diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig
index a49e81bdf8e8..6300fd27f2db 100644
--- a/drivers/net/ethernet/marvell/Kconfig
+++ b/drivers/net/ethernet/marvell/Kconfig
@@ -33,6 +33,7 @@ config MV643XX_ETH
 
 config MVMDIO
 	tristate "Marvell MDIO interface support"
+	depends on HAS_IOMEM
 	select PHYLIB
 	---help---
 	  This driver supports the MDIO interface found in the network
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 61088a6a9424..a2565ce22b7c 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -33,8 +33,7 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -2067,23 +2066,6 @@ static inline void oom_timer_wrapper(unsigned long data)
 	napi_schedule(&mp->napi);
 }
 
-static void phy_reset(struct mv643xx_eth_private *mp)
-{
-	int data;
-
-	data = phy_read(mp->phy, MII_BMCR);
-	if (data < 0)
-		return;
-
-	data |= BMCR_RESET;
-	if (phy_write(mp->phy, MII_BMCR, data) < 0)
-		return;
-
-	do {
-		data = phy_read(mp->phy, MII_BMCR);
-	} while (data >= 0 && data & BMCR_RESET);
-}
-
 static void port_start(struct mv643xx_eth_private *mp)
 {
 	u32 pscr;
@@ -2096,8 +2078,9 @@ static void port_start(struct mv643xx_eth_private *mp)
 		struct ethtool_cmd cmd;
 
 		mv643xx_eth_get_settings(mp->dev, &cmd);
-		phy_reset(mp);
+		phy_init_hw(mp->phy);
 		mv643xx_eth_set_settings(mp->dev, &cmd);
+		phy_start(mp->phy);
 	}
 
 	/*
@@ -2293,7 +2276,8 @@ static int mv643xx_eth_stop(struct net_device *dev)
 	del_timer_sync(&mp->rx_oom);
 
 	netif_carrier_off(dev);
-
+	if (mp->phy)
+		phy_stop(mp->phy);
 	free_irq(dev->irq, dev);
 
 	port_reset(mp);
@@ -2764,8 +2748,6 @@ static void phy_init(struct mv643xx_eth_private *mp, int speed, int duplex)
 {
 	struct phy_device *phy = mp->phy;
 
-	phy_reset(mp);
-
 	if (speed == 0) {
 		phy->autoneg = AUTONEG_ENABLE;
 		phy->speed = 0;
diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c
index c4eeb69a5bee..fd409d76b811 100644
--- a/drivers/net/ethernet/marvell/mvmdio.c
+++ b/drivers/net/ethernet/marvell/mvmdio.c
@@ -17,7 +17,6 @@
  * warranty of any kind, whether express or implied.
  */
 
-#include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index d5f0d72e5e33..f418f4f20f94 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -101,16 +101,56 @@
 #define      MVNETA_CPU_RXQ_ACCESS_ALL_MASK      0x000000ff
 #define      MVNETA_CPU_TXQ_ACCESS_ALL_MASK      0x0000ff00
 #define MVNETA_RXQ_TIME_COAL_REG(q)              (0x2580 + ((q) << 2))
+
+/* Exception Interrupt Port/Queue Cause register */
+
 #define MVNETA_INTR_NEW_CAUSE                    0x25a0
-#define      MVNETA_RX_INTR_MASK(nr_rxqs)        (((1 << nr_rxqs) - 1) << 8)
 #define MVNETA_INTR_NEW_MASK                     0x25a4
+
+/* bits  0..7  = TXQ SENT, one bit per queue.
+ * bits  8..15 = RXQ OCCUP, one bit per queue.
+ * bits 16..23 = RXQ FREE, one bit per queue.
+ * bit  29 = OLD_REG_SUM, see old reg ?
+ * bit  30 = TX_ERR_SUM, one bit for 4 ports
+ * bit  31 = MISC_SUM,   one bit for 4 ports
+ */
+#define      MVNETA_TX_INTR_MASK(nr_txqs)        (((1 << nr_txqs) - 1) << 0)
+#define      MVNETA_TX_INTR_MASK_ALL             (0xff << 0)
+#define      MVNETA_RX_INTR_MASK(nr_rxqs)        (((1 << nr_rxqs) - 1) << 8)
+#define      MVNETA_RX_INTR_MASK_ALL             (0xff << 8)
+
 #define MVNETA_INTR_OLD_CAUSE                    0x25a8
 #define MVNETA_INTR_OLD_MASK                     0x25ac
+
+/* Data Path Port/Queue Cause Register */
 #define MVNETA_INTR_MISC_CAUSE                   0x25b0
 #define MVNETA_INTR_MISC_MASK                    0x25b4
+
+#define      MVNETA_CAUSE_PHY_STATUS_CHANGE      BIT(0)
+#define      MVNETA_CAUSE_LINK_CHANGE            BIT(1)
+#define      MVNETA_CAUSE_PTP                    BIT(4)
+
+#define      MVNETA_CAUSE_INTERNAL_ADDR_ERR      BIT(7)
+#define      MVNETA_CAUSE_RX_OVERRUN             BIT(8)
+#define      MVNETA_CAUSE_RX_CRC_ERROR           BIT(9)
+#define      MVNETA_CAUSE_RX_LARGE_PKT           BIT(10)
+#define      MVNETA_CAUSE_TX_UNDERUN             BIT(11)
+#define      MVNETA_CAUSE_PRBS_ERR               BIT(12)
+#define      MVNETA_CAUSE_PSC_SYNC_CHANGE        BIT(13)
+#define      MVNETA_CAUSE_SERDES_SYNC_ERR        BIT(14)
+
+#define      MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT    16
+#define      MVNETA_CAUSE_BMU_ALLOC_ERR_ALL_MASK   (0xF << MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT)
+#define      MVNETA_CAUSE_BMU_ALLOC_ERR_MASK(pool) (1 << (MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT + (pool)))
+
+#define      MVNETA_CAUSE_TXQ_ERROR_SHIFT        24
+#define      MVNETA_CAUSE_TXQ_ERROR_ALL_MASK     (0xFF << MVNETA_CAUSE_TXQ_ERROR_SHIFT)
+#define      MVNETA_CAUSE_TXQ_ERROR_MASK(q)      (1 << (MVNETA_CAUSE_TXQ_ERROR_SHIFT + (q)))
+
 #define MVNETA_INTR_ENABLE                       0x25b8
 #define      MVNETA_TXQ_INTR_ENABLE_ALL_MASK     0x0000ff00
-#define      MVNETA_RXQ_INTR_ENABLE_ALL_MASK     0xff000000
+#define      MVNETA_RXQ_INTR_ENABLE_ALL_MASK     0xff000000  // note: neta says it's 0x000000FF
+
 #define MVNETA_RXQ_CMD                           0x2680
 #define      MVNETA_RXQ_DISABLE_SHIFT            8
 #define      MVNETA_RXQ_ENABLE_MASK              0x000000ff
@@ -176,9 +216,6 @@
 #define MVNETA_RX_COAL_PKTS		32
 #define MVNETA_RX_COAL_USEC		100
 
-/* Timer */
-#define MVNETA_TX_DONE_TIMER_PERIOD	10
-
 /* Napi polling weight */
 #define MVNETA_RX_POLL_WEIGHT		64
 
@@ -221,27 +258,25 @@
 
 #define MVNETA_RX_BUF_SIZE(pkt_size)   ((pkt_size) + NET_SKB_PAD)
 
-struct mvneta_stats {
+struct mvneta_pcpu_stats {
 	struct	u64_stats_sync syncp;
-	u64	packets;
-	u64	bytes;
+	u64	rx_packets;
+	u64	rx_bytes;
+	u64	tx_packets;
+	u64	tx_bytes;
 };
 
 struct mvneta_port {
 	int pkt_size;
+	unsigned int frag_size;
 	void __iomem *base;
 	struct mvneta_rx_queue *rxqs;
 	struct mvneta_tx_queue *txqs;
-	struct timer_list tx_done_timer;
 	struct net_device *dev;
 
 	u32 cause_rx_tx;
 	struct napi_struct napi;
 
-	/* Flags */
-	unsigned long flags;
-#define MVNETA_F_TX_DONE_TIMER_BIT  0
-
 	/* Napi weight */
 	int weight;
 
@@ -250,8 +285,7 @@ struct mvneta_port {
 	u8 mcast_count[256];
 	u16 tx_ring_size;
 	u16 rx_ring_size;
-	struct mvneta_stats tx_stats;
-	struct mvneta_stats rx_stats;
+	struct mvneta_pcpu_stats *stats;
 
 	struct mii_bus *mii_bus;
 	struct phy_device *phy_dev;
@@ -410,6 +444,8 @@ static int txq_number = 8;
 
 static int rxq_def;
 
+static int rx_copybreak __read_mostly = 256;
+
 #define MVNETA_DRIVER_NAME "mvneta"
 #define MVNETA_DRIVER_VERSION "1.0"
 
@@ -461,21 +497,29 @@ struct rtnl_link_stats64 *mvneta_get_stats64(struct net_device *dev,
 {
 	struct mvneta_port *pp = netdev_priv(dev);
 	unsigned int start;
+	int cpu;
 
-	memset(stats, 0, sizeof(struct rtnl_link_stats64));
-
-	do {
-		start = u64_stats_fetch_begin_bh(&pp->rx_stats.syncp);
-		stats->rx_packets = pp->rx_stats.packets;
-		stats->rx_bytes	= pp->rx_stats.bytes;
-	} while (u64_stats_fetch_retry_bh(&pp->rx_stats.syncp, start));
+	for_each_possible_cpu(cpu) {
+		struct mvneta_pcpu_stats *cpu_stats;
+		u64 rx_packets;
+		u64 rx_bytes;
+		u64 tx_packets;
+		u64 tx_bytes;
 
+		cpu_stats = per_cpu_ptr(pp->stats, cpu);
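+		/* retry the read if the writer updated this CPU's counters
+		 * while we were copying them
+		 */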
+		do {
+			start = u64_stats_fetch_begin_bh(&cpu_stats->syncp);
+			rx_packets = cpu_stats->rx_packets;
+			rx_bytes   = cpu_stats->rx_bytes;
+			tx_packets = cpu_stats->tx_packets;
+			tx_bytes   = cpu_stats->tx_bytes;
+		} while (u64_stats_fetch_retry_bh(&cpu_stats->syncp, start));
 
-	do {
-		start = u64_stats_fetch_begin_bh(&pp->tx_stats.syncp);
-		stats->tx_packets = pp->tx_stats.packets;
-		stats->tx_bytes	= pp->tx_stats.bytes;
-	} while (u64_stats_fetch_retry_bh(&pp->tx_stats.syncp, start));
+		stats->rx_packets += rx_packets;
+		stats->rx_bytes   += rx_bytes;
+		stats->tx_packets += tx_packets;
+		stats->tx_bytes   += tx_bytes;
+	}
 
 	stats->rx_errors	= dev->stats.rx_errors;
 	stats->rx_dropped	= dev->stats.rx_dropped;
@@ -487,14 +531,14 @@ struct rtnl_link_stats64 *mvneta_get_stats64(struct net_device *dev,
 
 /* Rx descriptors helper methods */
 
-/* Checks whether the given RX descriptor is both the first and the
- * last descriptor for the RX packet. Each RX packet is currently
+/* Checks whether the RX descriptor having this status is both the first
+ * and the last descriptor for the RX packet. Each RX packet is currently
  * received through a single RX descriptor, so not having each RX
  * descriptor with its first and last bits set is an error
  */
-static int mvneta_rxq_desc_is_first_last(struct mvneta_rx_desc *desc)
+static int mvneta_rxq_desc_is_first_last(u32 status)
 {
-	return (desc->status & MVNETA_RXD_FIRST_LAST_DESC) ==
+	return (status & MVNETA_RXD_FIRST_LAST_DESC) ==
 		MVNETA_RXD_FIRST_LAST_DESC;
 }
 
@@ -570,6 +614,7 @@ mvneta_rxq_next_desc_get(struct mvneta_rx_queue *rxq)
 	int rx_desc = rxq->next_desc_to_proc;
 
 	rxq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(rxq, rx_desc);
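+	/* prefetch the descriptor that will be processed next */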
+	prefetch(rxq->descs + rxq->next_desc_to_proc);
 	return rxq->descs + rx_desc;
 }
 
@@ -1100,17 +1145,6 @@ static void mvneta_tx_done_pkts_coal_set(struct mvneta_port *pp,
 	txq->done_pkts_coal = value;
 }
 
-/* Trigger tx done timer in MVNETA_TX_DONE_TIMER_PERIOD msecs */
-static void mvneta_add_tx_done_timer(struct mvneta_port *pp)
-{
-	if (test_and_set_bit(MVNETA_F_TX_DONE_TIMER_BIT, &pp->flags) == 0) {
-		pp->tx_done_timer.expires = jiffies +
-			msecs_to_jiffies(MVNETA_TX_DONE_TIMER_PERIOD);
-		add_timer(&pp->tx_done_timer);
-	}
-}
-
-
 /* Handle rx descriptor fill by setting buf_cookie and buf_phys_addr */
 static void mvneta_rx_desc_fill(struct mvneta_rx_desc *rx_desc,
 				u32 phys_addr, u32 cookie)
@@ -1204,10 +1238,10 @@ static void mvneta_rx_error(struct mvneta_port *pp,
 {
 	u32 status = rx_desc->status;
 
-	if (!mvneta_rxq_desc_is_first_last(rx_desc)) {
+	if (!mvneta_rxq_desc_is_first_last(status)) {
 		netdev_err(pp->dev,
 			   "bad rx status %08x (buffer oversize), size=%d\n",
-			   rx_desc->status, rx_desc->data_size);
+			   status, rx_desc->data_size);
 		return;
 	}
 
@@ -1231,13 +1265,12 @@ static void mvneta_rx_error(struct mvneta_port *pp,
 	}
 }
 
-/* Handle RX checksum offload */
-static void mvneta_rx_csum(struct mvneta_port *pp,
-			   struct mvneta_rx_desc *rx_desc,
+/* Handle RX checksum offload based on the descriptor's status */
+static void mvneta_rx_csum(struct mvneta_port *pp, u32 status,
 			   struct sk_buff *skb)
 {
-	if ((rx_desc->status & MVNETA_RXD_L3_IP4) &&
-	    (rx_desc->status & MVNETA_RXD_L4_CSUM_OK)) {
+	if ((status & MVNETA_RXD_L3_IP4) &&
+	    (status & MVNETA_RXD_L4_CSUM_OK)) {
 		skb->csum = 0;
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
 		return;
@@ -1246,13 +1279,16 @@ static void mvneta_rx_csum(struct mvneta_port *pp,
 	skb->ip_summed = CHECKSUM_NONE;
 }
 
-/* Return tx queue pointer (find last set bit) according to causeTxDone reg */
+/* Return tx queue pointer (find last set bit) according to <cause> returned
+ * from the tx_done reg. <cause> must not be null. The return value is always
+ * a valid queue, matching the first set bit found in <cause>.
+ */
 static struct mvneta_tx_queue *mvneta_tx_done_policy(struct mvneta_port *pp,
 						     u32 cause)
 {
 	int queue = fls(cause) - 1;
 
-	return (queue < 0 || queue >= txq_number) ? NULL : &pp->txqs[queue];
+	return &pp->txqs[queue];
 }
 
 /* Free tx queue skbuffs */
@@ -1278,15 +1314,16 @@ static void mvneta_txq_bufs_free(struct mvneta_port *pp,
 }
 
 /* Handle end of transmission */
-static int mvneta_txq_done(struct mvneta_port *pp,
+static void mvneta_txq_done(struct mvneta_port *pp,
 			   struct mvneta_tx_queue *txq)
 {
 	struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
 	int tx_done;
 
 	tx_done = mvneta_txq_sent_desc_proc(pp, txq);
-	if (tx_done == 0)
-		return tx_done;
+	if (!tx_done)
+		return;
+
 	mvneta_txq_bufs_free(pp, txq, tx_done);
 
 	txq->count -= tx_done;
@@ -1295,8 +1332,22 @@ static int mvneta_txq_done(struct mvneta_port *pp,
 		if (txq->size - txq->count >= MAX_SKB_FRAGS + 1)
 			netif_tx_wake_queue(nq);
 	}
+}
 
-	return tx_done;
+static void *mvneta_frag_alloc(const struct mvneta_port *pp)
+{
+	if (likely(pp->frag_size <= PAGE_SIZE))
+		return netdev_alloc_frag(pp->frag_size);
+	else
+		return kmalloc(pp->frag_size, GFP_ATOMIC);
+}
+
+static void mvneta_frag_free(const struct mvneta_port *pp, void *data)
+{
+	if (likely(pp->frag_size <= PAGE_SIZE))
+		put_page(virt_to_head_page(data));
+	else
+		kfree(data);
 }
 
 /* Refill processing */
@@ -1305,22 +1356,21 @@ static int mvneta_rx_refill(struct mvneta_port *pp,
 
 {
 	dma_addr_t phys_addr;
-	struct sk_buff *skb;
+	void *data;
 
-	skb = netdev_alloc_skb(pp->dev, pp->pkt_size);
-	if (!skb)
+	data = mvneta_frag_alloc(pp);
+	if (!data)
 		return -ENOMEM;
 
-	phys_addr = dma_map_single(pp->dev->dev.parent, skb->head,
+	phys_addr = dma_map_single(pp->dev->dev.parent, data,
 				   MVNETA_RX_BUF_SIZE(pp->pkt_size),
 				   DMA_FROM_DEVICE);
 	if (unlikely(dma_mapping_error(pp->dev->dev.parent, phys_addr))) {
-		dev_kfree_skb(skb);
+		mvneta_frag_free(pp, data);
 		return -ENOMEM;
 	}
 
-	mvneta_rx_desc_fill(rx_desc, phys_addr, (u32)skb);
-
+	mvneta_rx_desc_fill(rx_desc, phys_addr, (u32)data);
 	return 0;
 }
 
@@ -1374,9 +1424,9 @@ static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
 	rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
 	for (i = 0; i < rxq->size; i++) {
 		struct mvneta_rx_desc *rx_desc = rxq->descs + i;
-		struct sk_buff *skb = (struct sk_buff *)rx_desc->buf_cookie;
+		void *data = (void *)rx_desc->buf_cookie;
 
-		dev_kfree_skb_any(skb);
+		mvneta_frag_free(pp, data);
 		dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr,
 				 MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE);
 	}
@@ -1391,6 +1441,8 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
 {
 	struct net_device *dev = pp->dev;
 	int rx_done, rx_filled;
+	u32 rcvd_pkts = 0;
+	u32 rcvd_bytes = 0;
 
 	/* Get number of received packets */
 	rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
@@ -1405,53 +1457,89 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
 	while (rx_done < rx_todo) {
 		struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
 		struct sk_buff *skb;
+		unsigned char *data;
 		u32 rx_status;
 		int rx_bytes, err;
 
-		prefetch(rx_desc);
 		rx_done++;
 		rx_filled++;
 		rx_status = rx_desc->status;
-		skb = (struct sk_buff *)rx_desc->buf_cookie;
+		rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE);
+		data = (unsigned char *)rx_desc->buf_cookie;
 
-		if (!mvneta_rxq_desc_is_first_last(rx_desc) ||
+		if (!mvneta_rxq_desc_is_first_last(rx_status) ||
 		    (rx_status & MVNETA_RXD_ERR_SUMMARY)) {
+		err_drop_frame:
 			dev->stats.rx_errors++;
 			mvneta_rx_error(pp, rx_desc);
-			mvneta_rx_desc_fill(rx_desc, rx_desc->buf_phys_addr,
-					    (u32)skb);
+			/* leave the descriptor untouched */
 			continue;
 		}
 
-		dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr,
+		if (rx_bytes <= rx_copybreak) {
+			/* better to copy a small frame than to unmap the DMA region */
+			skb = netdev_alloc_skb_ip_align(dev, rx_bytes);
+			if (unlikely(!skb))
+				goto err_drop_frame;
+
+			dma_sync_single_range_for_cpu(dev->dev.parent,
+			                              rx_desc->buf_phys_addr,
+			                              MVNETA_MH_SIZE + NET_SKB_PAD,
+			                              rx_bytes,
+			                              DMA_FROM_DEVICE);
+			memcpy(skb_put(skb, rx_bytes),
+			       data + MVNETA_MH_SIZE + NET_SKB_PAD,
+			       rx_bytes);
+
+			skb->protocol = eth_type_trans(skb, dev);
+			mvneta_rx_csum(pp, rx_status, skb);
+			napi_gro_receive(&pp->napi, skb);
+
+			rcvd_pkts++;
+			rcvd_bytes += rx_bytes;
+
+			/* leave the descriptor and buffer untouched */
+			continue;
+		}
+
+		skb = build_skb(data, pp->frag_size > PAGE_SIZE ? 0 : pp->frag_size);
+		if (!skb)
+			goto err_drop_frame;
+
+		dma_unmap_single(dev->dev.parent, rx_desc->buf_phys_addr,
 				 MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE);
 
-		rx_bytes = rx_desc->data_size -
-			(ETH_FCS_LEN + MVNETA_MH_SIZE);
-		u64_stats_update_begin(&pp->rx_stats.syncp);
-		pp->rx_stats.packets++;
-		pp->rx_stats.bytes += rx_bytes;
-		u64_stats_update_end(&pp->rx_stats.syncp);
+		rcvd_pkts++;
+		rcvd_bytes += rx_bytes;
 
 		/* Linux processing */
-		skb_reserve(skb, MVNETA_MH_SIZE);
+		skb_reserve(skb, MVNETA_MH_SIZE + NET_SKB_PAD);
 		skb_put(skb, rx_bytes);
 
 		skb->protocol = eth_type_trans(skb, dev);
 
-		mvneta_rx_csum(pp, rx_desc, skb);
+		mvneta_rx_csum(pp, rx_status, skb);
 
 		napi_gro_receive(&pp->napi, skb);
 
 		/* Refill processing */
 		err = mvneta_rx_refill(pp, rx_desc);
 		if (err) {
-			netdev_err(pp->dev, "Linux processing - Can't refill\n");
+			netdev_err(dev, "Linux processing - Can't refill\n");
 			rxq->missed++;
 			rx_filled--;
 		}
 	}
 
+	if (rcvd_pkts) {
+		struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
+
+		u64_stats_update_begin(&stats->syncp);
+		stats->rx_packets += rcvd_pkts;
+		stats->rx_bytes   += rcvd_bytes;
+		u64_stats_update_end(&stats->syncp);
+	}
+
 	/* Update rxq management counters */
 	mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_filled);
 
@@ -1582,25 +1670,17 @@ static int mvneta_tx(struct sk_buff *skb, struct net_device *dev)
 
 out:
 	if (frags > 0) {
-		u64_stats_update_begin(&pp->tx_stats.syncp);
-		pp->tx_stats.packets++;
-		pp->tx_stats.bytes += skb->len;
-		u64_stats_update_end(&pp->tx_stats.syncp);
+		struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
 
+		u64_stats_update_begin(&stats->syncp);
+		stats->tx_packets++;
+		stats->tx_bytes  += skb->len;
+		u64_stats_update_end(&stats->syncp);
 	} else {
 		dev->stats.tx_dropped++;
 		dev_kfree_skb_any(skb);
 	}
 
-	if (txq->count >= MVNETA_TXDONE_COAL_PKTS)
-		mvneta_txq_done(pp, txq);
-
-	/* If after calling mvneta_txq_done, count equals
-	 * frags, we need to set the timer
-	 */
-	if (txq->count == frags && frags > 0)
-		mvneta_add_tx_done_timer(pp);
-
 	return NETDEV_TX_OK;
 }
 
@@ -1620,33 +1700,26 @@ static void mvneta_txq_done_force(struct mvneta_port *pp,
 	txq->txq_get_index = 0;
 }
 
-/* handle tx done - called from tx done timer callback */
-static u32 mvneta_tx_done_gbe(struct mvneta_port *pp, u32 cause_tx_done,
-			      int *tx_todo)
+/* Handle tx done - called in softirq context. The <cause_tx_done> argument
+ * must be a valid cause according to MVNETA_TXQ_INTR_MASK_ALL.
+ */
+static void mvneta_tx_done_gbe(struct mvneta_port *pp, u32 cause_tx_done)
 {
 	struct mvneta_tx_queue *txq;
-	u32 tx_done = 0;
 	struct netdev_queue *nq;
 
-	*tx_todo = 0;
-	while (cause_tx_done != 0) {
+	while (cause_tx_done) {
 		txq = mvneta_tx_done_policy(pp, cause_tx_done);
-		if (!txq)
-			break;
 
 		nq = netdev_get_tx_queue(pp->dev, txq->id);
 		__netif_tx_lock(nq, smp_processor_id());
 
-		if (txq->count) {
-			tx_done += mvneta_txq_done(pp, txq);
-			*tx_todo += txq->count;
-		}
+		if (txq->count)
+			mvneta_txq_done(pp, txq);
 
 		__netif_tx_unlock(nq);
 		cause_tx_done &= ~((1 << txq->id));
 	}
-
-	return tx_done;
 }
 
 /* Compute crc8 of the specified address, using a unique algorithm ,
@@ -1876,14 +1949,20 @@ static int mvneta_poll(struct napi_struct *napi, int budget)
 
 	/* Read cause register */
 	cause_rx_tx = mvreg_read(pp, MVNETA_INTR_NEW_CAUSE) &
-		MVNETA_RX_INTR_MASK(rxq_number);
+		(MVNETA_RX_INTR_MASK(rxq_number) | MVNETA_TX_INTR_MASK(txq_number));
+
+	/* Release Tx descriptors */
+	if (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL) {
+		mvneta_tx_done_gbe(pp, (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL));
+		cause_rx_tx &= ~MVNETA_TX_INTR_MASK_ALL;
+	}
 
 	/* For the case where the last mvneta_poll did not process all
 	 * RX packets
 	 */
 	cause_rx_tx |= pp->cause_rx_tx;
 	if (rxq_number > 1) {
-		while ((cause_rx_tx != 0) && (budget > 0)) {
+		while ((cause_rx_tx & MVNETA_RX_INTR_MASK_ALL) && (budget > 0)) {
 			int count;
 			struct mvneta_rx_queue *rxq;
 			/* get rx queue number from cause_rx_tx */
@@ -1915,7 +1994,7 @@ static int mvneta_poll(struct napi_struct *napi, int budget)
 		napi_complete(napi);
 		local_irq_save(flags);
 		mvreg_write(pp, MVNETA_INTR_NEW_MASK,
-			    MVNETA_RX_INTR_MASK(rxq_number));
+			    MVNETA_RX_INTR_MASK(rxq_number) | MVNETA_TX_INTR_MASK(txq_number));
 		local_irq_restore(flags);
 	}
 
@@ -1923,56 +2002,19 @@ static int mvneta_poll(struct napi_struct *napi, int budget)
 	return rx_done;
 }
 
-/* tx done timer callback */
-static void mvneta_tx_done_timer_callback(unsigned long data)
-{
-	struct net_device *dev = (struct net_device *)data;
-	struct mvneta_port *pp = netdev_priv(dev);
-	int tx_done = 0, tx_todo = 0;
-
-	if (!netif_running(dev))
-		return ;
-
-	clear_bit(MVNETA_F_TX_DONE_TIMER_BIT, &pp->flags);
-
-	tx_done = mvneta_tx_done_gbe(pp,
-				     (((1 << txq_number) - 1) &
-				      MVNETA_CAUSE_TXQ_SENT_DESC_ALL_MASK),
-				     &tx_todo);
-	if (tx_todo > 0)
-		mvneta_add_tx_done_timer(pp);
-}
-
 /* Handle rxq fill: allocates rxq skbs; called when initializing a port */
 static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
 			   int num)
 {
-	struct net_device *dev = pp->dev;
 	int i;
 
 	for (i = 0; i < num; i++) {
-		struct sk_buff *skb;
-		struct mvneta_rx_desc *rx_desc;
-		unsigned long phys_addr;
-
-		skb = dev_alloc_skb(pp->pkt_size);
-		if (!skb) {
-			netdev_err(dev, "%s:rxq %d, %d of %d buffs  filled\n",
+		memset(rxq->descs + i, 0, sizeof(struct mvneta_rx_desc));
+		if (mvneta_rx_refill(pp, rxq->descs + i) != 0) {
+			netdev_err(pp->dev, "%s:rxq %d, %d of %d buffs  filled\n",
 				__func__, rxq->id, i, num);
 			break;
 		}
-
-		rx_desc = rxq->descs + i;
-		memset(rx_desc, 0, sizeof(struct mvneta_rx_desc));
-		phys_addr = dma_map_single(dev->dev.parent, skb->head,
-					   MVNETA_RX_BUF_SIZE(pp->pkt_size),
-					   DMA_FROM_DEVICE);
-		if (unlikely(dma_mapping_error(dev->dev.parent, phys_addr))) {
-			dev_kfree_skb(skb);
-			break;
-		}
-
-		mvneta_rx_desc_fill(rx_desc, phys_addr, (u32)skb);
 	}
 
 	/* Add this number of RX descriptors as non occupied (ready to
@@ -2192,7 +2234,7 @@ static void mvneta_start_dev(struct mvneta_port *pp)
 
 	/* Unmask interrupts */
 	mvreg_write(pp, MVNETA_INTR_NEW_MASK,
-		    MVNETA_RX_INTR_MASK(rxq_number));
+		    MVNETA_RX_INTR_MASK(rxq_number) | MVNETA_TX_INTR_MASK(txq_number));
 
 	phy_start(pp->phy_dev);
 	netif_tx_start_all_queues(pp->dev);
@@ -2225,16 +2267,6 @@ static void mvneta_stop_dev(struct mvneta_port *pp)
 	mvneta_rx_reset(pp);
 }
 
-/* tx timeout callback - display a message and stop/start the network device */
-static void mvneta_tx_timeout(struct net_device *dev)
-{
-	struct mvneta_port *pp = netdev_priv(dev);
-
-	netdev_info(dev, "tx timeout\n");
-	mvneta_stop_dev(pp);
-	mvneta_start_dev(pp);
-}
-
 /* Return positive if MTU is valid */
 static int mvneta_check_mtu_valid(struct net_device *dev, int mtu)
 {
@@ -2282,6 +2314,8 @@ static int mvneta_change_mtu(struct net_device *dev, int mtu)
 	mvneta_cleanup_rxqs(pp);
 
 	pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
+	pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) +
+	                SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 
 	ret = mvneta_setup_rxqs(pp);
 	if (ret) {
@@ -2429,6 +2463,8 @@ static int mvneta_open(struct net_device *dev)
 	mvneta_mac_addr_set(pp, dev->dev_addr, rxq_def);
 
 	pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
+	pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) +
+	                SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 
 	ret = mvneta_setup_rxqs(pp);
 	if (ret)
@@ -2478,8 +2514,6 @@ static int mvneta_stop(struct net_device *dev)
 	free_irq(dev->irq, pp);
 	mvneta_cleanup_rxqs(pp);
 	mvneta_cleanup_txqs(pp);
-	del_timer(&pp->tx_done_timer);
-	clear_bit(MVNETA_F_TX_DONE_TIMER_BIT, &pp->flags);
 
 	return 0;
 }
@@ -2615,7 +2649,6 @@ static const struct net_device_ops mvneta_netdev_ops = {
 	.ndo_set_rx_mode     = mvneta_set_rx_mode,
 	.ndo_set_mac_address = mvneta_set_mac_addr,
 	.ndo_change_mtu      = mvneta_change_mtu,
-	.ndo_tx_timeout      = mvneta_tx_timeout,
 	.ndo_get_stats64     = mvneta_get_stats64,
 	.ndo_do_ioctl        = mvneta_ioctl,
 };
@@ -2751,6 +2784,7 @@ static int mvneta_probe(struct platform_device *pdev)
 	const char *mac_from;
 	int phy_mode;
 	int err;
+	int cpu;
 
 	/* Our multiqueue support is not complete, so for now, only
 	 * allow the usage of the first RX queue
@@ -2792,9 +2826,6 @@ static int mvneta_probe(struct platform_device *pdev)
 
 	pp = netdev_priv(dev);
 
-	u64_stats_init(&pp->tx_stats.syncp);
-	u64_stats_init(&pp->rx_stats.syncp);
-
 	pp->weight = MVNETA_RX_POLL_WEIGHT;
 	pp->phy_node = phy_node;
 	pp->phy_interface = phy_mode;
@@ -2813,6 +2844,19 @@ static int mvneta_probe(struct platform_device *pdev)
 		goto err_clk;
 	}
 
+	/* Alloc per-cpu stats */
+	pp->stats = alloc_percpu(struct mvneta_pcpu_stats);
+	if (!pp->stats) {
+		err = -ENOMEM;
+		goto err_unmap;
+	}
+
+	for_each_possible_cpu(cpu) {
+		struct mvneta_pcpu_stats *stats;
+		stats = per_cpu_ptr(pp->stats, cpu);
+		u64_stats_init(&stats->syncp);
+	}
+
 	dt_mac_addr = of_get_mac_address(dn);
 	if (dt_mac_addr) {
 		mac_from = "device tree";
@@ -2828,11 +2872,6 @@ static int mvneta_probe(struct platform_device *pdev)
 		}
 	}
 
-	pp->tx_done_timer.data = (unsigned long)dev;
-	pp->tx_done_timer.function = mvneta_tx_done_timer_callback;
-	init_timer(&pp->tx_done_timer);
-	clear_bit(MVNETA_F_TX_DONE_TIMER_BIT, &pp->flags);
-
 	pp->tx_ring_size = MVNETA_MAX_TXD;
 	pp->rx_ring_size = MVNETA_MAX_RXD;
 
@@ -2842,7 +2881,7 @@ static int mvneta_probe(struct platform_device *pdev)
 	err = mvneta_init(pp, phy_addr);
 	if (err < 0) {
 		dev_err(&pdev->dev, "can't init eth hal\n");
-		goto err_unmap;
+		goto err_free_stats;
 	}
 	mvneta_port_power_up(pp, phy_mode);
 
@@ -2872,6 +2911,8 @@ static int mvneta_probe(struct platform_device *pdev)
 
 err_deinit:
 	mvneta_deinit(pp);
+err_free_stats:
+	free_percpu(pp->stats);
 err_unmap:
 	iounmap(pp->base);
 err_clk:
@@ -2892,6 +2933,7 @@ static int mvneta_remove(struct platform_device *pdev)
 	unregister_netdev(dev);
 	mvneta_deinit(pp);
 	clk_disable_unprepare(pp->clk);
+	free_percpu(pp->stats);
 	iounmap(pp->base);
 	irq_dispose_mapping(dev->irq);
 	free_netdev(dev);
@@ -2924,3 +2966,4 @@ module_param(rxq_number, int, S_IRUGO);
 module_param(txq_number, int, S_IRUGO);
 
 module_param(rxq_def, int, S_IRUGO);
+module_param(rx_copybreak, int, S_IRUGO | S_IWUSR);
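
The mvneta hunks above introduce an rx_copybreak fast path: frames at or below the threshold are copied into a fresh skb while the DMA buffer stays mapped for immediate reuse, and larger frames are handed to the stack zero-copy via build_skb() with a refill afterwards. The following is a minimal, hedged sketch of that decision, not the driver's exact code; the function name and the headroom/map_size parameters are illustrative.

/* Illustrative sketch (not the driver's function) of the rx_copybreak
 * decision added above: copy small frames, hand large ones over zero-copy.
 */
static void rx_copybreak_sketch(struct net_device *dev,
				struct napi_struct *napi,
				dma_addr_t buf_dma, unsigned char *buf,
				int len, size_t map_size,
				int copybreak, unsigned int headroom)
{
	struct sk_buff *skb;

	if (len <= copybreak) {
		/* Copy path: the receive buffer stays mapped and is reused
		 * as-is, only the payload bytes are synced for the CPU.
		 */
		skb = netdev_alloc_skb_ip_align(dev, len);
		if (unlikely(!skb))
			return;				/* treat as a drop */
		dma_sync_single_range_for_cpu(dev->dev.parent, buf_dma,
					      headroom, len, DMA_FROM_DEVICE);
		memcpy(skb_put(skb, len), buf + headroom, len);
	} else {
		/* Zero-copy path: the buffer itself becomes the skb data
		 * area and must be unmapped; the ring slot is refilled later.
		 */
		skb = build_skb(buf, 0);
		if (unlikely(!skb))
			return;
		dma_unmap_single(dev->dev.parent, buf_dma, map_size,
				 DMA_FROM_DEVICE);
		skb_reserve(skb, headroom);
		skb_put(skb, len);
	}

	skb->protocol = eth_type_trans(skb, dev);
	napi_gro_receive(napi, skb);
}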
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index fff62460185c..b358c2f6f4bd 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -19,11 +19,9 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
-#include <linux/init.h>
 #include <linux/dma-mapping.h>
 #include <linux/in.h>
 #include <linux/ip.h>
@@ -321,23 +319,6 @@ static void ethernet_phy_set_addr(struct pxa168_eth_private *pep, int phy_addr)
 	wrl(pep, PHY_ADDRESS, reg_data);
 }
 
-static void ethernet_phy_reset(struct pxa168_eth_private *pep)
-{
-	int data;
-
-	data = phy_read(pep->phy, MII_BMCR);
-	if (data < 0)
-		return;
-
-	data |= BMCR_RESET;
-	if (phy_write(pep->phy, MII_BMCR, data) < 0)
-		return;
-
-	do {
-		data = phy_read(pep->phy, MII_BMCR);
-	} while (data >= 0 && data & BMCR_RESET);
-}
-
 static void rxq_refill(struct net_device *dev)
 {
 	struct pxa168_eth_private *pep = netdev_priv(dev);
@@ -646,7 +627,7 @@ static void eth_port_start(struct net_device *dev)
 		struct ethtool_cmd cmd;
 
 		pxa168_get_settings(pep->dev, &cmd);
-		ethernet_phy_reset(pep);
+		phy_init_hw(pep->phy);
 		pxa168_set_settings(pep->dev, &cmd);
 	}
 
@@ -1383,7 +1364,6 @@ static struct phy_device *phy_scan(struct pxa168_eth_private *pep, int phy_addr)
 static void phy_init(struct pxa168_eth_private *pep, int speed, int duplex)
 {
 	struct phy_device *phy = pep->phy;
-	ethernet_phy_reset(pep);
 
 	phy_attach(pep->dev, dev_name(&phy->dev), PHY_INTERFACE_MODE_MII);
 
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 43aa7acd84a6..6509935d145e 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -2495,7 +2495,7 @@ static struct sk_buff *receive_copy(struct sky2_port *sky2,
 		skb_copy_from_linear_data(re->skb, skb->data, length);
 		skb->ip_summed = re->skb->ip_summed;
 		skb->csum = re->skb->csum;
-		skb->rxhash = re->skb->rxhash;
+		skb_copy_hash(skb, re->skb);
 		skb->vlan_proto = re->skb->vlan_proto;
 		skb->vlan_tci = re->skb->vlan_tci;
 
@@ -2503,7 +2503,7 @@ static struct sk_buff *receive_copy(struct sky2_port *sky2,
 					       length, PCI_DMA_FROMDEVICE);
 		re->skb->vlan_proto = 0;
 		re->skb->vlan_tci = 0;
-		re->skb->rxhash = 0;
+		skb_clear_hash(re->skb);
 		re->skb->ip_summed = CHECKSUM_NONE;
 		skb_put(skb, length);
 	}
@@ -2723,7 +2723,7 @@ static void sky2_rx_hash(struct sky2_port *sky2, u32 status)
 	struct sk_buff *skb;
 
 	skb = sky2->rx_ring[sky2->rx_next].skb;
-	skb->rxhash = le32_to_cpu(status);
+	skb_set_hash(skb, le32_to_cpu(status), PKT_HASH_TYPE_L3);
 }
 
 /* Process status response ring */
diff --git a/drivers/net/ethernet/mellanox/mlx4/Kconfig b/drivers/net/ethernet/mellanox/mlx4/Kconfig
index eb520ab64014..563495d8975a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx4/Kconfig
@@ -6,6 +6,7 @@ config MLX4_EN
 	tristate "Mellanox Technologies 10Gbit Ethernet support"
 	depends on PCI
 	select MLX4_CORE
+	select PTP_1588_CLOCK
 	---help---
 	  This driver supports Mellanox Technologies ConnectX Ethernet
 	  devices.
diff --git a/drivers/net/ethernet/mellanox/mlx4/alloc.c b/drivers/net/ethernet/mellanox/mlx4/alloc.c
index 06fef5b44f77..c3ad464d0627 100644
--- a/drivers/net/ethernet/mellanox/mlx4/alloc.c
+++ b/drivers/net/ethernet/mellanox/mlx4/alloc.c
@@ -71,9 +71,9 @@ u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap)
 	return obj;
 }
 
-void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj)
+void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj, int use_rr)
 {
-	mlx4_bitmap_free_range(bitmap, obj, 1);
+	mlx4_bitmap_free_range(bitmap, obj, 1, use_rr);
 }
 
 u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align)
@@ -118,11 +118,17 @@ u32 mlx4_bitmap_avail(struct mlx4_bitmap *bitmap)
 	return bitmap->avail;
 }
 
-void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt)
+void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt,
+			    int use_rr)
 {
 	obj &= bitmap->max + bitmap->reserved_top - 1;
 
 	spin_lock(&bitmap->lock);
+	if (!use_rr) {
+		bitmap->last = min(bitmap->last, obj);
+		bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
+				& bitmap->mask;
+	}
 	bitmap_clear(bitmap->table, obj, cnt);
 	bitmap->avail += cnt;
 	spin_unlock(&bitmap->lock);
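
The alloc.c change above threads a use_rr flag through the bitmap free helpers: round-robin callers keep the allocation cursor moving forward, while everyone else rewinds it so the lowest freed index is reused first (the bitmap->top wrap handling is omitted here). A toy userspace sketch of the two policies, with purely illustrative names:

/* Toy userspace model of the two free policies: all names are illustrative,
 * only the cursor handling mirrors the idea in the hunk above.
 */
#include <stdbool.h>
#include <stdio.h>

#define NBITS 8U

struct toy_bitmap {
	unsigned long table;	/* bit set = object in use */
	unsigned int last;	/* where the next search starts */
};

static int toy_alloc(struct toy_bitmap *bm)
{
	for (unsigned int n = 0; n < NBITS; n++) {
		unsigned int obj = (bm->last + n) % NBITS;

		if (!(bm->table & (1UL << obj))) {
			bm->table |= 1UL << obj;
			bm->last = (obj + 1) % NBITS;
			return (int)obj;
		}
	}
	return -1;			/* exhausted */
}

static void toy_free(struct toy_bitmap *bm, unsigned int obj, bool use_rr)
{
	bm->table &= ~(1UL << obj);
	if (!use_rr && obj < bm->last)
		bm->last = obj;		/* reuse the lowest free index next */
}

int main(void)
{
	struct toy_bitmap bm = { 0, 0 };
	int a = toy_alloc(&bm);		/* 0 */
	int b = toy_alloc(&bm);		/* 1 */

	toy_free(&bm, a, true);		/* round-robin: cursor stays put */
	printf("RR     freed %d, next alloc %d\n", a, toy_alloc(&bm)); /* 2 */
	toy_free(&bm, b, false);	/* non-RR: cursor rewinds */
	printf("non-RR freed %d, next alloc %d\n", b, toy_alloc(&bm)); /* 1 */
	return 0;
}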
diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c
index 22fcbe78311c..0487121e4a0f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cq.c
@@ -34,7 +34,6 @@
  * SOFTWARE.
  */
 
-#include <linux/init.h>
 #include <linux/hardirq.h>
 #include <linux/export.h>
 
@@ -187,7 +186,7 @@ err_put:
 	mlx4_table_put(dev, &cq_table->table, *cqn);
 
 err_out:
-	mlx4_bitmap_free(&cq_table->bitmap, *cqn);
+	mlx4_bitmap_free(&cq_table->bitmap, *cqn, MLX4_NO_RR);
 	return err;
 }
 
@@ -217,7 +216,7 @@ void __mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn)
 
 	mlx4_table_put(dev, &cq_table->cmpt_table, cqn);
 	mlx4_table_put(dev, &cq_table->table, cqn);
-	mlx4_bitmap_free(&cq_table->bitmap, cqn);
+	mlx4_bitmap_free(&cq_table->bitmap, cqn, MLX4_NO_RR);
 }
 
 static void mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn)
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_clock.c b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
index fd6441071319..abaf6bb22416 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_clock.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
@@ -42,6 +42,10 @@ int mlx4_en_timestamp_config(struct net_device *dev, int tx_type, int rx_filter)
 	int port_up = 0;
 	int err = 0;
 
+	if (priv->hwtstamp_config.tx_type == tx_type &&
+	    priv->hwtstamp_config.rx_filter == rx_filter)
+		return 0;
+
 	mutex_lock(&mdev->state_lock);
 	if (priv->port_up) {
 		port_up = 1;
@@ -103,19 +107,191 @@ void mlx4_en_fill_hwtstamps(struct mlx4_en_dev *mdev,
 			    struct skb_shared_hwtstamps *hwts,
 			    u64 timestamp)
 {
+	unsigned long flags;
 	u64 nsec;
 
+	read_lock_irqsave(&mdev->clock_lock, flags);
 	nsec = timecounter_cyc2time(&mdev->clock, timestamp);
+	read_unlock_irqrestore(&mdev->clock_lock, flags);
 
 	memset(hwts, 0, sizeof(struct skb_shared_hwtstamps));
 	hwts->hwtstamp = ns_to_ktime(nsec);
 }
 
+/**
+ * mlx4_en_remove_timestamp - disable PTP device
+ * @mdev: board private structure
+ *
+ * Stop the PTP support.
+ **/
+void mlx4_en_remove_timestamp(struct mlx4_en_dev *mdev)
+{
+	if (mdev->ptp_clock) {
+		ptp_clock_unregister(mdev->ptp_clock);
+		mdev->ptp_clock = NULL;
+		mlx4_info(mdev, "removed PHC\n");
+	}
+}
+
+void mlx4_en_ptp_overflow_check(struct mlx4_en_dev *mdev)
+{
+	bool timeout = time_is_before_jiffies(mdev->last_overflow_check +
+					      mdev->overflow_period);
+	unsigned long flags;
+
+	if (timeout) {
+		write_lock_irqsave(&mdev->clock_lock, flags);
+		timecounter_read(&mdev->clock);
+		write_unlock_irqrestore(&mdev->clock_lock, flags);
+		mdev->last_overflow_check = jiffies;
+	}
+}
+
+/**
+ * mlx4_en_phc_adjfreq - adjust the frequency of the hardware clock
+ * @ptp: ptp clock structure
+ * @delta: Desired frequency change in parts per billion
+ *
+ * Adjust the frequency of the PHC cycle counter by the indicated delta from
+ * the base frequency.
+ **/
+static int mlx4_en_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta)
+{
+	u64 adj;
+	u32 diff, mult;
+	int neg_adj = 0;
+	unsigned long flags;
+	struct mlx4_en_dev *mdev = container_of(ptp, struct mlx4_en_dev,
+						ptp_clock_info);
+
+	if (delta < 0) {
+		neg_adj = 1;
+		delta = -delta;
+	}
+	mult = mdev->nominal_c_mult;
+	adj = mult;
+	adj *= delta;
+	diff = div_u64(adj, 1000000000ULL);
+
+	write_lock_irqsave(&mdev->clock_lock, flags);
+	timecounter_read(&mdev->clock);
+	mdev->cycles.mult = neg_adj ? mult - diff : mult + diff;
+	write_unlock_irqrestore(&mdev->clock_lock, flags);
+
+	return 0;
+}
+
+/**
+ * mlx4_en_phc_adjtime - Shift the time of the hardware clock
+ * @ptp: ptp clock structure
+ * @delta: Desired change in nanoseconds
+ *
+ * Adjust the timer by resetting the timecounter structure.
+ **/
+static int mlx4_en_phc_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+	struct mlx4_en_dev *mdev = container_of(ptp, struct mlx4_en_dev,
+						ptp_clock_info);
+	unsigned long flags;
+	s64 now;
+
+	write_lock_irqsave(&mdev->clock_lock, flags);
+	now = timecounter_read(&mdev->clock);
+	now += delta;
+	timecounter_init(&mdev->clock, &mdev->cycles, now);
+	write_unlock_irqrestore(&mdev->clock_lock, flags);
+
+	return 0;
+}
+
+/**
+ * mlx4_en_phc_gettime - Reads the current time from the hardware clock
+ * @ptp: ptp clock structure
+ * @ts: timespec structure to hold the current time value
+ *
+ * Read the timecounter and return the correct value in ns after converting
+ * it into a struct timespec.
+ **/
+static int mlx4_en_phc_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
+{
+	struct mlx4_en_dev *mdev = container_of(ptp, struct mlx4_en_dev,
+						ptp_clock_info);
+	unsigned long flags;
+	u32 remainder;
+	u64 ns;
+
+	write_lock_irqsave(&mdev->clock_lock, flags);
+	ns = timecounter_read(&mdev->clock);
+	write_unlock_irqrestore(&mdev->clock_lock, flags);
+
+	ts->tv_sec = div_u64_rem(ns, NSEC_PER_SEC, &remainder);
+	ts->tv_nsec = remainder;
+
+	return 0;
+}
+
+/**
+ * mlx4_en_phc_settime - Set the current time on the hardware clock
+ * @ptp: ptp clock structure
+ * @ts: timespec containing the new time for the cycle counter
+ *
+ * Reset the timecounter to use a new base value instead of the kernel
+ * wall timer value.
+ **/
+static int mlx4_en_phc_settime(struct ptp_clock_info *ptp,
+			       const struct timespec *ts)
+{
+	struct mlx4_en_dev *mdev = container_of(ptp, struct mlx4_en_dev,
+						ptp_clock_info);
+	u64 ns = timespec_to_ns(ts);
+	unsigned long flags;
+
+	/* reset the timecounter */
+	write_lock_irqsave(&mdev->clock_lock, flags);
+	timecounter_init(&mdev->clock, &mdev->cycles, ns);
+	write_unlock_irqrestore(&mdev->clock_lock, flags);
+
+	return 0;
+}
+
+/**
+ * mlx4_en_phc_enable - enable or disable an ancillary feature
+ * @ptp: ptp clock structure
+ * @request: Desired resource to enable or disable
+ * @on: Caller passes one to enable or zero to disable
+ *
+ * Enable (or disable) ancillary features of the PHC subsystem.
+ * Currently, no ancillary features are supported.
+ **/
+static int mlx4_en_phc_enable(struct ptp_clock_info __always_unused *ptp,
+			      struct ptp_clock_request __always_unused *request,
+			      int __always_unused on)
+{
+	return -EOPNOTSUPP;
+}
+
+static const struct ptp_clock_info mlx4_en_ptp_clock_info = {
+	.owner		= THIS_MODULE,
+	.max_adj	= 100000000,
+	.n_alarm	= 0,
+	.n_ext_ts	= 0,
+	.n_per_out	= 0,
+	.pps		= 0,
+	.adjfreq	= mlx4_en_phc_adjfreq,
+	.adjtime	= mlx4_en_phc_adjtime,
+	.gettime	= mlx4_en_phc_gettime,
+	.settime	= mlx4_en_phc_settime,
+	.enable		= mlx4_en_phc_enable,
+};
+
 void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
 {
 	struct mlx4_dev *dev = mdev->dev;
+	unsigned long flags;
 	u64 ns;
 
+	rwlock_init(&mdev->clock_lock);
+
 	memset(&mdev->cycles, 0, sizeof(mdev->cycles));
 	mdev->cycles.read = mlx4_en_read_clock;
 	mdev->cycles.mask = CLOCKSOURCE_MASK(48);
@@ -127,9 +303,12 @@ void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
 	mdev->cycles.shift = 14;
 	mdev->cycles.mult =
 		clocksource_khz2mult(1000 * dev->caps.hca_core_clock, mdev->cycles.shift);
+	mdev->nominal_c_mult = mdev->cycles.mult;
 
+	write_lock_irqsave(&mdev->clock_lock, flags);
 	timecounter_init(&mdev->clock, &mdev->cycles,
 			 ktime_to_ns(ktime_get_real()));
+	write_unlock_irqrestore(&mdev->clock_lock, flags);
 
 	/* Calculate period in seconds to call the overflow watchdog - to make
 	 * sure counter is checked at least once every wrap around.
@@ -137,15 +316,18 @@ void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
 	ns = cyclecounter_cyc2ns(&mdev->cycles, mdev->cycles.mask);
 	do_div(ns, NSEC_PER_SEC / 2 / HZ);
 	mdev->overflow_period = ns;
-}
 
-void mlx4_en_ptp_overflow_check(struct mlx4_en_dev *mdev)
-{
-	bool timeout = time_is_before_jiffies(mdev->last_overflow_check +
-					      mdev->overflow_period);
+	/* Configure the PHC */
+	mdev->ptp_clock_info = mlx4_en_ptp_clock_info;
+	snprintf(mdev->ptp_clock_info.name, 16, "mlx4 ptp");
 
-	if (timeout) {
-		timecounter_read(&mdev->clock);
-		mdev->last_overflow_check = jiffies;
+	mdev->ptp_clock = ptp_clock_register(&mdev->ptp_clock_info,
+					     &mdev->pdev->dev);
+	if (IS_ERR(mdev->ptp_clock)) {
+		mdev->ptp_clock = NULL;
+		mlx4_err(mdev, "ptp_clock_register failed\n");
+	} else {
+		mlx4_info(mdev, "registered PHC clock\n");
 	}
+
 }
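
The adjfreq callback added above scales the nominal cyclecounter multiplier by the requested parts-per-billion delta (diff = mult * |ppb| / 10^9) and applies it under clock_lock. The arithmetic on its own, as a small runnable userspace sketch (the multiplier value is made up):

/* Userspace sketch of the ppb -> multiplier adjustment used by the PHC
 * adjfreq callback above; the nominal multiplier value is made up.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t adjust_mult(uint32_t nominal_mult, int32_t ppb)
{
	int neg = ppb < 0;
	uint64_t adj = (uint64_t)nominal_mult * (uint64_t)(neg ? -(int64_t)ppb : ppb);
	uint32_t diff = (uint32_t)(adj / 1000000000ULL);

	return neg ? nominal_mult - diff : nominal_mult + diff;
}

int main(void)
{
	uint32_t mult = 16777216;	/* e.g. 2^24 as a nominal multiplier */

	/* +100 ppm (100000 ppb) bumps the multiplier by ~0.01% */
	printf("%u -> %u\n", mult, adjust_mult(mult, 100000));
	/* -5 ppm (-5000 ppb) nudges it down */
	printf("%u -> %u\n", mult, adjust_mult(mult, -5000));
	return 0;
}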
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index 3a098cc4d349..70e95324a97d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -161,12 +161,16 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
 	cq->mcq.comp  = cq->is_tx ? mlx4_en_tx_irq : mlx4_en_rx_irq;
 	cq->mcq.event = mlx4_en_cq_event;
 
-	if (!cq->is_tx) {
+	if (cq->is_tx) {
+		netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_tx_cq,
+			       NAPI_POLL_WEIGHT);
+	} else {
 		netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq, 64);
 		napi_hash_add(&cq->napi);
-		napi_enable(&cq->napi);
 	}
 
+	napi_enable(&cq->napi);
+
 	return 0;
 }
 
@@ -188,12 +192,12 @@ void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq)
 
 void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
 {
+	napi_disable(&cq->napi);
 	if (!cq->is_tx) {
-		napi_disable(&cq->napi);
 		napi_hash_del(&cq->napi);
 		synchronize_rcu();
-		netif_napi_del(&cq->napi);
 	}
+	netif_napi_del(&cq->napi);
 
 	mlx4_cq_free(priv->mdev->dev, &cq->mcq);
 }
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 0596f9f85a0e..3e8d33605fe7 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -1193,6 +1193,9 @@ static int mlx4_en_get_ts_info(struct net_device *dev,
 		info->rx_filters =
 			(1 << HWTSTAMP_FILTER_NONE) |
 			(1 << HWTSTAMP_FILTER_ALL);
+
+		if (mdev->ptp_clock)
+			info->phc_index = ptp_clock_index(mdev->ptp_clock);
 	}
 
 	return ret;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c
index 0d087b03a7b0..d357bf5a4686 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c
@@ -174,6 +174,9 @@ static void mlx4_en_event(struct mlx4_dev *dev, void *endev_ptr,
 		mlx4_err(mdev, "Internal error detected, restarting device\n");
 		break;
 
+	case MLX4_DEV_EVENT_SLAVE_INIT:
+	case MLX4_DEV_EVENT_SLAVE_SHUTDOWN:
+		break;
 	default:
 		if (port < 1 || port > dev->caps.num_ports ||
 		    !mdev->pndev[port])
@@ -196,6 +199,9 @@ static void mlx4_en_remove(struct mlx4_dev *dev, void *endev_ptr)
 		if (mdev->pndev[i])
 			mlx4_en_destroy_netdev(mdev->pndev[i]);
 
+	if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
+		mlx4_en_remove_timestamp(mdev);
+
 	flush_workqueue(mdev->workqueue);
 	destroy_workqueue(mdev->workqueue);
 	(void) mlx4_mr_free(dev, &mdev->mr);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index e72d8a112a6b..fad45316200a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -468,6 +468,53 @@ static void mlx4_en_u64_to_mac(unsigned char dst_mac[ETH_ALEN + 2], u64 src_mac)
 	memset(&dst_mac[ETH_ALEN], 0, 2);
 }
 
+
+static int mlx4_en_tunnel_steer_add(struct mlx4_en_priv *priv, unsigned char *addr,
+				    int qpn, u64 *reg_id)
+{
+	int err;
+	struct mlx4_spec_list spec_eth_outer = { {NULL} };
+	struct mlx4_spec_list spec_vxlan     = { {NULL} };
+	struct mlx4_spec_list spec_eth_inner = { {NULL} };
+
+	struct mlx4_net_trans_rule rule = {
+		.queue_mode = MLX4_NET_TRANS_Q_FIFO,
+		.exclusive = 0,
+		.allow_loopback = 1,
+		.promisc_mode = MLX4_FS_REGULAR,
+		.priority = MLX4_DOMAIN_NIC,
+	};
+
+	__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
+
+	if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
+		return 0; /* do nothing */
+
+	rule.port = priv->port;
+	rule.qpn = qpn;
+	INIT_LIST_HEAD(&rule.list);
+
+	spec_eth_outer.id = MLX4_NET_TRANS_RULE_ID_ETH;
+	memcpy(spec_eth_outer.eth.dst_mac, addr, ETH_ALEN);
+	memcpy(spec_eth_outer.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
+
+	spec_vxlan.id = MLX4_NET_TRANS_RULE_ID_VXLAN;    /* any vxlan header */
+	spec_eth_inner.id = MLX4_NET_TRANS_RULE_ID_ETH;	 /* any inner eth header */
+
+	list_add_tail(&spec_eth_outer.list, &rule.list);
+	list_add_tail(&spec_vxlan.list,     &rule.list);
+	list_add_tail(&spec_eth_inner.list, &rule.list);
+
+	err = mlx4_flow_attach(priv->mdev->dev, &rule, reg_id);
+	if (err) {
+		en_err(priv, "failed to add vxlan steering rule, err %d\n", err);
+		return err;
+	}
+	en_dbg(DRV, priv, "added vxlan steering rule, mac %pM reg_id %llx\n", addr, *reg_id);
+	return 0;
+}
+
+
 static int mlx4_en_uc_steer_add(struct mlx4_en_priv *priv,
 				unsigned char *mac, int *qpn, u64 *reg_id)
 {
@@ -585,6 +632,11 @@ static int mlx4_en_get_qp(struct mlx4_en_priv *priv)
 	if (err)
 		goto steer_err;
 
+	err = mlx4_en_tunnel_steer_add(priv, priv->dev->dev_addr, *qpn,
+				       &priv->tunnel_reg_id);
+	if (err)
+		goto tunnel_err;
+
 	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
 	if (!entry) {
 		err = -ENOMEM;
@@ -599,6 +651,9 @@ static int mlx4_en_get_qp(struct mlx4_en_priv *priv)
 	return 0;
 
 alloc_err:
+	if (priv->tunnel_reg_id)
+		mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);
+tunnel_err:
 	mlx4_en_uc_steer_release(priv, priv->dev->dev_addr, *qpn, reg_id);
 
 steer_err:
@@ -642,6 +697,11 @@ static void mlx4_en_put_qp(struct mlx4_en_priv *priv)
 			}
 		}
 
+		if (priv->tunnel_reg_id) {
+			mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);
+			priv->tunnel_reg_id = 0;
+		}
+
 		en_dbg(DRV, priv, "Releasing qp: port %d, qpn %d\n",
 		       priv->port, qpn);
 		mlx4_qp_release_range(dev, qpn, 1);
@@ -782,7 +842,7 @@ static void update_mclist_flags(struct mlx4_en_priv *priv,
 	list_for_each_entry(dst_tmp, dst, list) {
 		found = false;
 		list_for_each_entry(src_tmp, src, list) {
-			if (!memcmp(dst_tmp->addr, src_tmp->addr, ETH_ALEN)) {
+			if (ether_addr_equal(dst_tmp->addr, src_tmp->addr)) {
 				found = true;
 				break;
 			}
@@ -797,7 +857,7 @@ static void update_mclist_flags(struct mlx4_en_priv *priv,
 	list_for_each_entry(src_tmp, src, list) {
 		found = false;
 		list_for_each_entry(dst_tmp, dst, list) {
-			if (!memcmp(dst_tmp->addr, src_tmp->addr, ETH_ALEN)) {
+			if (ether_addr_equal(dst_tmp->addr, src_tmp->addr)) {
 				dst_tmp->action = MCLIST_NONE;
 				found = true;
 				break;
@@ -1044,6 +1104,12 @@ static void mlx4_en_do_multicast(struct mlx4_en_priv *priv,
 				if (err)
 					en_err(priv, "Fail to detach multicast address\n");
 
+				if (mclist->tunnel_reg_id) {
+					err = mlx4_flow_detach(priv->mdev->dev, mclist->tunnel_reg_id);
+					if (err)
+						en_err(priv, "Failed to detach multicast address\n");
+				}
+
 				/* remove from list */
 				list_del(&mclist->list);
 				kfree(mclist);
@@ -1061,6 +1127,10 @@ static void mlx4_en_do_multicast(struct mlx4_en_priv *priv,
 				if (err)
 					en_err(priv, "Fail to attach multicast address\n");
 
+				err = mlx4_en_tunnel_steer_add(priv, &mc_list[10], priv->base_qpn,
+							       &mclist->tunnel_reg_id);
+				if (err)
+					en_err(priv, "Failed to attach multicast address\n");
 			}
 		}
 	}
@@ -1598,6 +1668,15 @@ int mlx4_en_start_port(struct net_device *dev)
 		goto tx_err;
 	}
 
+	if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
+		err = mlx4_SET_PORT_VXLAN(mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC);
+		if (err) {
+			en_err(priv, "Failed setting port L2 tunnel configuration, err %d\n",
+			       err);
+			goto tx_err;
+		}
+	}
+
 	/* Init port */
 	en_dbg(HW, priv, "Initializing port\n");
 	err = mlx4_INIT_PORT(mdev->dev, priv->port);
@@ -1910,8 +1989,10 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
 				      prof->tx_ring_size, i, TX, node))
 			goto err;
 
-		if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i], priv->base_tx_qpn + i,
-					   prof->tx_ring_size, TXBB_SIZE, node))
+		if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i],
+					   priv->base_tx_qpn + i,
+					   prof->tx_ring_size, TXBB_SIZE,
+					   node, i))
 			goto err;
 	}
 
@@ -2025,7 +2106,7 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
 	return 0;
 }
 
-static int mlx4_en_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
+static int mlx4_en_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
 {
 	struct mlx4_en_priv *priv = netdev_priv(dev);
 	struct mlx4_en_dev *mdev = priv->mdev;
@@ -2084,11 +2165,21 @@ static int mlx4_en_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
 			    sizeof(config)) ? -EFAULT : 0;
 }
 
+static int mlx4_en_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
+{
+	struct mlx4_en_priv *priv = netdev_priv(dev);
+
+	return copy_to_user(ifr->ifr_data, &priv->hwtstamp_config,
+			    sizeof(priv->hwtstamp_config)) ? -EFAULT : 0;
+}
+
 static int mlx4_en_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 {
 	switch (cmd) {
 	case SIOCSHWTSTAMP:
-		return mlx4_en_hwtstamp_ioctl(dev, ifr);
+		return mlx4_en_hwtstamp_set(dev, ifr);
+	case SIOCGHWTSTAMP:
+		return mlx4_en_hwtstamp_get(dev, ifr);
 	default:
 		return -EOPNOTSUPP;
 	}
@@ -2154,6 +2245,27 @@ static int mlx4_en_set_vf_link_state(struct net_device *dev, int vf, int link_st
 
 	return mlx4_set_vf_link_state(mdev->dev, en_priv->port, vf, link_state);
 }
+
+#define PORT_ID_BYTE_LEN 8
+static int mlx4_en_get_phys_port_id(struct net_device *dev,
+				    struct netdev_phys_port_id *ppid)
+{
+	struct mlx4_en_priv *priv = netdev_priv(dev);
+	struct mlx4_dev *mdev = priv->mdev->dev;
+	int i;
+	u64 phys_port_id = mdev->caps.phys_port_id[priv->port];
+
+	if (!phys_port_id)
+		return -EOPNOTSUPP;
+
+	ppid->id_len = sizeof(phys_port_id);
+	for (i = PORT_ID_BYTE_LEN - 1; i >= 0; --i) {
+		ppid->id[i] =  phys_port_id & 0xff;
+		phys_port_id >>= 8;
+	}
+	return 0;
+}
+
 static const struct net_device_ops mlx4_netdev_ops = {
 	.ndo_open		= mlx4_en_open,
 	.ndo_stop		= mlx4_en_close,
@@ -2179,6 +2291,7 @@ static const struct net_device_ops mlx4_netdev_ops = {
 #ifdef CONFIG_NET_RX_BUSY_POLL
 	.ndo_busy_poll		= mlx4_en_low_latency_recv,
 #endif
+	.ndo_get_phys_port_id	= mlx4_en_get_phys_port_id,
 };
 
 static const struct net_device_ops mlx4_netdev_ops_master = {
@@ -2207,6 +2320,7 @@ static const struct net_device_ops mlx4_netdev_ops_master = {
 #ifdef CONFIG_RFS_ACCEL
 	.ndo_rx_flow_steer	= mlx4_en_filter_rfs,
 #endif
+	.ndo_get_phys_port_id	= mlx4_en_get_phys_port_id,
 };
 
 int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
@@ -2365,6 +2479,13 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
 	if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
 		dev->priv_flags |= IFF_UNICAST_FLT;
 
+	if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
+		dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
+					NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL;
+		dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
+		dev->features    |= NETIF_F_GSO_UDP_TUNNEL;
+	}
+
 	mdev->pndev[port] = dev;
 
 	netif_carrier_off(dev);
@@ -2394,6 +2515,15 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
 		goto out;
 	}
 
+	if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
+		err = mlx4_SET_PORT_VXLAN(mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC);
+		if (err) {
+			en_err(priv, "Failed setting port L2 tunnel configuration, err %d\n",
+			       err);
+			goto out;
+		}
+	}
+
 	/* Init port */
 	en_warn(priv, "Initializing port\n");
 	err = mlx4_INIT_PORT(mdev->dev, priv->port);
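
The new .ndo_get_phys_port_id handler above exports the 64-bit port GUID as an 8-byte identifier, most significant byte first. A small runnable userspace sketch of that packing (the GUID value is made up):

/* Userspace sketch of the byte packing done by the .ndo_get_phys_port_id
 * handler above: a 64-bit identifier written out in big-endian byte order.
 */
#include <stdint.h>
#include <stdio.h>

#define PORT_ID_BYTE_LEN 8

static void pack_port_id(uint64_t phys_port_id, uint8_t id[PORT_ID_BYTE_LEN])
{
	for (int i = PORT_ID_BYTE_LEN - 1; i >= 0; --i) {
		id[i] = phys_port_id & 0xff;
		phys_port_id >>= 8;
	}
}

int main(void)
{
	uint8_t id[PORT_ID_BYTE_LEN];

	pack_port_id(0x0002c90300a1b2c3ULL, id);	/* made-up GUID */
	for (int i = 0; i < PORT_ID_BYTE_LEN; i++)
		printf("%02x%s", id[i], i == PORT_ID_BYTE_LEN - 1 ? "\n" : ":");
	return 0;
}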
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_resources.c b/drivers/net/ethernet/mellanox/mlx4/en_resources.c
index d3f508697a3d..f1a5500ff72d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_resources.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_resources.c
@@ -68,6 +68,12 @@ void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
 	context->db_rec_addr = cpu_to_be64(priv->res.db.dma << 2);
 	if (!(dev->features & NETIF_F_HW_VLAN_CTAG_RX))
 		context->param3 |= cpu_to_be32(1 << 30);
+
+	if (!is_tx && !rss &&
+	    (mdev->dev->caps.tunnel_offload_mode ==  MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)) {
+		en_dbg(HW, priv, "Setting RX qp %x tunnel mode to RX tunneled & non-tunneled\n", qpn);
+		context->srqn = cpu_to_be32(7 << 28); /* this fills bits 30:28 */
+	}
 }
 
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 07a1d0fbae47..890922c1c8ee 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -631,6 +631,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
 	int ip_summed;
 	int factor = priv->cqe_factor;
 	u64 timestamp;
+	bool l2_tunnel;
 
 	if (!priv->port_up)
 		return 0;
@@ -709,6 +710,8 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
 		length -= ring->fcs_del;
 		ring->bytes += length;
 		ring->packets++;
+		l2_tunnel = (dev->hw_enc_features & NETIF_F_RXCSUM) &&
+			(cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_L2_TUNNEL));
 
 		if (likely(dev->features & NETIF_F_RXCSUM)) {
 			if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
@@ -721,7 +724,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
 				 * - not an IP fragment
 				 * - no LLS polling in progress
 				 */
-				if (!mlx4_en_cq_ll_polling(cq) &&
+				if (!mlx4_en_cq_busy_polling(cq) &&
 				    (dev->features & NETIF_F_GRO)) {
 					struct sk_buff *gro_skb = napi_get_frags(&cq->napi);
 					if (!gro_skb)
@@ -738,6 +741,8 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
 					gro_skb->data_len = length;
 					gro_skb->ip_summed = CHECKSUM_UNNECESSARY;
 
+					if (l2_tunnel)
+						gro_skb->encapsulation = 1;
 					if ((cqe->vlan_my_qpn &
 					    cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK)) &&
 					    (dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
@@ -747,7 +752,9 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
 					}
 
 					if (dev->features & NETIF_F_RXHASH)
-						gro_skb->rxhash = be32_to_cpu(cqe->immed_rss_invalid);
+						skb_set_hash(gro_skb,
+							     be32_to_cpu(cqe->immed_rss_invalid),
+							     PKT_HASH_TYPE_L3);
 
 					skb_record_rx_queue(gro_skb, cq->ring);
 
@@ -788,8 +795,13 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
 		skb->protocol = eth_type_trans(skb, dev);
 		skb_record_rx_queue(skb, cq->ring);
 
+		if (l2_tunnel)
+			skb->encapsulation = 1;
+
 		if (dev->features & NETIF_F_RXHASH)
-			skb->rxhash = be32_to_cpu(cqe->immed_rss_invalid);
+			skb_set_hash(skb,
+				     be32_to_cpu(cqe->immed_rss_invalid),
+				     PKT_HASH_TYPE_L3);
 
 		if ((be32_to_cpu(cqe->vlan_my_qpn) &
 		    MLX4_CQE_VLAN_PRESENT_MASK) &&
@@ -804,8 +816,10 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
 
 		skb_mark_napi_id(skb, &cq->napi);
 
-		/* Push it up the stack */
-		netif_receive_skb(skb);
+		if (!mlx4_en_cq_busy_polling(cq))
+			napi_gro_receive(&cq->napi, skb);
+		else
+			netif_receive_skb(skb);
 
 next:
 		for (nr = 0; nr < priv->num_frags; nr++)
@@ -1053,6 +1067,12 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
 		rss_mask |=  MLX4_RSS_UDP_IPV4 | MLX4_RSS_UDP_IPV6;
 		rss_context->base_qpn_udp = rss_context->default_qpn;
 	}
+
+	if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
+		en_info(priv, "Setting RSS context tunnel type to RSS on inner headers\n");
+		rss_mask |= MLX4_RSS_BY_INNER_HEADERS;
+	}
+
 	rss_context->flags = rss_mask;
 	rss_context->hash_fn = MLX4_RSS_HASH_TOP;
 	for (i = 0; i < 10; i++)
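
The en_rx.c hunks above touch three per-packet finishing steps: encapsulated (VXLAN) completions are flagged so the stack trusts the inner checksum, the hardware flow hash is recorded as an L3 hash, and delivery goes through GRO unless the ring is being busy-polled. A condensed, purely illustrative sketch of that sequence (the helper name and boolean flags are not the driver's):

/* Illustrative only: condensed RX finishing steps from the hunks above. */
static void rx_finish_skb_sketch(struct napi_struct *napi, struct sk_buff *skb,
				 u32 hw_hash, bool l2_tunnel, bool busy_polling)
{
	if (l2_tunnel)
		skb->encapsulation = 1;	/* inner (VXLAN) csum was validated */

	/* the hardware hash covers the L3 headers only */
	skb_set_hash(skb, hw_hash, PKT_HASH_TYPE_L3);

	if (!busy_polling)
		napi_gro_receive(napi, skb);	/* normal NAPI path, GRO capable */
	else
		netif_receive_skb(skb);		/* busy-poll path bypasses GRO */
}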
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index a7fcd593b2db..8e8a7eb43a2c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -39,6 +39,7 @@
 #include <linux/if_vlan.h>
 #include <linux/vmalloc.h>
 #include <linux/tcp.h>
+#include <linux/ip.h>
 #include <linux/moduleparam.h>
 
 #include "mlx4_en.h"
@@ -55,7 +56,7 @@ MODULE_PARM_DESC(inline_thold, "threshold for using inline data");
 
 int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
 			   struct mlx4_en_tx_ring **pring, int qpn, u32 size,
-			   u16 stride, int node)
+			   u16 stride, int node, int queue_index)
 {
 	struct mlx4_en_dev *mdev = priv->mdev;
 	struct mlx4_en_tx_ring *ring;
@@ -140,6 +141,10 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
 		ring->bf_enabled = true;
 
 	ring->hwtstamp_tx_type = priv->hwtstamp_config.tx_type;
+	ring->queue_index = queue_index;
+
+	if (queue_index < priv->num_tx_rings_p_up && cpu_online(queue_index))
+		cpumask_set_cpu(queue_index, &ring->affinity_mask);
 
 	*pring = ring;
 	return 0;
@@ -206,6 +211,9 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
 
 	err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context,
 			       &ring->qp, &ring->qp_state);
+	if (!user_prio && cpu_online(ring->queue_index))
+		netif_set_xps_queue(priv->dev, &ring->affinity_mask,
+				    ring->queue_index);
 
 	return err;
 }
@@ -317,7 +325,7 @@ static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
 			}
 		}
 	}
-	dev_kfree_skb_any(skb);
+	dev_kfree_skb(skb);
 	return tx_info->nr_txbb;
 }
 
@@ -354,7 +362,9 @@ int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
 	return cnt;
 }
 
-static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
+static int mlx4_en_process_tx_cq(struct net_device *dev,
+				 struct mlx4_en_cq *cq,
+				 int budget)
 {
 	struct mlx4_en_priv *priv = netdev_priv(dev);
 	struct mlx4_cq *mcq = &cq->mcq;
@@ -372,9 +382,10 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
 	u32 bytes = 0;
 	int factor = priv->cqe_factor;
 	u64 timestamp = 0;
+	int done = 0;
 
 	if (!priv->port_up)
-		return;
+		return 0;
 
 	index = cons_index & size_mask;
 	cqe = &buf[(index << factor) + factor];
@@ -383,7 +394,7 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
 
 	/* Process all completed CQEs */
 	while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
-			cons_index & size)) {
+			cons_index & size) && (done < budget)) {
 		/*
 		 * make sure we read the CQE after we read the
 		 * ownership bit
@@ -421,7 +432,7 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
 			txbbs_stamp = txbbs_skipped;
 			packets++;
 			bytes += ring->tx_info[ring_index].nr_bytes;
-		} while (ring_index != new_index);
+		} while ((++done < budget) && (ring_index != new_index));
 
 		++cons_index;
 		index = cons_index & size_mask;
@@ -447,6 +458,7 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
 		netif_tx_wake_queue(ring->tx_queue);
 		priv->port_stats.wake_queue++;
 	}
+	return done;
 }
 
 void mlx4_en_tx_irq(struct mlx4_cq *mcq)
@@ -454,10 +466,31 @@ void mlx4_en_tx_irq(struct mlx4_cq *mcq)
 	struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
 	struct mlx4_en_priv *priv = netdev_priv(cq->dev);
 
-	mlx4_en_process_tx_cq(cq->dev, cq);
-	mlx4_en_arm_cq(priv, cq);
+	if (priv->port_up)
+		napi_schedule(&cq->napi);
+	else
+		mlx4_en_arm_cq(priv, cq);
 }
 
+/* TX CQ polling - called by NAPI */
+int mlx4_en_poll_tx_cq(struct napi_struct *napi, int budget)
+{
+	struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
+	struct net_device *dev = cq->dev;
+	struct mlx4_en_priv *priv = netdev_priv(dev);
+	int done;
+
+	done = mlx4_en_process_tx_cq(dev, cq, budget);
+
+	/* If we used up all the quota - we're probably not done yet... */
+	if (done < budget) {
+		/* Done for now */
+		napi_complete(napi);
+		mlx4_en_arm_cq(priv, cq);
+		return done;
+	}
+	return budget;
+}
 
 static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv,
 						      struct mlx4_en_tx_ring *ring,
@@ -528,7 +561,10 @@ static int get_real_size(struct sk_buff *skb, struct net_device *dev,
 	int real_size;
 
 	if (skb_is_gso(skb)) {
-		*lso_header_size = skb_transport_offset(skb) + tcp_hdrlen(skb);
+		if (skb->encapsulation)
+			*lso_header_size = (skb_inner_transport_header(skb) - skb->data) + inner_tcp_hdrlen(skb);
+		else
+			*lso_header_size = skb_transport_offset(skb) + tcp_hdrlen(skb);
 		real_size = CTRL_SIZE + skb_shinfo(skb)->nr_frags * DS_SIZE +
 			ALIGN(*lso_header_size + 4, DS_SIZE);
 		if (unlikely(*lso_header_size != skb_headlen(skb))) {
@@ -828,6 +864,14 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 		tx_info->inl = 1;
 	}
 
+	if (skb->encapsulation) {
+		struct iphdr *ipv4 = (struct iphdr *)skb_inner_network_header(skb);
+		if (ipv4->protocol == IPPROTO_TCP || ipv4->protocol == IPPROTO_UDP)
+			op_own |= cpu_to_be32(MLX4_WQE_CTRL_IIP | MLX4_WQE_CTRL_ILP);
+		else
+			op_own |= cpu_to_be32(MLX4_WQE_CTRL_IIP);
+	}
+
 	ring->prod += nr_txbb;
 
 	/* If we used a bounce buffer then copy descriptor back into place */
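
The en_tx.c changes above move TX completion handling into NAPI with a budget, which keeps one busy send queue from monopolizing the softirq. The generic shape of that budget contract, sketched with hypothetical helpers (process_completions() and rearm_irq() stand in for the driver-specific work):

/* Hedged sketch of the NAPI budget contract followed by the new TX poll
 * above; process_completions() and rearm_irq() are hypothetical stand-ins.
 */
static int napi_poll_sketch(struct napi_struct *napi, int budget)
{
	int done = process_completions(napi, budget);	/* hypothetical */

	if (done < budget) {
		/* all pending work finished: stop polling, re-enable IRQs */
		napi_complete(napi);
		rearm_irq(napi);			/* hypothetical */
		return done;
	}
	/* budget exhausted: stay scheduled, NAPI will call us again */
	return budget;
}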
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index c9cdb2a2c596..8992b38578d5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -31,7 +31,6 @@
  * SOFTWARE.
  */
 
-#include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/slab.h>
 #include <linux/export.h>
@@ -963,7 +962,7 @@ err_out_free_mtt:
 	mlx4_mtt_cleanup(dev, &eq->mtt);
 
 err_out_free_eq:
-	mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn);
+	mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn, MLX4_USE_RR);
 
 err_out_free_pages:
 	for (i = 0; i < npages; ++i)
@@ -1018,7 +1017,7 @@ static void mlx4_free_eq(struct mlx4_dev *dev,
 				    eq->page_list[i].map);
 
 	kfree(eq->page_list);
-	mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn);
+	mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn, MLX4_USE_RR);
 	mlx4_free_cmd_mailbox(dev, mailbox);
 }
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 4bd2d80d065e..91b69ff4b4a2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -134,7 +134,8 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
 		[5] = "Time stamping support",
 		[6] = "VST (control vlan insertion/stripping) support",
 		[7] = "FSM (MAC anti-spoofing) support",
-		[8] = "Dynamic QP updates support"
+		[8] = "Dynamic QP updates support",
+		[9] = "TCP/IP offloads/flow-steering for VXLAN support"
 	};
 	int i;
 
@@ -207,25 +208,25 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
 
 /* when opcode modifier = 1 */
 #define QUERY_FUNC_CAP_PHYS_PORT_OFFSET		0x3
-#define QUERY_FUNC_CAP_RDMA_PROPS_OFFSET	0x8
-#define QUERY_FUNC_CAP_ETH_PROPS_OFFSET		0xc
+#define QUERY_FUNC_CAP_FLAGS0_OFFSET		0x8
+#define QUERY_FUNC_CAP_FLAGS1_OFFSET		0xc
 
 #define QUERY_FUNC_CAP_QP0_TUNNEL		0x10
 #define QUERY_FUNC_CAP_QP0_PROXY		0x14
 #define QUERY_FUNC_CAP_QP1_TUNNEL		0x18
 #define QUERY_FUNC_CAP_QP1_PROXY		0x1c
+#define QUERY_FUNC_CAP_PHYS_PORT_ID		0x28
 
-#define QUERY_FUNC_CAP_ETH_PROPS_FORCE_MAC	0x40
-#define QUERY_FUNC_CAP_ETH_PROPS_FORCE_VLAN	0x80
+#define QUERY_FUNC_CAP_FLAGS1_FORCE_MAC		0x40
+#define QUERY_FUNC_CAP_FLAGS1_FORCE_VLAN	0x80
+#define QUERY_FUNC_CAP_FLAGS1_NIC_INFO			0x10
 
-#define QUERY_FUNC_CAP_RDMA_PROPS_FORCE_PHY_WQE_GID 0x80
+#define QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID 0x80
 
 	if (vhcr->op_modifier == 1) {
-		field = 0;
-		/* ensure force vlan and force mac bits are not set */
-		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_ETH_PROPS_OFFSET);
-		/* ensure that phy_wqe_gid bit is not set */
-		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_RDMA_PROPS_OFFSET);
+		/* Set nic_info bit to mark new fields support */
+		field  = QUERY_FUNC_CAP_FLAGS1_NIC_INFO;
+		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS1_OFFSET);
 
 		field = vhcr->in_modifier; /* phys-port = logical-port */
 		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_PHYS_PORT_OFFSET);
@@ -243,6 +244,9 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
 		size += 2;
 		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP1_PROXY);
 
+		MLX4_PUT(outbox->buf, dev->caps.phys_port_id[vhcr->in_modifier],
+			 QUERY_FUNC_CAP_PHYS_PORT_ID);
+
 	} else if (vhcr->op_modifier == 0) {
 		/* enable rdma and ethernet interfaces, and new quota locations */
 		field = (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA |
@@ -391,22 +395,22 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u32 gen_or_port,
 		goto out;
 	}
 
+	MLX4_GET(func_cap->flags1, outbox, QUERY_FUNC_CAP_FLAGS1_OFFSET);
 	if (dev->caps.port_type[gen_or_port] == MLX4_PORT_TYPE_ETH) {
-		MLX4_GET(field, outbox, QUERY_FUNC_CAP_ETH_PROPS_OFFSET);
-		if (field & QUERY_FUNC_CAP_ETH_PROPS_FORCE_VLAN) {
+		if (func_cap->flags1 & QUERY_FUNC_CAP_FLAGS1_FORCE_VLAN) {
 			mlx4_err(dev, "VLAN is enforced on this port\n");
 			err = -EPROTONOSUPPORT;
 			goto out;
 		}
 
-		if (field & QUERY_FUNC_CAP_ETH_PROPS_FORCE_MAC) {
+		if (func_cap->flags1 & QUERY_FUNC_CAP_FLAGS1_FORCE_MAC) {
 			mlx4_err(dev, "Force mac is enabled on this port\n");
 			err = -EPROTONOSUPPORT;
 			goto out;
 		}
 	} else if (dev->caps.port_type[gen_or_port] == MLX4_PORT_TYPE_IB) {
-		MLX4_GET(field, outbox, QUERY_FUNC_CAP_RDMA_PROPS_OFFSET);
-		if (field & QUERY_FUNC_CAP_RDMA_PROPS_FORCE_PHY_WQE_GID) {
+		MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS0_OFFSET);
+		if (field & QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID) {
 			mlx4_err(dev, "phy_wqe_gid is "
 				 "enforced on this ib port\n");
 			err = -EPROTONOSUPPORT;
@@ -433,6 +437,10 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u32 gen_or_port,
 	MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP1_PROXY);
 	func_cap->qp1_proxy_qpn = size & 0xFFFFFF;
 
+	if (func_cap->flags1 & QUERY_FUNC_CAP_FLAGS1_NIC_INFO)
+		MLX4_GET(func_cap->phys_port_id, outbox,
+			 QUERY_FUNC_CAP_PHYS_PORT_ID);
+
 	/* All other resources are allocated by the master, but we still report
 	 * 'num' and 'reserved' capabilities as follows:
 	 * - num remains the maximum resource index
@@ -530,6 +538,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 #define QUERY_DEV_CAP_RSVD_LKEY_OFFSET		0x98
 #define QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET		0xa0
 #define QUERY_DEV_CAP_FW_REASSIGN_MAC		0x9d
+#define QUERY_DEV_CAP_VXLAN			0x9e
 
 	dev_cap->flags2 = 0;
 	mailbox = mlx4_alloc_cmd_mailbox(dev);
@@ -698,6 +707,9 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 	MLX4_GET(field, outbox, QUERY_DEV_CAP_FW_REASSIGN_MAC);
 	if (field & 1<<6)
 		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN;
+	MLX4_GET(field, outbox, QUERY_DEV_CAP_VXLAN);
+	if (field & 1<<3)
+		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS;
 	MLX4_GET(dev_cap->max_icm_sz, outbox,
 		 QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET);
 	if (dev_cap->flags & MLX4_DEV_CAP_FLAG_COUNTERS)
@@ -846,6 +858,11 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
 	field &= 0x7f;
 	MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET);
 
+	/* For guests, disable vxlan tunneling */
+	MLX4_GET(field, outbox, QUERY_DEV_CAP_VXLAN);
+	field &= 0xf7;
+	MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_VXLAN);
+
 	/* For guests, report Blueflame disabled */
 	MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_BF_OFFSET);
 	field &= 0x7f;
@@ -1277,6 +1294,7 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
 #define INIT_HCA_IN_SIZE		 0x200
 #define INIT_HCA_VERSION_OFFSET		 0x000
 #define	 INIT_HCA_VERSION		 2
+#define INIT_HCA_VXLAN_OFFSET		 0x0c
 #define INIT_HCA_CACHELINE_SZ_OFFSET	 0x0e
 #define INIT_HCA_FLAGS_OFFSET		 0x014
 #define INIT_HCA_QPC_OFFSET		 0x020
@@ -1435,6 +1453,12 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
 	MLX4_PUT(inbox, param->uar_page_sz,	INIT_HCA_UAR_PAGE_SZ_OFFSET);
 	MLX4_PUT(inbox, param->log_uar_sz,      INIT_HCA_LOG_UAR_SZ_OFFSET);
 
+	/* set parser VXLAN attributes */
+	if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS) {
+		u8 parser_params = 0;
+		MLX4_PUT(inbox, parser_params,	INIT_HCA_VXLAN_OFFSET);
+	}
+
 	err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_INIT_HCA, 10000,
 		       MLX4_CMD_NATIVE);
 
@@ -1723,6 +1747,43 @@ int mlx4_NOP(struct mlx4_dev *dev)
 	return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, 100, MLX4_CMD_NATIVE);
 }
 
+int mlx4_get_phys_port_id(struct mlx4_dev *dev)
+{
+	u8 port;
+	u32 *outbox;
+	struct mlx4_cmd_mailbox *mailbox;
+	u32 in_mod;
+	u32 guid_hi, guid_lo;
+	int err, ret = 0;
+#define MOD_STAT_CFG_PORT_OFFSET 8
+#define MOD_STAT_CFG_GUID_H	 0X14
+#define MOD_STAT_CFG_GUID_L	 0X1c
+
+	mailbox = mlx4_alloc_cmd_mailbox(dev);
+	if (IS_ERR(mailbox))
+		return PTR_ERR(mailbox);
+	outbox = mailbox->buf;
+
+	for (port = 1; port <= dev->caps.num_ports; port++) {
+		in_mod = port << MOD_STAT_CFG_PORT_OFFSET;
+		err = mlx4_cmd_box(dev, 0, mailbox->dma, in_mod, 0x2,
+				   MLX4_CMD_MOD_STAT_CFG, MLX4_CMD_TIME_CLASS_A,
+				   MLX4_CMD_NATIVE);
+		if (err) {
+			mlx4_err(dev, "Fail to get port %d uplink guid\n",
+				 port);
+			ret = err;
+		} else {
+			MLX4_GET(guid_hi, outbox, MOD_STAT_CFG_GUID_H);
+			MLX4_GET(guid_lo, outbox, MOD_STAT_CFG_GUID_L);
+			dev->caps.phys_port_id[port] = (u64)guid_lo |
+						       (u64)guid_hi << 32;
+		}
+	}
+	mlx4_free_cmd_mailbox(dev, mailbox);
+	return ret;
+}
+
 #define MLX4_WOL_SETUP_MODE (5 << 28)
 int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port)
 {
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h
index a0a368b7c939..6811ee00ba7c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.h
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.h
@@ -140,6 +140,8 @@ struct mlx4_func_cap {
 	u32	qp1_proxy_qpn;
 	u8	physical_port;
 	u8	port_flags;
+	u8	flags1;
+	u64	phys_port_id;
 };
 
 struct mlx4_adapter {
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 01fc6515384d..d711158b0d4b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -96,10 +96,10 @@ MODULE_PARM_DESC(log_num_mgm_entry_size, "log mgm size, that defines the num"
 					 " To activate device managed"
 					 " flow steering when available, set to -1");
 
-static bool enable_64b_cqe_eqe;
+static bool enable_64b_cqe_eqe = true;
 module_param(enable_64b_cqe_eqe, bool, 0444);
 MODULE_PARM_DESC(enable_64b_cqe_eqe,
-		 "Enable 64 byte CQEs/EQEs when the FW supports this");
+		 "Enable 64 byte CQEs/EQEs when the FW supports this (default: True)");
 
 #define HCA_GLOBAL_CAP_MASK            0
 
@@ -388,6 +388,84 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 
 	return 0;
 }
+
+static int mlx4_get_pcie_dev_link_caps(struct mlx4_dev *dev,
+				       enum pci_bus_speed *speed,
+				       enum pcie_link_width *width)
+{
+	u32 lnkcap1, lnkcap2;
+	int err1, err2;
+
+#define  PCIE_MLW_CAP_SHIFT 4	/* start of MLW mask in link capabilities */
+
+	*speed = PCI_SPEED_UNKNOWN;
+	*width = PCIE_LNK_WIDTH_UNKNOWN;
+
+	err1 = pcie_capability_read_dword(dev->pdev, PCI_EXP_LNKCAP, &lnkcap1);
+	err2 = pcie_capability_read_dword(dev->pdev, PCI_EXP_LNKCAP2, &lnkcap2);
+	if (!err2 && lnkcap2) { /* PCIe r3.0-compliant */
+		if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB)
+			*speed = PCIE_SPEED_8_0GT;
+		else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB)
+			*speed = PCIE_SPEED_5_0GT;
+		else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB)
+			*speed = PCIE_SPEED_2_5GT;
+	}
+	if (!err1) {
+		*width = (lnkcap1 & PCI_EXP_LNKCAP_MLW) >> PCIE_MLW_CAP_SHIFT;
+		if (!lnkcap2) { /* pre-r3.0 */
+			if (lnkcap1 & PCI_EXP_LNKCAP_SLS_5_0GB)
+				*speed = PCIE_SPEED_5_0GT;
+			else if (lnkcap1 & PCI_EXP_LNKCAP_SLS_2_5GB)
+				*speed = PCIE_SPEED_2_5GT;
+		}
+	}
+
+	if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN) {
+		return err1 ? err1 :
+			err2 ? err2 : -EINVAL;
+	}
+	return 0;
+}
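
The width extraction above is a simple mask-and-shift on the LNKCAP dword using the PCIE_MLW_CAP_SHIFT defined in this hunk. A standalone sketch of the same arithmetic, assuming the usual PCI_EXP_LNKCAP_MLW mask value (0x03f0) from pci_regs.h and a made-up register value:

#include <stdint.h>
#include <stdio.h>

#define PCI_EXP_LNKCAP_MLW	0x03f0	/* Maximum Link Width mask (assumed value from pci_regs.h) */
#define PCIE_MLW_CAP_SHIFT	4	/* as defined in the hunk above */

int main(void)
{
	uint32_t lnkcap1 = 0x00417c83;	/* hypothetical LNKCAP dword */
	unsigned int width = (lnkcap1 & PCI_EXP_LNKCAP_MLW) >> PCIE_MLW_CAP_SHIFT;

	printf("max link width: x%u\n", width);	/* prints x8 for this value */
	return 0;
}
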
+
+static void mlx4_check_pcie_caps(struct mlx4_dev *dev)
+{
+	enum pcie_link_width width, width_cap;
+	enum pci_bus_speed speed, speed_cap;
+	int err;
+
+#define PCIE_SPEED_STR(speed) \
+	(speed == PCIE_SPEED_8_0GT ? "8.0GT/s" : \
+	 speed == PCIE_SPEED_5_0GT ? "5.0GT/s" : \
+	 speed == PCIE_SPEED_2_5GT ? "2.5GT/s" : \
+	 "Unknown")
+
+	err = mlx4_get_pcie_dev_link_caps(dev, &speed_cap, &width_cap);
+	if (err) {
+		mlx4_warn(dev,
+			  "Unable to determine PCIe device BW capabilities\n");
+		return;
+	}
+
+	err = pcie_get_minimum_link(dev->pdev, &speed, &width);
+	if (err || speed == PCI_SPEED_UNKNOWN ||
+	    width == PCIE_LNK_WIDTH_UNKNOWN) {
+		mlx4_warn(dev,
+			  "Unable to determine PCI device chain minimum BW\n");
+		return;
+	}
+
+	if (width != width_cap || speed != speed_cap)
+		mlx4_warn(dev,
+			  "PCIe BW is different than device's capability\n");
+
+	mlx4_info(dev, "PCIe link speed is %s, device supports %s\n",
+		  PCIE_SPEED_STR(speed), PCIE_SPEED_STR(speed_cap));
+	mlx4_info(dev, "PCIe link width is x%d, device supports x%d\n",
+		  width, width_cap);
+	return;
+}
+
 /* The function checks if there are live VFs and returns their count */
 static int mlx4_how_many_lives_vf(struct mlx4_dev *dev)
 {
@@ -606,6 +684,7 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
 		dev->caps.qp1_tunnel[i - 1] = func_cap.qp1_tunnel_qpn;
 		dev->caps.qp1_proxy[i - 1] = func_cap.qp1_proxy_qpn;
 		dev->caps.port_mask[i] = dev->caps.port_type[i];
+		dev->caps.phys_port_id[i] = func_cap.phys_port_id;
 		if (mlx4_get_slave_pkey_gid_tbl_len(dev, i,
 						    &dev->caps.gid_table_len[i],
 						    &dev->caps.pkey_table_len[i]))
@@ -1443,6 +1522,19 @@ static void choose_steering_mode(struct mlx4_dev *dev,
 		 mlx4_log_num_mgm_entry_size);
 }
 
+static void choose_tunnel_offload_mode(struct mlx4_dev *dev,
+				       struct mlx4_dev_cap *dev_cap)
+{
+	if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED &&
+	    dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS)
+		dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_VXLAN;
+	else
+		dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_NONE;
+
+	mlx4_dbg(dev, "Tunneling offload mode is: %s\n", (dev->caps.tunnel_offload_mode
+		 == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) ? "vxlan" : "none");
+}
+
 static int mlx4_init_hca(struct mlx4_dev *dev)
 {
 	struct mlx4_priv	  *priv = mlx4_priv(dev);
@@ -1483,6 +1575,11 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
 		}
 
 		choose_steering_mode(dev, &dev_cap);
+		choose_tunnel_offload_mode(dev, &dev_cap);
+
+		err = mlx4_get_phys_port_id(dev);
+		if (err)
+			mlx4_err(dev, "Failed to get physical port id\n");
 
 		if (mlx4_is_master(dev))
 			mlx4_parav_master_pf_caps(dev);
@@ -1654,7 +1751,7 @@ EXPORT_SYMBOL_GPL(mlx4_counter_alloc);
 
 void __mlx4_counter_free(struct mlx4_dev *dev, u32 idx)
 {
-	mlx4_bitmap_free(&mlx4_priv(dev)->counters_bitmap, idx);
+	mlx4_bitmap_free(&mlx4_priv(dev)->counters_bitmap, idx, MLX4_USE_RR);
 	return;
 }
 
@@ -2287,6 +2384,12 @@ slave_start:
 			goto err_mfunc;
 	}
 
+	/* Check if the device is functioning at its maximum possible speed.
+	 * No return code for this call; just warn the user if the PCI
+	 * Express device capabilities are under-satisfied by the bus.
+	 */
+	mlx4_check_pcie_caps(dev);
+
 	/* In master functions, the communication channel must be initialized
 	 * after obtaining its address from fw */
 	if (mlx4_is_master(dev)) {
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index 34dffcf61bff..db7dc0b6667d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -125,9 +125,14 @@ static struct mlx4_promisc_qp *get_promisc_qp(struct mlx4_dev *dev, u8 port,
 					      enum mlx4_steer_type steer,
 					      u32 qpn)
 {
-	struct mlx4_steer *s_steer = &mlx4_priv(dev)->steer[port - 1];
+	struct mlx4_steer *s_steer;
 	struct mlx4_promisc_qp *pqp;
 
+	if (port < 1 || port > dev->caps.num_ports)
+		return NULL;
+
+	s_steer = &mlx4_priv(dev)->steer[port - 1];
+
 	list_for_each_entry(pqp, &s_steer->promisc_qps[steer], list) {
 		if (pqp->qpn == qpn)
 			return pqp;
@@ -154,6 +159,9 @@ static int new_steering_entry(struct mlx4_dev *dev, u8 port,
 	u32 prot;
 	int err;
 
+	if (port < 1 || port > dev->caps.num_ports)
+		return -EINVAL;
+
 	s_steer = &mlx4_priv(dev)->steer[port - 1];
 	new_entry = kzalloc(sizeof *new_entry, GFP_KERNEL);
 	if (!new_entry)
@@ -238,6 +246,9 @@ static int existing_steering_entry(struct mlx4_dev *dev, u8 port,
 	struct mlx4_promisc_qp *pqp;
 	struct mlx4_promisc_qp *dqp;
 
+	if (port < 1 || port > dev->caps.num_ports)
+		return -EINVAL;
+
 	s_steer = &mlx4_priv(dev)->steer[port - 1];
 
 	pqp = get_promisc_qp(dev, port, steer, qpn);
@@ -283,6 +294,9 @@ static bool check_duplicate_entry(struct mlx4_dev *dev, u8 port,
 	struct mlx4_steer_index *tmp_entry, *entry = NULL;
 	struct mlx4_promisc_qp *dqp, *tmp_dqp;
 
+	if (port < 1 || port > dev->caps.num_ports)
+		return false;
+
 	s_steer = &mlx4_priv(dev)->steer[port - 1];
 
 	/* if qp is not promisc, it cannot be duplicated */
@@ -324,6 +338,9 @@ static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 port,
 	bool ret = false;
 	int i;
 
+	if (port < 1 || port > dev->caps.num_ports)
+		return false;
+
 	s_steer = &mlx4_priv(dev)->steer[port - 1];
 
 	mailbox = mlx4_alloc_cmd_mailbox(dev);
@@ -378,6 +395,9 @@ static int add_promisc_qp(struct mlx4_dev *dev, u8 port,
 	int err;
 	struct mlx4_priv *priv = mlx4_priv(dev);
 
+	if (port < 1 || port > dev->caps.num_ports)
+		return -EINVAL;
+
 	s_steer = &mlx4_priv(dev)->steer[port - 1];
 
 	mutex_lock(&priv->mcg_table.mutex);
@@ -484,6 +504,9 @@ static int remove_promisc_qp(struct mlx4_dev *dev, u8 port,
 	int loc, i;
 	int err;
 
+	if (port < 1 || port > dev->caps.num_ports)
+		return -EINVAL;
+
 	s_steer = &mlx4_priv(dev)->steer[port - 1];
 	mutex_lock(&priv->mcg_table.mutex);
 
@@ -674,7 +697,8 @@ const u16 __sw_id_hw[] = {
 	[MLX4_NET_TRANS_RULE_ID_IPV6]    = 0xE003,
 	[MLX4_NET_TRANS_RULE_ID_IPV4]    = 0xE002,
 	[MLX4_NET_TRANS_RULE_ID_TCP]     = 0xE004,
-	[MLX4_NET_TRANS_RULE_ID_UDP]     = 0xE006
+	[MLX4_NET_TRANS_RULE_ID_UDP]     = 0xE006,
+	[MLX4_NET_TRANS_RULE_ID_VXLAN]	 = 0xE008
 };
 
 int mlx4_map_sw_to_hw_steering_id(struct mlx4_dev *dev,
@@ -699,7 +723,9 @@ static const int __rule_hw_sz[] = {
 	[MLX4_NET_TRANS_RULE_ID_TCP] =
 		sizeof(struct mlx4_net_trans_rule_hw_tcp_udp),
 	[MLX4_NET_TRANS_RULE_ID_UDP] =
-		sizeof(struct mlx4_net_trans_rule_hw_tcp_udp)
+		sizeof(struct mlx4_net_trans_rule_hw_tcp_udp),
+	[MLX4_NET_TRANS_RULE_ID_VXLAN] =
+		sizeof(struct mlx4_net_trans_rule_hw_vxlan)
 };
 
 int mlx4_hw_rule_sz(struct mlx4_dev *dev,
@@ -764,6 +790,13 @@ static int parse_trans_rule(struct mlx4_dev *dev, struct mlx4_spec_list *spec,
 		rule_hw->tcp_udp.src_port_msk = spec->tcp_udp.src_port_msk;
 		break;
 
+	case MLX4_NET_TRANS_RULE_ID_VXLAN:
+		rule_hw->vxlan.vni =
+			cpu_to_be32(be32_to_cpu(spec->vxlan.vni) << 8);
+		rule_hw->vxlan.vni_mask =
+			cpu_to_be32(be32_to_cpu(spec->vxlan.vni_mask) << 8);
+		break;
+
 	default:
 		return -EINVAL;
 	}
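
The VXLAN case added in the hunk above places the 24-bit VNI in the upper three bytes of a big-endian word by shifting it left by 8 before the endianness conversion. A standalone sketch of that placement, with an arbitrary VNI:

#include <stdint.h>
#include <stdio.h>

/* Place a 24-bit VNI in the upper three bytes of a big-endian 32-bit word,
 * mirroring the "vni << 8" conversion in parse_trans_rule() above. */
static void vni_to_be_bytes(uint32_t vni, uint8_t out[4])
{
	uint32_t shifted = vni << 8;	/* VNI in bits 31..8, reserved byte in bits 7..0 */

	out[0] = shifted >> 24;
	out[1] = shifted >> 16;
	out[2] = shifted >> 8;
	out[3] = shifted;
}

int main(void)
{
	uint8_t b[4];

	vni_to_be_bytes(0x123456, b);	/* expect 12 34 56 00 */
	printf("%02x %02x %02x %02x\n", b[0], b[1], b[2], b[3]);
	return 0;
}
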
@@ -1013,7 +1046,7 @@ out:
 				  index, dev->caps.num_mgms);
 		else
 			mlx4_bitmap_free(&priv->mcg_table.bitmap,
-					 index - dev->caps.num_mgms);
+					 index - dev->caps.num_mgms, MLX4_USE_RR);
 	}
 	mutex_unlock(&priv->mcg_table.mutex);
 
@@ -1104,7 +1137,7 @@ int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
 					  index, amgm_index, dev->caps.num_mgms);
 			else
 				mlx4_bitmap_free(&priv->mcg_table.bitmap,
-						 amgm_index - dev->caps.num_mgms);
+						 amgm_index - dev->caps.num_mgms, MLX4_USE_RR);
 		}
 	} else {
 		/* Remove entry from AMGM */
@@ -1124,7 +1157,7 @@ int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
 				  prev, index, dev->caps.num_mgms);
 		else
 			mlx4_bitmap_free(&priv->mcg_table.bitmap,
-					 index - dev->caps.num_mgms);
+					 index - dev->caps.num_mgms, MLX4_USE_RR);
 	}
 
 out:
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 59f67f9086dc..6b65f7795215 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -783,6 +783,11 @@ enum {
 	MLX4_PCI_DEV_FORCE_SENSE_PORT	= 1 << 1,
 };
 
+enum {
+	MLX4_NO_RR	= 0,
+	MLX4_USE_RR	= 1,
+};
+
 struct mlx4_priv {
 	struct mlx4_dev		dev;
 
@@ -844,9 +849,10 @@ static inline struct mlx4_priv *mlx4_priv(struct mlx4_dev *dev)
 extern struct workqueue_struct *mlx4_wq;
 
 u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap);
-void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj);
+void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj, int use_rr);
 u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align);
-void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt);
+void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt,
+			    int use_rr);
 u32 mlx4_bitmap_avail(struct mlx4_bitmap *bitmap);
 int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask,
 		     u32 reserved_bot, u32 reserved_top);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index d5758adceaa2..3af04c3f42ea 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -45,6 +45,7 @@
 #include <linux/dcbnl.h>
 #endif
 #include <linux/cpu_rmap.h>
+#include <linux/ptp_clock_kernel.h>
 
 #include <linux/mlx4/device.h>
 #include <linux/mlx4/qp.h>
@@ -255,6 +256,8 @@ struct mlx4_en_tx_ring {
 	u16 poll_cnt;
 	struct mlx4_en_tx_info *tx_info;
 	u8 *bounce_buf;
+	u8 queue_index;
+	cpumask_t affinity_mask;
 	u32 last_nr_txbb;
 	struct mlx4_qp qp;
 	struct mlx4_qp_context context;
@@ -373,10 +376,14 @@ struct mlx4_en_dev {
 	u32                     priv_pdn;
 	spinlock_t              uar_lock;
 	u8			mac_removed[MLX4_MAX_PORTS + 1];
+	rwlock_t		clock_lock;
+	u32			nominal_c_mult;
 	struct cyclecounter	cycles;
 	struct timecounter	clock;
 	unsigned long		last_overflow_check;
 	unsigned long		overflow_period;
+	struct ptp_clock	*ptp_clock;
+	struct ptp_clock_info	ptp_clock_info;
 };
 
 
@@ -434,6 +441,7 @@ struct mlx4_en_mc_list {
 	enum mlx4_en_mclist_act	action;
 	u8			addr[ETH_ALEN];
 	u64			reg_id;
+	u64			tunnel_reg_id;
 };
 
 struct mlx4_en_frag_info {
@@ -565,7 +573,7 @@ struct mlx4_en_priv {
 	struct list_head filters;
 	struct hlist_head filter_hash[1 << MLX4_EN_FILTER_HASH_SHIFT];
 #endif
-
+	u64 tunnel_reg_id;
 };
 
 enum mlx4_en_wol {
@@ -653,7 +661,7 @@ static inline bool mlx4_en_cq_unlock_poll(struct mlx4_en_cq *cq)
 }
 
 /* true if a socket is polling, even if it did not get the lock */
-static inline bool mlx4_en_cq_ll_polling(struct mlx4_en_cq *cq)
+static inline bool mlx4_en_cq_busy_polling(struct mlx4_en_cq *cq)
 {
 	WARN_ON(!(cq->state & MLX4_CQ_LOCKED));
 	return cq->state & CQ_USER_PEND;
@@ -683,7 +691,7 @@ static inline bool mlx4_en_cq_unlock_poll(struct mlx4_en_cq *cq)
 	return false;
 }
 
-static inline bool mlx4_en_cq_ll_polling(struct mlx4_en_cq *cq)
+static inline bool mlx4_en_cq_busy_polling(struct mlx4_en_cq *cq)
 {
 	return false;
 }
@@ -720,7 +728,8 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
 
 int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
 			   struct mlx4_en_tx_ring **pring,
-			   int qpn, u32 size, u16 stride, int node);
+			   int qpn, u32 size, u16 stride,
+			   int node, int queue_index);
 void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
 			     struct mlx4_en_tx_ring **pring);
 int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
@@ -742,6 +751,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev,
 			  struct mlx4_en_cq *cq,
 			  int budget);
 int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget);
+int mlx4_en_poll_tx_cq(struct napi_struct *napi, int budget);
 void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
 		int is_tx, int rss, int qpn, int cqn, int user_prio,
 		struct mlx4_qp_context *context);
@@ -787,6 +797,7 @@ void mlx4_en_fill_hwtstamps(struct mlx4_en_dev *mdev,
 			    struct skb_shared_hwtstamps *hwts,
 			    u64 timestamp);
 void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev);
+void mlx4_en_remove_timestamp(struct mlx4_en_dev *mdev);
 int mlx4_en_timestamp_config(struct net_device *dev,
 			     int tx_type,
 			     int rx_filter);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
index b3ee9bafff5e..24835853b753 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
@@ -32,7 +32,6 @@
  * SOFTWARE.
  */
 
-#include <linux/init.h>
 #include <linux/errno.h>
 #include <linux/export.h>
 #include <linux/slab.h>
@@ -346,7 +345,7 @@ void __mlx4_mpt_release(struct mlx4_dev *dev, u32 index)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
 
-	mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, index);
+	mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, index, MLX4_NO_RR);
 }
 
 static void mlx4_mpt_release(struct mlx4_dev *dev, u32 index)
diff --git a/drivers/net/ethernet/mellanox/mlx4/pd.c b/drivers/net/ethernet/mellanox/mlx4/pd.c
index 84cfb40bf451..74216071201f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/pd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/pd.c
@@ -31,7 +31,6 @@
  * SOFTWARE.
  */
 
-#include <linux/init.h>
 #include <linux/errno.h>
 #include <linux/export.h>
 #include <linux/io-mapping.h>
@@ -59,7 +58,7 @@ EXPORT_SYMBOL_GPL(mlx4_pd_alloc);
 
 void mlx4_pd_free(struct mlx4_dev *dev, u32 pdn)
 {
-	mlx4_bitmap_free(&mlx4_priv(dev)->pd_bitmap, pdn);
+	mlx4_bitmap_free(&mlx4_priv(dev)->pd_bitmap, pdn, MLX4_USE_RR);
 }
 EXPORT_SYMBOL_GPL(mlx4_pd_free);
 
@@ -96,7 +95,7 @@ EXPORT_SYMBOL_GPL(mlx4_xrcd_alloc);
 
 void __mlx4_xrcd_free(struct mlx4_dev *dev, u32 xrcdn)
 {
-	mlx4_bitmap_free(&mlx4_priv(dev)->xrcd_bitmap, xrcdn);
+	mlx4_bitmap_free(&mlx4_priv(dev)->xrcd_bitmap, xrcdn, MLX4_USE_RR);
 }
 
 void mlx4_xrcd_free(struct mlx4_dev *dev, u32 xrcdn)
@@ -164,7 +163,7 @@ EXPORT_SYMBOL_GPL(mlx4_uar_alloc);
 
 void mlx4_uar_free(struct mlx4_dev *dev, struct mlx4_uar *uar)
 {
-	mlx4_bitmap_free(&mlx4_priv(dev)->uar_table.bitmap, uar->index);
+	mlx4_bitmap_free(&mlx4_priv(dev)->uar_table.bitmap, uar->index, MLX4_USE_RR);
 }
 EXPORT_SYMBOL_GPL(mlx4_uar_free);
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index f50ef6a5ee5e..a58bcbf1b806 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -820,6 +820,47 @@ int mlx4_SET_PORT_SCHEDULER(struct mlx4_dev *dev, u8 port, u8 *tc_tx_bw,
 }
 EXPORT_SYMBOL(mlx4_SET_PORT_SCHEDULER);
 
+enum {
+	VXLAN_ENABLE_MODIFY	= 1 << 7,
+	VXLAN_STEERING_MODIFY	= 1 << 6,
+
+	VXLAN_ENABLE		= 1 << 7,
+};
+
+struct mlx4_set_port_vxlan_context {
+	u32	reserved1;
+	u8	modify_flags;
+	u8	reserved2;
+	u8	enable_flags;
+	u8	steering;
+};
+
+int mlx4_SET_PORT_VXLAN(struct mlx4_dev *dev, u8 port, u8 steering)
+{
+	int err;
+	u32 in_mod;
+	struct mlx4_cmd_mailbox *mailbox;
+	struct mlx4_set_port_vxlan_context  *context;
+
+	mailbox = mlx4_alloc_cmd_mailbox(dev);
+	if (IS_ERR(mailbox))
+		return PTR_ERR(mailbox);
+	context = mailbox->buf;
+	memset(context, 0, sizeof(*context));
+
+	context->modify_flags = VXLAN_ENABLE_MODIFY | VXLAN_STEERING_MODIFY;
+	context->enable_flags = VXLAN_ENABLE;
+	context->steering  = steering;
+
+	in_mod = MLX4_SET_PORT_VXLAN << 8 | port;
+	err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
+		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
+
+	mlx4_free_cmd_mailbox(dev, mailbox);
+	return err;
+}
+EXPORT_SYMBOL(mlx4_SET_PORT_VXLAN);
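
mlx4_SET_PORT_VXLAN() packs an 8-byte context into the mailbox before issuing SET_PORT. A standalone sketch of the bytes that layout produces, mirroring the struct and flag values from the hunk above; the steering value 0 is just a placeholder:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Mirrors struct mlx4_set_port_vxlan_context from the hunk above */
struct set_port_vxlan_ctx {
	uint32_t reserved1;
	uint8_t  modify_flags;
	uint8_t  reserved2;
	uint8_t  enable_flags;
	uint8_t  steering;
};

int main(void)
{
	struct set_port_vxlan_ctx ctx;
	uint8_t buf[sizeof(ctx)];

	memset(&ctx, 0, sizeof(ctx));
	ctx.modify_flags = (1 << 7) | (1 << 6);	/* VXLAN_ENABLE_MODIFY | VXLAN_STEERING_MODIFY */
	ctx.enable_flags = 1 << 7;		/* VXLAN_ENABLE */
	ctx.steering = 0;			/* placeholder steering mode */

	memcpy(buf, &ctx, sizeof(ctx));
	for (size_t i = 0; i < sizeof(buf); i++)
		printf("%02x ", buf[i]);	/* 00 00 00 00 c0 00 80 00 */
	printf("\n");
	return 0;
}
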
+
 int mlx4_SET_MCAST_FLTR_wrapper(struct mlx4_dev *dev, int slave,
 				struct mlx4_vhcr *vhcr,
 				struct mlx4_cmd_mailbox *inbox,
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index 2715e61dbb74..61d64ebffd56 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -35,7 +35,6 @@
 
 #include <linux/gfp.h>
 #include <linux/export.h>
-#include <linux/init.h>
 
 #include <linux/mlx4/cmd.h>
 #include <linux/mlx4/qp.h>
@@ -250,7 +249,7 @@ void __mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
 
 	if (mlx4_is_qp_reserved(dev, (u32) base_qpn))
 		return;
-	mlx4_bitmap_free_range(&qp_table->bitmap, base_qpn, cnt);
+	mlx4_bitmap_free_range(&qp_table->bitmap, base_qpn, cnt, MLX4_USE_RR);
 }
 
 void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 663510325c22..57428a0cb9dd 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -1340,43 +1340,29 @@ static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
 
 	spin_lock_irq(mlx4_tlock(dev));
 	r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn);
-	if (!r)
+	if (!r) {
 		err = -ENOENT;
-	else if (r->com.owner != slave)
+	} else if (r->com.owner != slave) {
 		err = -EPERM;
-	else {
-		switch (state) {
-		case RES_CQ_BUSY:
-			err = -EBUSY;
-			break;
-
-		case RES_CQ_ALLOCATED:
-			if (r->com.state != RES_CQ_HW)
-				err = -EINVAL;
-			else if (atomic_read(&r->ref_count))
-				err = -EBUSY;
-			else
-				err = 0;
-			break;
-
-		case RES_CQ_HW:
-			if (r->com.state != RES_CQ_ALLOCATED)
-				err = -EINVAL;
-			else
-				err = 0;
-			break;
-
-		default:
+	} else if (state == RES_CQ_ALLOCATED) {
+		if (r->com.state != RES_CQ_HW)
 			err = -EINVAL;
-		}
+		else if (atomic_read(&r->ref_count))
+			err = -EBUSY;
+		else
+			err = 0;
+	} else if (state != RES_CQ_HW || r->com.state != RES_CQ_ALLOCATED) {
+		err = -EINVAL;
+	} else {
+		err = 0;
+	}
 
-		if (!err) {
-			r->com.from_state = r->com.state;
-			r->com.to_state = state;
-			r->com.state = RES_CQ_BUSY;
-			if (cq)
-				*cq = r;
-		}
+	if (!err) {
+		r->com.from_state = r->com.state;
+		r->com.to_state = state;
+		r->com.state = RES_CQ_BUSY;
+		if (cq)
+			*cq = r;
 	}
 
 	spin_unlock_irq(mlx4_tlock(dev));
@@ -1385,7 +1371,7 @@ static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
 }
 
 static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
-				 enum res_cq_states state, struct res_srq **srq)
+				 enum res_srq_states state, struct res_srq **srq)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
@@ -1394,39 +1380,25 @@ static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
 
 	spin_lock_irq(mlx4_tlock(dev));
 	r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index);
-	if (!r)
+	if (!r) {
 		err = -ENOENT;
-	else if (r->com.owner != slave)
+	} else if (r->com.owner != slave) {
 		err = -EPERM;
-	else {
-		switch (state) {
-		case RES_SRQ_BUSY:
+	} else if (state == RES_SRQ_ALLOCATED) {
+		if (r->com.state != RES_SRQ_HW)
 			err = -EINVAL;
-			break;
-
-		case RES_SRQ_ALLOCATED:
-			if (r->com.state != RES_SRQ_HW)
-				err = -EINVAL;
-			else if (atomic_read(&r->ref_count))
-				err = -EBUSY;
-			break;
-
-		case RES_SRQ_HW:
-			if (r->com.state != RES_SRQ_ALLOCATED)
-				err = -EINVAL;
-			break;
-
-		default:
-			err = -EINVAL;
-		}
+		else if (atomic_read(&r->ref_count))
+			err = -EBUSY;
+	} else if (state != RES_SRQ_HW || r->com.state != RES_SRQ_ALLOCATED) {
+		err = -EINVAL;
+	}
 
-		if (!err) {
-			r->com.from_state = r->com.state;
-			r->com.to_state = state;
-			r->com.state = RES_SRQ_BUSY;
-			if (srq)
-				*srq = r;
-		}
+	if (!err) {
+		r->com.from_state = r->com.state;
+		r->com.to_state = state;
+		r->com.state = RES_SRQ_BUSY;
+		if (srq)
+			*srq = r;
 	}
 
 	spin_unlock_irq(mlx4_tlock(dev));
@@ -3634,7 +3606,7 @@ static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
 	    !is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
 		list_for_each_entry_safe(res, tmp, rlist, list) {
 			be_mac = cpu_to_be64(res->mac << 16);
-			if (!memcmp(&be_mac, eth_header->eth.dst_mac, ETH_ALEN))
+			if (ether_addr_equal((u8 *)&be_mac, eth_header->eth.dst_mac))
 				return 0;
 		}
 		pr_err("MAC %pM doesn't belong to VF %d, Steering rule rejected\n",
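
The ether_addr_equal() conversion above keeps the existing trick of materializing the 48-bit MAC (stored as a u64) into bytes via cpu_to_be64(res->mac << 16) so that its first six bytes line up with a destination MAC. A standalone sketch of why that works, with a made-up MAC and a portable stand-in for cpu_to_be64():

#include <stdint.h>
#include <stdio.h>

/* Portable stand-in for the kernel's cpu_to_be64(): write x to out[]
 * most-significant byte first. */
static void u64_to_be_bytes(uint64_t x, uint8_t out[8])
{
	for (int i = 0; i < 8; i++)
		out[i] = (uint8_t)(x >> (56 - 8 * i));
}

int main(void)
{
	uint64_t mac = 0x001122334455ULL;	/* hypothetical MAC as a 48-bit integer */
	uint8_t be_mac[8];

	u64_to_be_bytes(mac << 16, be_mac);	/* same shift as be_mac = cpu_to_be64(res->mac << 16) */
	for (int i = 0; i < 6; i++)
		printf("%02x%c", be_mac[i], i < 5 ? ':' : '\n');	/* 00:11:22:33:44:55 */
	return 0;
}
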
diff --git a/drivers/net/ethernet/mellanox/mlx4/srq.c b/drivers/net/ethernet/mellanox/mlx4/srq.c
index 8fdf23753779..98faf870b0b0 100644
--- a/drivers/net/ethernet/mellanox/mlx4/srq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/srq.c
@@ -31,7 +31,6 @@
  * SOFTWARE.
  */
 
-#include <linux/init.h>
 
 #include <linux/mlx4/cmd.h>
 #include <linux/mlx4/srq.h>
@@ -117,7 +116,7 @@ err_put:
 	mlx4_table_put(dev, &srq_table->table, *srqn);
 
 err_out:
-	mlx4_bitmap_free(&srq_table->bitmap, *srqn);
+	mlx4_bitmap_free(&srq_table->bitmap, *srqn, MLX4_NO_RR);
 	return err;
 }
 
@@ -145,7 +144,7 @@ void __mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn)
 
 	mlx4_table_put(dev, &srq_table->cmpt_table, srqn);
 	mlx4_table_put(dev, &srq_table->table, srqn);
-	mlx4_bitmap_free(&srq_table->bitmap, srqn);
+	mlx4_bitmap_free(&srq_table->bitmap, srqn, MLX4_NO_RR);
 }
 
 static void mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 8675d26a678b..405c4fbcd0ad 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -32,7 +32,6 @@
 
 #include <asm-generic/kmap_types.h>
 #include <linux/module.h>
-#include <linux/init.h>
 #include <linux/errno.h>
 #include <linux/pci.h>
 #include <linux/dma-mapping.h>
diff --git a/drivers/net/ethernet/micrel/ks8695net.c b/drivers/net/ethernet/micrel/ks8695net.c
index 106eb972f2ac..16435b3cfa9f 100644
--- a/drivers/net/ethernet/micrel/ks8695net.c
+++ b/drivers/net/ethernet/micrel/ks8695net.c
@@ -21,7 +21,6 @@
 #include <linux/ioport.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
-#include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/skbuff.h>
 #include <linux/spinlock.h>
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index ddd252a3da9c..ce84dc289c8f 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -4128,10 +4128,10 @@ static int hw_add_addr(struct ksz_hw *hw, u8 *mac_addr)
 	int i;
 	int j = ADDITIONAL_ENTRIES;
 
-	if (!memcmp(hw->override_addr, mac_addr, ETH_ALEN))
+	if (ether_addr_equal(hw->override_addr, mac_addr))
 		return 0;
 	for (i = 0; i < hw->addr_list_size; i++) {
-		if (!memcmp(hw->address[i], mac_addr, ETH_ALEN))
+		if (ether_addr_equal(hw->address[i], mac_addr))
 			return 0;
 		if (ADDITIONAL_ENTRIES == j && empty_addr(hw->address[i]))
 			j = i;
@@ -4149,7 +4149,7 @@ static int hw_del_addr(struct ksz_hw *hw, u8 *mac_addr)
 	int i;
 
 	for (i = 0; i < hw->addr_list_size; i++) {
-		if (!memcmp(hw->address[i], mac_addr, ETH_ALEN)) {
+		if (ether_addr_equal(hw->address[i], mac_addr)) {
 			memset(hw->address[i], 0, ETH_ALEN);
 			writel(0, hw->io + ADD_ADDR_INCR * i +
 				KS_ADD_ADDR_0_HI);
@@ -5853,15 +5853,12 @@ static int netdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 	struct dev_info *hw_priv = priv->adapter;
 	struct ksz_hw *hw = &hw_priv->hw;
 	struct ksz_port *port = &priv->port;
-	int rc;
 	int result = 0;
 	struct mii_ioctl_data *data = if_mii(ifr);
 
 	if (down_interruptible(&priv->proc_sem))
 		return -ERESTARTSYS;
 
-	/* assume success */
-	rc = 0;
 	switch (cmd) {
 	/* Get address of MII PHY in use. */
 	case SIOCGMIIPHY:
@@ -7104,8 +7101,7 @@ static int pcidev_init(struct pci_dev *pdev, const struct pci_device_id *id)
 			       ETH_ALEN);
 		else {
 			memcpy(dev->dev_addr, sw->other_addr, ETH_ALEN);
-			if (!memcmp(sw->other_addr, hw->override_addr,
-				    ETH_ALEN))
+			if (ether_addr_equal(sw->other_addr, hw->override_addr))
 				dev->dev_addr[5] += port->first_port;
 		}
 
diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
index cbd013379252..5020fd47825d 100644
--- a/drivers/net/ethernet/moxa/moxart_ether.c
+++ b/drivers/net/ethernet/moxa/moxart_ether.c
@@ -13,7 +13,6 @@
  */
 
 #include <linux/module.h>
-#include <linux/init.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/skbuff.h>
diff --git a/drivers/net/ethernet/natsemi/jazzsonic.c b/drivers/net/ethernet/natsemi/jazzsonic.c
index 79257f71c5d9..a5512a97cc4d 100644
--- a/drivers/net/ethernet/natsemi/jazzsonic.c
+++ b/drivers/net/ethernet/natsemi/jazzsonic.c
@@ -24,7 +24,6 @@
 #include <linux/fcntl.h>
 #include <linux/gfp.h>
 #include <linux/interrupt.h>
-#include <linux/init.h>
 #include <linux/ioport.h>
 #include <linux/in.h>
 #include <linux/string.h>
diff --git a/drivers/net/ethernet/natsemi/macsonic.c b/drivers/net/ethernet/natsemi/macsonic.c
index 04b3ec1352f1..9e4ddbba7036 100644
--- a/drivers/net/ethernet/natsemi/macsonic.c
+++ b/drivers/net/ethernet/natsemi/macsonic.c
@@ -37,7 +37,6 @@
 #include <linux/fcntl.h>
 #include <linux/gfp.h>
 #include <linux/interrupt.h>
-#include <linux/init.h>
 #include <linux/ioport.h>
 #include <linux/in.h>
 #include <linux/string.h>
diff --git a/drivers/net/ethernet/natsemi/ns83820.c b/drivers/net/ethernet/natsemi/ns83820.c
index d3b47003a575..dbccf1de49ec 100644
--- a/drivers/net/ethernet/natsemi/ns83820.c
+++ b/drivers/net/ethernet/natsemi/ns83820.c
@@ -22,8 +22,7 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
  *
  *
  * ChangeLog
@@ -2236,7 +2235,6 @@ out_disable:
 	pci_disable_device(pci_dev);
 out_free:
 	free_netdev(ndev);
-	pci_set_drvdata(pci_dev, NULL);
 out:
 	return err;
 }
@@ -2260,7 +2258,6 @@ static void ns83820_remove_one(struct pci_dev *pci_dev)
 			dev->rx_info.descs, dev->rx_info.phy_descs);
 	pci_disable_device(dev->pci_dev);
 	free_netdev(ndev);
-	pci_set_drvdata(pci_dev, NULL);
 }
 
 static DEFINE_PCI_DEVICE_TABLE(ns83820_pci_tbl) = {
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
index fbe5363cb89c..089b713b9f7b 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
@@ -2148,7 +2148,7 @@ __vxge_hw_ring_mempool_item_alloc(struct vxge_hw_mempool *mempoolh,
  * __vxge_hw_ring_replenish - Initial replenish of RxDs
  * This function replenishes the RxDs from reserve array to work array
  */
-enum vxge_hw_status
+static enum vxge_hw_status
 vxge_hw_ring_replenish(struct __vxge_hw_ring *ring)
 {
 	void *rxd;
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index f9876ea8c8bf..1ded50ca1600 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -87,6 +87,7 @@ static unsigned int bw_percentage[VXGE_HW_MAX_VIRTUAL_PATHS] =
 module_param_array(bw_percentage, uint, NULL, 0);
 
 static struct vxge_drv_config *driver_config;
+static enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev);
 
 static inline int is_vxge_card_up(struct vxgedev *vdev)
 {
@@ -507,7 +508,8 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
 		 * if rss is disabled/enabled, so key off of that.
 		 */
 		if (ext_info.rth_value)
-			skb->rxhash = ext_info.rth_value;
+			skb_set_hash(skb, ext_info.rth_value,
+				     PKT_HASH_TYPE_L3);
 
 		vxge_rx_complete(ring, skb, ext_info.vlan,
 			pkt_length, &ext_info);
@@ -1429,7 +1431,7 @@ vxge_search_mac_addr_in_da_table(struct vxge_vpath *vpath, struct macInfo *mac)
 		return status;
 	}
 
-	while (memcmp(mac->macaddr, macaddr, ETH_ALEN)) {
+	while (!ether_addr_equal(mac->macaddr, macaddr)) {
 		status = vxge_hw_vpath_mac_addr_get_next(vpath->handle,
 				macaddr, macmask);
 		if (status != VXGE_HW_OK)
@@ -1970,7 +1972,7 @@ static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev)
 }
 
 /* reset vpaths */
-enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev)
+static enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev)
 {
 	enum vxge_hw_status status = VXGE_HW_OK;
 	struct vxge_vpath *vpath;
@@ -3189,7 +3191,7 @@ static enum vxge_hw_status vxge_timestamp_config(struct __vxge_hw_device *devh)
 	return status;
 }
 
-static int vxge_hwtstamp_ioctl(struct vxgedev *vdev, void __user *data)
+static int vxge_hwtstamp_set(struct vxgedev *vdev, void __user *data)
 {
 	struct hwtstamp_config config;
 	int i;
@@ -3250,6 +3252,21 @@ static int vxge_hwtstamp_ioctl(struct vxgedev *vdev, void __user *data)
 	return 0;
 }
 
+static int vxge_hwtstamp_get(struct vxgedev *vdev, void __user *data)
+{
+	struct hwtstamp_config config;
+
+	config.flags = 0;
+	config.tx_type = HWTSTAMP_TX_OFF;
+	config.rx_filter = (vdev->rx_hwts ?
+			    HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE);
+
+	if (copy_to_user(data, &config, sizeof(config)))
+		return -EFAULT;
+
+	return 0;
+}
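
The new vxge_hwtstamp_get() above backs the SIOCGHWTSTAMP ioctl wired up further down in vxge_ioctl(), letting user space read the current timestamping configuration back instead of only setting it. A minimal query sketch, assuming uapi headers that already expose SIOCGHWTSTAMP and a placeholder interface name:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/sockios.h>
#include <linux/net_tstamp.h>

int main(void)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&cfg, 0, sizeof(cfg));
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* placeholder interface name */
	ifr.ifr_data = (void *)&cfg;

	if (ioctl(fd, SIOCGHWTSTAMP, &ifr) == 0)
		printf("tx_type=%d rx_filter=%d\n", cfg.tx_type, cfg.rx_filter);
	else
		perror("SIOCGHWTSTAMP");

	close(fd);
	return 0;
}
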
+
 /**
  * vxge_ioctl
  * @dev: Device pointer.
@@ -3263,19 +3280,15 @@ static int vxge_hwtstamp_ioctl(struct vxgedev *vdev, void __user *data)
 static int vxge_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 {
 	struct vxgedev *vdev = netdev_priv(dev);
-	int ret;
 
 	switch (cmd) {
 	case SIOCSHWTSTAMP:
-		ret = vxge_hwtstamp_ioctl(vdev, rq->ifr_data);
-		if (ret)
-			return ret;
-		break;
+		return vxge_hwtstamp_set(vdev, rq->ifr_data);
+	case SIOCGHWTSTAMP:
+		return vxge_hwtstamp_get(vdev, rq->ifr_data);
 	default:
 		return -EOPNOTSUPP;
 	}
-
-	return 0;
 }
 
 /**
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.h b/drivers/net/ethernet/neterion/vxge/vxge-main.h
index 36ca40f8f249..3a79d93b8445 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.h
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.h
@@ -427,7 +427,6 @@ void vxge_os_timer(struct timer_list *timer, void (*func)(unsigned long data),
 }
 
 void vxge_initialize_ethtool_ops(struct net_device *ndev);
-enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev);
 int vxge_fw_upgrade(struct vxgedev *vdev, char *fw_name, int override);
 
 /* #define VXGE_DEBUG_INIT: debug for initialization functions
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-traffic.c b/drivers/net/ethernet/neterion/vxge/vxge-traffic.c
index 99749bd07d72..9e1aaa7f36bb 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-traffic.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-traffic.c
@@ -1956,8 +1956,7 @@ exit:
  * @vid: vlan id to be added for this vpath into the list
  *
  * Adds the given vlan id into the list for this  vpath.
- * see also: vxge_hw_vpath_vid_delete, vxge_hw_vpath_vid_get and
- * vxge_hw_vpath_vid_get_next
+ * see also: vxge_hw_vpath_vid_delete
  *
  */
 enum vxge_hw_status
@@ -1979,45 +1978,13 @@ exit:
 }
 
 /**
- * vxge_hw_vpath_vid_get - Get the first vid entry for this vpath
- *               from vlan id table.
- * @vp: Vpath handle.
- * @vid: Buffer to return vlan id
- *
- * Returns the first vlan id in the list for this vpath.
- * see also: vxge_hw_vpath_vid_get_next
- *
- */
-enum vxge_hw_status
-vxge_hw_vpath_vid_get(struct __vxge_hw_vpath_handle *vp, u64 *vid)
-{
-	u64 data;
-	enum vxge_hw_status status = VXGE_HW_OK;
-
-	if (vp == NULL) {
-		status = VXGE_HW_ERR_INVALID_HANDLE;
-		goto exit;
-	}
-
-	status = __vxge_hw_vpath_rts_table_get(vp,
-			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
-			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
-			0, vid, &data);
-
-	*vid = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_VLAN_ID(*vid);
-exit:
-	return status;
-}
-
-/**
  * vxge_hw_vpath_vid_delete - Delete the vlan id entry for this vpath
  *               from the vlan id table.
  * @vp: Vpath handle.
  * @vid: vlan id to be deleted from the list for this vpath
  *
  * Deletes the given vlan id from the list for this vpath.
- * see also: vxge_hw_vpath_vid_add, vxge_hw_vpath_vid_get and
- * vxge_hw_vpath_vid_get_next
+ * see also: vxge_hw_vpath_vid_add
  *
  */
 enum vxge_hw_status
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-traffic.h b/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
index 4a518a3b131c..ba6f833bb059 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
+++ b/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
@@ -1918,9 +1918,6 @@ vxge_hw_ring_rxd_post_post(
 	struct __vxge_hw_ring *ring_handle,
 	void *rxdh);
 
-enum vxge_hw_status
-vxge_hw_ring_replenish(struct __vxge_hw_ring *ring_handle);
-
 void
 vxge_hw_ring_rxd_post_post_wmb(
 	struct __vxge_hw_ring *ring_handle,
@@ -2186,11 +2183,6 @@ vxge_hw_vpath_vid_add(
 	u64			vid);
 
 enum vxge_hw_status
-vxge_hw_vpath_vid_get(
-	struct __vxge_hw_vpath_handle *vpath_handle,
-	u64			*vid);
-
-enum vxge_hw_status
 vxge_hw_vpath_vid_delete(
 	struct __vxge_hw_vpath_handle *vpath_handle,
 	u64			vid);
diff --git a/drivers/net/ethernet/netx-eth.c b/drivers/net/ethernet/netx-eth.c
index e6f0a4366f90..31eb911e4763 100644
--- a/drivers/net/ethernet/netx-eth.c
+++ b/drivers/net/ethernet/netx-eth.c
@@ -13,8 +13,7 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 #include <linux/init.h>
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index 1e8b9514718b..70cf97fe67f2 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -26,8 +26,7 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
  *
  * Known bugs:
  * We suspect that on some hardware no TX done interrupts are generated.
@@ -59,7 +58,6 @@
 #include <linux/skbuff.h>
 #include <linux/mii.h>
 #include <linux/random.h>
-#include <linux/init.h>
 #include <linux/if_vlan.h>
 #include <linux/dma-mapping.h>
 #include <linux/slab.h>
@@ -6020,7 +6018,6 @@ static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
 out_error:
 	if (phystate_orig)
 		writel(phystate|NVREG_ADAPTCTL_RUNNING, base + NvRegAdapterControl);
-	pci_set_drvdata(pci_dev, NULL);
 out_freering:
 	free_rings(dev);
 out_unmap:
@@ -6091,7 +6088,6 @@ static void nv_remove(struct pci_dev *pci_dev)
 	pci_release_regions(pci_dev);
 	pci_disable_device(pci_dev);
 	free_netdev(dev);
-	pci_set_drvdata(pci_dev, NULL);
 }
 
 #ifdef CONFIG_PM_SLEEP
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index ba3ca18611f7..422d9b51ac24 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -19,7 +19,6 @@
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
-#include <linux/init.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
index 2a9003071d51..2a55d6d53ee6 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
@@ -14,8 +14,7 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 #ifndef _PCH_GBE_H_
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c
index ff3ad70935a6..51250363566b 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c
@@ -14,8 +14,7 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 #include "pch_gbe.h"
 #include "pch_gbe_phy.h"
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.h
index 94aaac5b057b..91ce07c8306c 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.h
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.h
@@ -14,8 +14,7 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 #ifndef _PCH_GBE_API_H_
 #define _PCH_GBE_API_H_
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
index f0ceb89af931..826f0ccdc23c 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
@@ -14,8 +14,7 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 #include "pch_gbe.h"
 #include "pch_gbe_api.h"
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index 27ffe0ebf0a6..464e91058c81 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -14,8 +14,7 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 #include "pch_gbe.h"
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c
index cf7c9b3a255b..08d4be616064 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c
@@ -14,8 +14,7 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 #include "pch_gbe.h"
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c
index 8b7ff75fc8e0..a5cad5ea9436 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c
@@ -14,8 +14,7 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 #include "pch_gbe.h"
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h
index 0cbe69206e04..95ad0151ad02 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h
@@ -14,8 +14,7 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 #ifndef _PCH_GBE_PHY_H_
 #define _PCH_GBE_PHY_H_
diff --git a/drivers/net/ethernet/packetengines/yellowfin.c b/drivers/net/ethernet/packetengines/yellowfin.c
index 07a890eb72ad..9a6cb482dcd0 100644
--- a/drivers/net/ethernet/packetengines/yellowfin.c
+++ b/drivers/net/ethernet/packetengines/yellowfin.c
@@ -1053,7 +1053,7 @@ static int yellowfin_rx(struct net_device *dev)
 		struct sk_buff *rx_skb = yp->rx_skbuff[entry];
 		s16 frame_status;
 		u16 desc_status;
-		int data_size;
+		int data_size, yf_size;
 		u8 *buf_addr;
 
 		if(!desc->result_status)
@@ -1070,6 +1070,9 @@ static int yellowfin_rx(struct net_device *dev)
 			       __func__, frame_status);
 		if (--boguscnt < 0)
 			break;
+
+		yf_size = sizeof(struct yellowfin_desc);
+
 		if ( ! (desc_status & RX_EOP)) {
 			if (data_size != 0)
 				netdev_warn(dev, "Oversized Ethernet frame spanned multiple buffers, status %04x, data_size %d!\n",
@@ -1096,12 +1099,12 @@ static int yellowfin_rx(struct net_device *dev)
 			if (status2 & 0x80) dev->stats.rx_dropped++;
 #ifdef YF_PROTOTYPE		/* Support for prototype hardware errata. */
 		} else if ((yp->flags & HasMACAddrBug)  &&
-			memcmp(le32_to_cpu(yp->rx_ring_dma +
-				entry*sizeof(struct yellowfin_desc)),
-				dev->dev_addr, 6) != 0 &&
-			memcmp(le32_to_cpu(yp->rx_ring_dma +
-				entry*sizeof(struct yellowfin_desc)),
-				"\377\377\377\377\377\377", 6) != 0) {
+			!ether_addr_equal(le32_to_cpu(yp->rx_ring_dma +
+						      entry * yf_size),
+					  dev->dev_addr) &&
+			!ether_addr_equal(le32_to_cpu(yp->rx_ring_dma +
+						      entry * yf_size),
+					  "\377\377\377\377\377\377")) {
 			if (bogus_rx++ == 0)
 				netdev_warn(dev, "Bad frame to %pM\n",
 					    buf_addr);
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c
index dbaa49e58b0c..9abf70d74b31 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.c
@@ -13,11 +13,9 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
-#include <linux/init.h>
 #include <linux/module.h>
 #include <linux/pci.h>
 #include <linux/slab.h>
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.h b/drivers/net/ethernet/pasemi/pasemi_mac.h
index f2749d46c125..a5807703ab96 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.h
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.h
@@ -14,8 +14,7 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 #ifndef PASEMI_MAC_H
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac_ethtool.c b/drivers/net/ethernet/pasemi/pasemi_mac_ethtool.c
index 4825959a0efe..25fae568261f 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac_ethtool.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac_ethtool.c
@@ -13,8 +13,7 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 
diff --git a/drivers/net/ethernet/qlogic/netxen/Makefile b/drivers/net/ethernet/qlogic/netxen/Makefile
index 861a0590b1f4..e14e60c88381 100644
--- a/drivers/net/ethernet/qlogic/netxen/Makefile
+++ b/drivers/net/ethernet/qlogic/netxen/Makefile
@@ -13,9 +13,7 @@
 # GNU General Public License for more details.
 #                                   
 # You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place - Suite 330, Boston,
-# MA  02111-1307, USA.
+# along with this program; if not, see <http://www.gnu.org/licenses/>.
 # 
 # The full GNU General Public License is included in this distribution
 # in the file called "COPYING".
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
index 9adcdbb49476..6e426ae94692 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
@@ -14,9 +14,7 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
- * MA  02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
  *
  * The full GNU General Public License is included in this distribution
  * in the file called "COPYING".
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
index 1bcaf45aa864..6f6be57f4690 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
@@ -14,9 +14,7 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
- * MA  02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
  *
  * The full GNU General Public License is included in this distribution
  * in the file called "COPYING".
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
index 4ca2c196c98a..87e073c6e291 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
@@ -14,9 +14,7 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
- * MA  02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
  *
  * The full GNU General Public License is included in this distribution
  * in the file called "COPYING".
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h
index 0c64c82b9acf..a310c2f6502a 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h
@@ -14,9 +14,7 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
- * MA  02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
  *
  * The full GNU General Public License is included in this distribution
  * in the file called "COPYING".
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
index 67efe754367d..db4280ce9c09 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
@@ -14,9 +14,7 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
- * MA  02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
  *
  * The full GNU General Public License is included in this distribution
  * in the file called "COPYING".
@@ -663,7 +661,7 @@ static int nx_p3_nic_add_mac(struct netxen_adapter *adapter,
 	list_for_each(head, del_list) {
 		cur = list_entry(head, nx_mac_list_t, list);
 
-		if (memcmp(addr, cur->mac_addr, ETH_ALEN) == 0) {
+		if (ether_addr_equal(addr, cur->mac_addr)) {
 			list_move_tail(head, &adapter->mac_list);
 			return 0;
 		}
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.h
index e2c5b6f2df03..7433c4d21601 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.h
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.h
@@ -14,9 +14,7 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
- * MA  02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
  *
  * The full GNU General Public License is included in this distribution
  * in the file called "COPYING".
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
index cc68657f0536..32058614151a 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
@@ -14,9 +14,7 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
- * MA  02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
  *
  * The full GNU General Public License is included in this distribution
  * in the file called "COPYING".
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index 3bec8cfebf99..70849dea32b1 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -14,9 +14,7 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
- * MA  02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
  *
  * The full GNU General Public License is included in this distribution
  * in the file called "COPYING".
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index 0758b9435358..2eabd44f8914 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -8,7 +8,6 @@
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
 #include <linux/kernel.h>
-#include <linux/init.h>
 #include <linux/types.h>
 #include <linux/module.h>
 #include <linux/list.h>
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index f2a7c7166e24..f19f81cde134 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -38,8 +38,8 @@
 
 #define _QLCNIC_LINUX_MAJOR 5
 #define _QLCNIC_LINUX_MINOR 3
-#define _QLCNIC_LINUX_SUBVERSION 52
-#define QLCNIC_LINUX_VERSIONID  "5.3.52"
+#define _QLCNIC_LINUX_SUBVERSION 55
+#define QLCNIC_LINUX_VERSIONID  "5.3.55"
 #define QLCNIC_DRV_IDC_VER  0x01
 #define QLCNIC_DRIVER_VERSION  ((_QLCNIC_LINUX_MAJOR << 16) |\
 		 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
@@ -105,6 +105,8 @@
 #define QLCNIC_DEF_TX_RINGS		4
 #define QLCNIC_MAX_VNIC_TX_RINGS	4
 #define QLCNIC_MAX_VNIC_SDS_RINGS	4
+#define QLCNIC_83XX_MINIMUM_VECTOR	3
+#define QLCNIC_82XX_MINIMUM_VECTOR	2
 
 enum qlcnic_queue_type {
 	QLCNIC_TX_QUEUE = 1,
@@ -115,6 +117,10 @@ enum qlcnic_queue_type {
 #define QLCNIC_VNIC_MODE	0xFF
 #define QLCNIC_DEFAULT_MODE	0x0
 
+/* Virtual NIC function count */
+#define QLC_DEFAULT_VNIC_COUNT	8
+#define QLC_84XX_VNIC_COUNT	16
+
 /*
  * Following are the states of the Phantom. Phantom will set them and
  * Host will read to check if the fields are correct.
@@ -365,6 +371,7 @@ struct qlcnic_rx_buffer {
  */
 #define QLCNIC_INTR_COAL_TYPE_RX		1
 #define QLCNIC_INTR_COAL_TYPE_TX		2
+#define QLCNIC_INTR_COAL_TYPE_RX_TX		3
 
 #define QLCNIC_DEF_INTR_COALESCE_RX_TIME_US	3
 #define QLCNIC_DEF_INTR_COALESCE_RX_PACKETS	256
@@ -374,7 +381,7 @@ struct qlcnic_rx_buffer {
 
 #define QLCNIC_INTR_DEFAULT			0x04
 #define QLCNIC_CONFIG_INTR_COALESCE		3
-#define QLCNIC_DEV_INFO_SIZE			1
+#define QLCNIC_DEV_INFO_SIZE			2
 
 struct qlcnic_nic_intr_coalesce {
 	u8	type;
@@ -462,8 +469,10 @@ struct qlcnic_hardware_context {
 	u16 max_rx_ques;
 	u16 max_mtu;
 	u32 msg_enable;
-	u16 act_pci_func;
+	u16 total_nic_func;
 	u16 max_pci_func;
+	u32 max_vnic_func;
+	u32 total_pci_func;
 
 	u32 capabilities;
 	u32 extra_capability[3];
@@ -791,9 +800,10 @@ struct qlcnic_cardrsp_tx_ctx {
 #define QLCNIC_MAC_VLAN_ADD	3
 #define QLCNIC_MAC_VLAN_DEL	4
 
-struct qlcnic_mac_list_s {
+struct qlcnic_mac_vlan_list {
 	struct list_head list;
 	uint8_t mac_addr[ETH_ALEN+2];
+	u16 vlan_id;
 };
 
 /* MAC Learn */
@@ -860,7 +870,7 @@ struct qlcnic_mac_list_s {
 #define QLCNIC_FW_CAP2_HW_LRO_IPV6		BIT_3
 #define QLCNIC_FW_CAPABILITY_SET_DRV_VER	BIT_5
 #define QLCNIC_FW_CAPABILITY_2_BEACON		BIT_7
-#define QLCNIC_FW_CAPABILITY_2_PER_PORT_ESWITCH_CFG	BIT_8
+#define QLCNIC_FW_CAPABILITY_2_PER_PORT_ESWITCH_CFG	BIT_9
 
 /* module types */
 #define LINKEVENT_MODULE_NOT_PRESENT			1
@@ -954,6 +964,7 @@ struct qlcnic_ipaddr {
 #define QLCNIC_TX_INTR_SHARED		0x10000
 #define QLCNIC_APP_CHANGED_FLAGS	0x20000
 #define QLCNIC_HAS_PHYS_PORT_ID		0x40000
+#define QLCNIC_TSS_RSS			0x80000
 
 #define QLCNIC_IS_MSI_FAMILY(adapter) \
 	((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED))
@@ -963,6 +974,9 @@ struct qlcnic_ipaddr {
 #define QLCNIC_BEACON_EANBLE		0xC
 #define QLCNIC_BEACON_DISABLE		0xD
 
+#define QLCNIC_BEACON_ON		2
+#define QLCNIC_BEACON_OFF		0
+
 #define QLCNIC_MSIX_TBL_SPACE		8192
 #define QLCNIC_PCI_REG_MSIX_TBL 	0x44
 #define QLCNIC_MSIX_TBL_PGSIZE		4096
@@ -1047,6 +1061,9 @@ struct qlcnic_adapter {
 	u8 drv_tx_rings;  /* max tx rings supported by driver */
 	u8 drv_sds_rings; /* max sds rings supported by driver */
 
+	u8 drv_tss_rings; /* tss ring input */
+	u8 drv_rss_rings; /* rss ring input */
+
 	u8 rx_csum;
 	u8 portnum;
 
@@ -1072,6 +1089,7 @@ struct qlcnic_adapter {
 	u64 dev_rst_time;
 	bool drv_mac_learn;
 	bool fdb_mac_learn;
+	bool rx_mac_learn;
 	unsigned long vlans[BITS_TO_LONGS(VLAN_N_VID)];
 	u8 flash_mfg_id;
 	struct qlcnic_npar_info *npars;
@@ -1260,7 +1278,7 @@ struct qlcnic_pci_func_cfg {
 	u16	port_num;
 	u8	pci_func;
 	u8	func_state;
-	u8	def_mac_addr[6];
+	u8	def_mac_addr[ETH_ALEN];
 };
 
 struct qlcnic_npar_func_cfg {
@@ -1462,8 +1480,6 @@ int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter);
 int qlcnic_fw_cmd_set_port(struct qlcnic_adapter *adapter, u32 config);
 int qlcnic_pci_mem_write_2M(struct qlcnic_adapter *, u64 off, u64 data);
 int qlcnic_pci_mem_read_2M(struct qlcnic_adapter *, u64 off, u64 *data);
-void qlcnic_pci_camqm_read_2M(struct qlcnic_adapter *, u64, u64 *);
-void qlcnic_pci_camqm_write_2M(struct qlcnic_adapter *, u64, u64);
 
 #define ADDR_IN_RANGE(addr, low, high)	\
 	(((addr) < (high)) && ((addr) >= (low)))
@@ -1499,16 +1515,11 @@ void qlcnic_pcie_sem_unlock(struct qlcnic_adapter *, int);
 
 #define MAX_CTL_CHECK 1000
 
-int qlcnic_wol_supported(struct qlcnic_adapter *adapter);
 void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter);
 void qlcnic_delete_lb_filters(struct qlcnic_adapter *adapter);
 int qlcnic_dump_fw(struct qlcnic_adapter *);
 int qlcnic_enable_fw_dump_state(struct qlcnic_adapter *);
 bool qlcnic_check_fw_dump_state(struct qlcnic_adapter *);
-pci_ers_result_t qlcnic_82xx_io_error_detected(struct pci_dev *,
-					       pci_channel_state_t);
-pci_ers_result_t qlcnic_82xx_io_slot_reset(struct pci_dev *);
-void qlcnic_82xx_io_resume(struct pci_dev *);
 
 /* Functions from qlcnic_init.c */
 void qlcnic_schedule_work(struct qlcnic_adapter *, work_func_t, int);
@@ -1543,9 +1554,7 @@ int qlcnic_check_fw_status(struct qlcnic_adapter *adapter);
 void qlcnic_watchdog_task(struct work_struct *work);
 void qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter,
 		struct qlcnic_host_rds_ring *rds_ring, u8 ring_id);
-int qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max);
 void qlcnic_set_multi(struct net_device *netdev);
-void __qlcnic_set_multi(struct net_device *, u16);
 int qlcnic_nic_add_mac(struct qlcnic_adapter *, const u8 *, u16);
 int qlcnic_nic_del_mac(struct qlcnic_adapter *, const u8 *);
 void qlcnic_82xx_free_mac_list(struct qlcnic_adapter *adapter);
@@ -1558,13 +1567,11 @@ netdev_features_t qlcnic_fix_features(struct net_device *netdev,
 	netdev_features_t features);
 int qlcnic_set_features(struct net_device *netdev, netdev_features_t features);
 int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable);
-int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter);
 void qlcnic_update_cmd_producer(struct qlcnic_host_tx_ring *);
 
 /* Functions from qlcnic_ethtool.c */
 int qlcnic_check_loopback_buff(unsigned char *, u8 []);
 int qlcnic_do_lb_test(struct qlcnic_adapter *, u8);
-int qlcnic_loopback_test(struct net_device *, u8);
 
 /* Functions from qlcnic_main.c */
 int qlcnic_reset_context(struct qlcnic_adapter *);
@@ -1573,10 +1580,9 @@ int qlcnic_diag_alloc_res(struct net_device *netdev, int);
 netdev_tx_t qlcnic_xmit_frame(struct sk_buff *, struct net_device *);
 void qlcnic_set_tx_ring_count(struct qlcnic_adapter *, u8);
 void qlcnic_set_sds_ring_count(struct qlcnic_adapter *, u8);
-int qlcnic_setup_rings(struct qlcnic_adapter *, u8, u8);
+int qlcnic_setup_rings(struct qlcnic_adapter *);
 int qlcnic_validate_rings(struct qlcnic_adapter *, __u32, int);
 void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter);
-void qlcnic_82xx_set_mac_filter_count(struct qlcnic_adapter *);
 int qlcnic_enable_msix(struct qlcnic_adapter *, u32);
 void qlcnic_set_drv_version(struct qlcnic_adapter *);
 
@@ -1605,11 +1611,8 @@ void qlcnic_dump_mbx(struct qlcnic_adapter *, struct qlcnic_cmd_args *);
 
 void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter);
 void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter);
-void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter);
-void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter);
 void qlcnic_82xx_add_sysfs(struct qlcnic_adapter *adapter);
 void qlcnic_82xx_remove_sysfs(struct qlcnic_adapter *adapter);
-int qlcnic_82xx_get_settings(struct qlcnic_adapter *, struct ethtool_cmd *);
 
 int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *, u32);
 int qlcnicvf_config_led(struct qlcnic_adapter *, u32, u32);
@@ -1617,7 +1620,7 @@ void qlcnic_set_vlan_config(struct qlcnic_adapter *,
 			    struct qlcnic_esw_func_cfg *);
 void qlcnic_set_eswitch_port_features(struct qlcnic_adapter *,
 				      struct qlcnic_esw_func_cfg *);
-
+int qlcnic_setup_tss_rss_intr(struct qlcnic_adapter *);
 void qlcnic_down(struct qlcnic_adapter *, struct net_device *);
 int qlcnic_up(struct qlcnic_adapter *, struct net_device *);
 void __qlcnic_down(struct qlcnic_adapter *, struct net_device *);
@@ -1632,15 +1635,15 @@ int qlcnic_init_pci_info(struct qlcnic_adapter *);
 int qlcnic_set_default_offload_settings(struct qlcnic_adapter *);
 int qlcnic_reset_npar_config(struct qlcnic_adapter *);
 int qlcnic_set_eswitch_port_config(struct qlcnic_adapter *);
-void qlcnic_add_lb_filter(struct qlcnic_adapter *, struct sk_buff *, int, u16);
-int qlcnic_get_beacon_state(struct qlcnic_adapter *, u8 *);
 int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter);
 int qlcnic_read_mac_addr(struct qlcnic_adapter *);
 int qlcnic_setup_netdev(struct qlcnic_adapter *, struct net_device *, int);
 void qlcnic_set_netdev_features(struct qlcnic_adapter *,
 				struct qlcnic_esw_func_cfg *);
 void qlcnic_sriov_vf_schedule_multi(struct net_device *);
-void qlcnic_vf_add_mc_list(struct net_device *, u16);
+int qlcnic_is_valid_nic_func(struct qlcnic_adapter *, u8);
+int qlcnic_get_pci_func_type(struct qlcnic_adapter *, u16, u16 *, u16 *,
+			     u16 *);
 
 /*
  * QLOGIC Board information
@@ -1674,11 +1677,8 @@ static inline int qlcnic_set_real_num_queues(struct qlcnic_adapter *adapter,
 
 	err = netif_set_real_num_tx_queues(netdev, adapter->drv_tx_rings);
 	if (err)
-		dev_err(&adapter->pdev->dev, "failed to set %d Tx queues\n",
-			adapter->drv_tx_rings);
-	else
-		dev_info(&adapter->pdev->dev, "Set %d Tx queues\n",
-			 adapter->drv_tx_rings);
+		netdev_err(netdev, "failed to set %d Tx queues\n",
+			   adapter->drv_tx_rings);
 
 	return err;
 }
@@ -1744,7 +1744,8 @@ struct qlcnic_hardware_ops {
 	int (*change_macvlan) (struct qlcnic_adapter *, u8*, u16, u8);
 	void (*napi_enable) (struct qlcnic_adapter *);
 	void (*napi_disable) (struct qlcnic_adapter *);
-	void (*config_intr_coal) (struct qlcnic_adapter *);
+	int (*config_intr_coal) (struct qlcnic_adapter *,
+				 struct ethtool_coalesce *);
 	int (*config_rss) (struct qlcnic_adapter *, int);
 	int (*config_hw_lro) (struct qlcnic_adapter *, int);
 	int (*config_loopback) (struct qlcnic_adapter *, u8);
@@ -1759,6 +1760,15 @@ struct qlcnic_hardware_ops {
 					       pci_channel_state_t);
 	pci_ers_result_t (*io_slot_reset) (struct pci_dev *);
 	void (*io_resume) (struct pci_dev *);
+	void (*get_beacon_state)(struct qlcnic_adapter *);
+	void (*enable_sds_intr) (struct qlcnic_adapter *,
+				 struct qlcnic_host_sds_ring *);
+	void (*disable_sds_intr) (struct qlcnic_adapter *,
+				  struct qlcnic_host_sds_ring *);
+	void (*enable_tx_intr) (struct qlcnic_adapter *,
+				struct qlcnic_host_tx_ring *);
+	void (*disable_tx_intr) (struct qlcnic_adapter *,
+				 struct qlcnic_host_tx_ring *);
 };
 
 extern struct qlcnic_nic_template qlcnic_vf_ops;
@@ -1931,9 +1941,10 @@ static inline void qlcnic_napi_disable(struct qlcnic_adapter *adapter)
 	adapter->ahw->hw_ops->napi_disable(adapter);
 }
 
-static inline void qlcnic_config_intr_coalesce(struct qlcnic_adapter *adapter)
+static inline int qlcnic_config_intr_coalesce(struct qlcnic_adapter *adapter,
+					      struct ethtool_coalesce *ethcoal)
 {
-	adapter->ahw->hw_ops->config_intr_coal(adapter);
+	return adapter->ahw->hw_ops->config_intr_coal(adapter, ethcoal);
 }
 
 static inline int qlcnic_config_rss(struct qlcnic_adapter *adapter, int enable)
@@ -1985,6 +1996,11 @@ static inline void qlcnic_set_mac_filter_count(struct qlcnic_adapter *adapter)
 		adapter->ahw->hw_ops->set_mac_filter_count(adapter);
 }
 
+static inline void qlcnic_get_beacon_state(struct qlcnic_adapter *adapter)
+{
+	adapter->ahw->hw_ops->get_beacon_state(adapter);
+}
+
 static inline void qlcnic_read_phys_port_id(struct qlcnic_adapter *adapter)
 {
 	if (adapter->ahw->hw_ops->read_phys_port_id)
@@ -2027,6 +2043,54 @@ static inline bool qlcnic_check_multi_tx(struct qlcnic_adapter *adapter)
 	return test_bit(__QLCNIC_MULTI_TX_UNIQUE, &adapter->state);
 }
 
+static inline void
+qlcnic_82xx_enable_tx_intr(struct qlcnic_adapter *adapter,
+			   struct qlcnic_host_tx_ring *tx_ring)
+{
+	if (qlcnic_check_multi_tx(adapter) &&
+	    !adapter->ahw->diag_test)
+		writel(0x0, tx_ring->crb_intr_mask);
+}
+
+static inline void
+qlcnic_82xx_disable_tx_intr(struct qlcnic_adapter *adapter,
+			    struct qlcnic_host_tx_ring *tx_ring)
+{
+	if (qlcnic_check_multi_tx(adapter) &&
+	    !adapter->ahw->diag_test)
+		writel(1, tx_ring->crb_intr_mask);
+}
+
+static inline void
+qlcnic_83xx_enable_tx_intr(struct qlcnic_adapter *adapter,
+			   struct qlcnic_host_tx_ring *tx_ring)
+{
+	writel(0, tx_ring->crb_intr_mask);
+}
+
+static inline void
+qlcnic_83xx_disable_tx_intr(struct qlcnic_adapter *adapter,
+			    struct qlcnic_host_tx_ring *tx_ring)
+{
+	writel(1, tx_ring->crb_intr_mask);
+}
+
+/* Enable MSI-x and INT-x interrupts */
+static inline void
+qlcnic_83xx_enable_sds_intr(struct qlcnic_adapter *adapter,
+			    struct qlcnic_host_sds_ring *sds_ring)
+{
+	writel(0, sds_ring->crb_intr_mask);
+}
+
+/* Disable MSI-x and INT-x interrupts */
+static inline void
+qlcnic_83xx_disable_sds_intr(struct qlcnic_adapter *adapter,
+			     struct qlcnic_host_sds_ring *sds_ring)
+{
+	writel(1, sds_ring->crb_intr_mask);
+}
+
 static inline void qlcnic_disable_multi_tx(struct qlcnic_adapter *adapter)
 {
 	test_and_clear_bit(__QLCNIC_MULTI_TX_UNIQUE, &adapter->state);
@@ -2036,10 +2100,10 @@ static inline void qlcnic_disable_multi_tx(struct qlcnic_adapter *adapter)
 /* When operating in multi Tx mode, the driver needs to write 0x1
  * to the src register, instead of 0x0, to disable receiving interrupts.
  */
-static inline void qlcnic_disable_int(struct qlcnic_host_sds_ring *sds_ring)
+static inline void
+qlcnic_82xx_disable_sds_intr(struct qlcnic_adapter *adapter,
+			     struct qlcnic_host_sds_ring *sds_ring)
 {
-	struct qlcnic_adapter *adapter = sds_ring->adapter;
-
 	if (qlcnic_check_multi_tx(adapter) &&
 	    !adapter->ahw->diag_test &&
 	    (adapter->flags & QLCNIC_MSIX_ENABLED))
@@ -2048,13 +2112,42 @@ static inline void qlcnic_disable_int(struct qlcnic_host_sds_ring *sds_ring)
 		writel(0, sds_ring->crb_intr_mask);
 }
 
+static inline void qlcnic_enable_sds_intr(struct qlcnic_adapter *adapter,
+					  struct qlcnic_host_sds_ring *sds_ring)
+{
+	if (adapter->ahw->hw_ops->enable_sds_intr)
+		adapter->ahw->hw_ops->enable_sds_intr(adapter, sds_ring);
+}
+
+static inline void
+qlcnic_disable_sds_intr(struct qlcnic_adapter *adapter,
+			struct qlcnic_host_sds_ring *sds_ring)
+{
+	if (adapter->ahw->hw_ops->disable_sds_intr)
+		adapter->ahw->hw_ops->disable_sds_intr(adapter, sds_ring);
+}
+
+static inline void qlcnic_enable_tx_intr(struct qlcnic_adapter *adapter,
+					 struct qlcnic_host_tx_ring *tx_ring)
+{
+	if (adapter->ahw->hw_ops->enable_tx_intr)
+		adapter->ahw->hw_ops->enable_tx_intr(adapter, tx_ring);
+}
+
+static inline void qlcnic_disable_tx_intr(struct qlcnic_adapter *adapter,
+					  struct qlcnic_host_tx_ring *tx_ring)
+{
+	if (adapter->ahw->hw_ops->disable_tx_intr)
+		adapter->ahw->hw_ops->disable_tx_intr(adapter, tx_ring);
+}
+
 /* When operating in multi Tx mode, the driver needs to write 0x0
  * to the src register, instead of 0x1, to enable receiving interrupts.
  */
-static inline void qlcnic_enable_int(struct qlcnic_host_sds_ring *sds_ring)
+static inline void
+qlcnic_82xx_enable_sds_intr(struct qlcnic_adapter *adapter,
+			    struct qlcnic_host_sds_ring *sds_ring)
 {
-	struct qlcnic_adapter *adapter = sds_ring->adapter;
-
 	if (qlcnic_check_multi_tx(adapter) &&
 	    !adapter->ahw->diag_test &&
 	    (adapter->flags & QLCNIC_MSIX_ENABLED))
@@ -2140,4 +2233,26 @@ static inline bool qlcnic_sriov_vf_check(struct qlcnic_adapter *adapter)
 
 	return status;
 }
+
+static inline bool qlcnic_83xx_pf_check(struct qlcnic_adapter *adapter)
+{
+	unsigned short device = adapter->pdev->device;
+
+	return (device == PCI_DEVICE_ID_QLOGIC_QLE834X) ? true : false;
+}
+
+static inline bool qlcnic_83xx_vf_check(struct qlcnic_adapter *adapter)
+{
+	unsigned short device = adapter->pdev->device;
+
+	return (device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X) ? true : false;
+}
+
+static inline u32 qlcnic_get_vnic_func_count(struct qlcnic_adapter *adapter)
+{
+	if (qlcnic_84xx_check(adapter))
+		return QLC_84XX_VNIC_COUNT;
+	else
+		return QLC_DEFAULT_VNIC_COUNT;
+}
 #endif				/* __QLCNIC_H_ */
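
Editorial note on the qlcnic.h hunks above: the new enable/disable interrupt wrappers dispatch through the per-generation hw_ops table and silently skip the call when a handler is not installed, while the 82xx/83xx inline variants remain available for the ops tables to point at. The standalone sketch below only mirrors that dispatch shape; every name in it (fake_ring, fake_hw_ops, gen83_*) is invented for illustration and none of it is driver code.

#include <stdio.h>

struct fake_ring { int masked; };

struct fake_hw_ops {
	void (*enable_sds_intr)(struct fake_ring *);
	void (*disable_sds_intr)(struct fake_ring *);
};

static void gen83_enable(struct fake_ring *r)  { r->masked = 0; }
static void gen83_disable(struct fake_ring *r) { r->masked = 1; }

static const struct fake_hw_ops gen83_ops = {
	.enable_sds_intr  = gen83_enable,
	.disable_sds_intr = gen83_disable,
};

/* Mirrors the wrapper shape: dispatch only when the op is provided. */
static void enable_sds_intr(const struct fake_hw_ops *ops, struct fake_ring *r)
{
	if (ops->enable_sds_intr)
		ops->enable_sds_intr(r);
}

int main(void)
{
	struct fake_ring ring = { .masked = 0 };
	const struct fake_hw_ops *ops = &gen83_ops;

	ops->disable_sds_intr(&ring);	/* mask the ring directly */
	enable_sds_intr(ops, &ring);	/* unmask via the NULL-checked wrapper */
	printf("ring masked = %d\n", ring.masked);	/* prints 0 */
	return 0;
}

The design point is that callers such as the NAPI poll path stay generation-agnostic: adding a new ASIC means filling in another ops table, not touching the callers.
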
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index f776f99f7915..4146664d4d6a 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -13,8 +13,26 @@
 #include <linux/interrupt.h>
 #include <linux/aer.h>
 
+static void __qlcnic_83xx_process_aen(struct qlcnic_adapter *);
+static int qlcnic_83xx_clear_lb_mode(struct qlcnic_adapter *, u8);
+static void qlcnic_83xx_configure_mac(struct qlcnic_adapter *, u8 *, u8,
+				      struct qlcnic_cmd_args *);
+static int qlcnic_83xx_get_port_config(struct qlcnic_adapter *);
+static irqreturn_t qlcnic_83xx_handle_aen(int, void *);
+static pci_ers_result_t qlcnic_83xx_io_error_detected(struct pci_dev *,
+						      pci_channel_state_t);
+static int qlcnic_83xx_set_port_config(struct qlcnic_adapter *);
+static pci_ers_result_t qlcnic_83xx_io_slot_reset(struct pci_dev *);
+static void qlcnic_83xx_io_resume(struct pci_dev *);
+static int qlcnic_83xx_set_lb_mode(struct qlcnic_adapter *, u8);
+static void qlcnic_83xx_set_mac_filter_count(struct qlcnic_adapter *);
+static int qlcnic_83xx_resume(struct qlcnic_adapter *);
+static int qlcnic_83xx_shutdown(struct pci_dev *);
+static void qlcnic_83xx_get_beacon_state(struct qlcnic_adapter *);
+
 #define RSS_HASHTYPE_IP_TCP		0x3
 #define QLC_83XX_FW_MBX_CMD		0
+#define QLC_SKIP_INACTIVE_PCI_REGS	7
 
 static const struct qlcnic_mailbox_metadata qlcnic_83xx_mbx_tbl[] = {
 	{QLCNIC_CMD_CONFIGURE_IP_ADDR, 6, 1},
@@ -34,7 +52,7 @@ static const struct qlcnic_mailbox_metadata qlcnic_83xx_mbx_tbl[] = {
 	{QLCNIC_CMD_READ_MAX_MTU, 4, 2},
 	{QLCNIC_CMD_READ_MAX_LRO, 4, 2},
 	{QLCNIC_CMD_MAC_ADDRESS, 4, 3},
-	{QLCNIC_CMD_GET_PCI_INFO, 1, 66},
+	{QLCNIC_CMD_GET_PCI_INFO, 1, 129},
 	{QLCNIC_CMD_GET_NIC_INFO, 2, 19},
 	{QLCNIC_CMD_SET_NIC_INFO, 32, 1},
 	{QLCNIC_CMD_GET_ESWITCH_CAPABILITY, 4, 3},
@@ -68,7 +86,7 @@ static const struct qlcnic_mailbox_metadata qlcnic_83xx_mbx_tbl[] = {
 	{QLCNIC_CMD_CONFIG_VPORT, 4, 4},
 	{QLCNIC_CMD_BC_EVENT_SETUP, 2, 1},
 	{QLCNIC_CMD_DCB_QUERY_CAP, 1, 2},
-	{QLCNIC_CMD_DCB_QUERY_PARAM, 2, 50},
+	{QLCNIC_CMD_DCB_QUERY_PARAM, 1, 50},
 };
 
 const u32 qlcnic_83xx_ext_reg_tbl[] = {
@@ -180,6 +198,11 @@ static struct qlcnic_hardware_ops qlcnic_83xx_hw_ops = {
 	.io_error_detected		= qlcnic_83xx_io_error_detected,
 	.io_slot_reset			= qlcnic_83xx_io_slot_reset,
 	.io_resume			= qlcnic_83xx_io_resume,
+	.get_beacon_state		= qlcnic_83xx_get_beacon_state,
+	.enable_sds_intr		= qlcnic_83xx_enable_sds_intr,
+	.disable_sds_intr		= qlcnic_83xx_disable_sds_intr,
+	.enable_tx_intr			= qlcnic_83xx_enable_tx_intr,
+	.disable_tx_intr		= qlcnic_83xx_disable_tx_intr,
 
 };
 
@@ -267,11 +290,22 @@ int qlcnic_83xx_wrt_reg_indirect(struct qlcnic_adapter *adapter, ulong addr,
 	}
 }
 
-int qlcnic_83xx_setup_intr(struct qlcnic_adapter *adapter)
+static void qlcnic_83xx_enable_legacy(struct qlcnic_adapter *adapter)
 {
-	int err, i, num_msix;
 	struct qlcnic_hardware_context *ahw = adapter->ahw;
 
+	/* MSI-X enablement failed, use legacy interrupt */
+	adapter->tgt_status_reg = ahw->pci_base0 + QLC_83XX_INTX_PTR;
+	adapter->tgt_mask_reg = ahw->pci_base0 + QLC_83XX_INTX_MASK;
+	adapter->isr_int_vec = ahw->pci_base0 + QLC_83XX_INTX_TRGR;
+	adapter->msix_entries[0].vector = adapter->pdev->irq;
+	dev_info(&adapter->pdev->dev, "using legacy interrupt\n");
+}
+
+static int qlcnic_83xx_calculate_msix_vector(struct qlcnic_adapter *adapter)
+{
+	int num_msix;
+
 	num_msix = adapter->drv_sds_rings;
 
 	/* account for AEN interrupt MSI-X based interrupts */
@@ -280,29 +314,44 @@ int qlcnic_83xx_setup_intr(struct qlcnic_adapter *adapter)
 	if (!(adapter->flags & QLCNIC_TX_INTR_SHARED))
 		num_msix += adapter->drv_tx_rings;
 
-	err = qlcnic_enable_msix(adapter, num_msix);
-	if (err == -ENOMEM)
-		return err;
-	if (adapter->flags & QLCNIC_MSIX_ENABLED)
-		num_msix = adapter->ahw->num_msix;
-	else {
-		if (qlcnic_sriov_vf_check(adapter))
-			return -EINVAL;
-		num_msix = 1;
+	return num_msix;
+}
+
+int qlcnic_83xx_setup_intr(struct qlcnic_adapter *adapter)
+{
+	struct qlcnic_hardware_context *ahw = adapter->ahw;
+	int err, i, num_msix;
+
+	if (adapter->flags & QLCNIC_TSS_RSS) {
+		err = qlcnic_setup_tss_rss_intr(adapter);
+		if (err < 0)
+			return err;
+		num_msix = ahw->num_msix;
+	} else {
+		num_msix = qlcnic_83xx_calculate_msix_vector(adapter);
+
+		err = qlcnic_enable_msix(adapter, num_msix);
+		if (err == -ENOMEM)
+			return err;
+
+		if (adapter->flags & QLCNIC_MSIX_ENABLED) {
+			num_msix = ahw->num_msix;
+		} else {
+			if (qlcnic_sriov_vf_check(adapter))
+				return -EINVAL;
+			num_msix = 1;
+			adapter->drv_tx_rings = QLCNIC_SINGLE_RING;
+		}
 	}
+
 	/* setup interrupt mapping table for fw */
 	ahw->intr_tbl = vzalloc(num_msix *
 				sizeof(struct qlcnic_intrpt_config));
 	if (!ahw->intr_tbl)
 		return -ENOMEM;
-	if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) {
-		/* MSI-X enablement failed, use legacy interrupt */
-		adapter->tgt_status_reg = ahw->pci_base0 + QLC_83XX_INTX_PTR;
-		adapter->tgt_mask_reg = ahw->pci_base0 + QLC_83XX_INTX_MASK;
-		adapter->isr_int_vec = ahw->pci_base0 + QLC_83XX_INTX_TRGR;
-		adapter->msix_entries[0].vector = adapter->pdev->irq;
-		dev_info(&adapter->pdev->dev, "using legacy interrupt\n");
-	}
+
+	if (!(adapter->flags & QLCNIC_MSIX_ENABLED))
+		qlcnic_83xx_enable_legacy(adapter);
 
 	for (i = 0; i < num_msix; i++) {
 		if (adapter->flags & QLCNIC_MSIX_ENABLED)
@@ -312,35 +361,22 @@ int qlcnic_83xx_setup_intr(struct qlcnic_adapter *adapter)
 		ahw->intr_tbl[i].id = i;
 		ahw->intr_tbl[i].src = 0;
 	}
+
 	return 0;
 }
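
A minimal sketch of the vector accounting that the new qlcnic_83xx_calculate_msix_vector() helper appears to perform, assuming one vector per SDS ring, one extra for the mailbox/AEN interrupt, and dedicated Tx vectors only when Tx interrupts are not shared with the SDS vectors. The function and values below are illustrative, not the driver's code.

#include <stdio.h>
#include <stdbool.h>

/* One vector per SDS ring, one for the mailbox/AEN interrupt, and a
 * dedicated vector per Tx ring only when Tx interrupts are not shared. */
static int calc_msix_vectors(int sds_rings, int tx_rings, bool tx_intr_shared)
{
	int num_msix = sds_rings;

	num_msix += 1;			/* AEN / mailbox interrupt */
	if (!tx_intr_shared)
		num_msix += tx_rings;
	return num_msix;
}

int main(void)
{
	/* 4 SDS rings + 4 Tx rings with unshared Tx interrupts -> 9 vectors */
	printf("vectors = %d\n", calc_msix_vectors(4, 4, false));
	return 0;
}

If MSI-X enablement fails, the setup path above falls back to a single legacy interrupt and a single Tx ring, which is why the fallback was split into its own helper.
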
 
-inline void qlcnic_83xx_clear_legacy_intr_mask(struct qlcnic_adapter *adapter)
+static inline void qlcnic_83xx_clear_legacy_intr_mask(struct qlcnic_adapter *adapter)
 {
 	writel(0, adapter->tgt_mask_reg);
 }
 
-inline void qlcnic_83xx_set_legacy_intr_mask(struct qlcnic_adapter *adapter)
+static inline void qlcnic_83xx_set_legacy_intr_mask(struct qlcnic_adapter *adapter)
 {
 	if (adapter->tgt_mask_reg)
 		writel(1, adapter->tgt_mask_reg);
 }
 
-/* Enable MSI-x and INT-x interrupts */
-void qlcnic_83xx_enable_intr(struct qlcnic_adapter *adapter,
-			     struct qlcnic_host_sds_ring *sds_ring)
-{
-	writel(0, sds_ring->crb_intr_mask);
-}
-
-/* Disable MSI-x and INT-x interrupts */
-void qlcnic_83xx_disable_intr(struct qlcnic_adapter *adapter,
-			      struct qlcnic_host_sds_ring *sds_ring)
-{
-	writel(1, sds_ring->crb_intr_mask);
-}
-
-inline void qlcnic_83xx_enable_legacy_msix_mbx_intr(struct qlcnic_adapter
+static inline void qlcnic_83xx_enable_legacy_msix_mbx_intr(struct qlcnic_adapter
 						    *adapter)
 {
 	u32 mask;
@@ -477,7 +513,7 @@ irqreturn_t qlcnic_83xx_tmp_intr(int irq, void *data)
 
 done:
 	adapter->ahw->diag_cnt++;
-	qlcnic_83xx_enable_intr(adapter, sds_ring);
+	qlcnic_enable_sds_intr(adapter, sds_ring);
 
 	return IRQ_HANDLED;
 }
@@ -634,10 +670,10 @@ int qlcnic_83xx_get_port_info(struct qlcnic_adapter *adapter)
 	return status;
 }
 
-void qlcnic_83xx_set_mac_filter_count(struct qlcnic_adapter *adapter)
+static void qlcnic_83xx_set_mac_filter_count(struct qlcnic_adapter *adapter)
 {
 	struct qlcnic_hardware_context *ahw = adapter->ahw;
-	u16 act_pci_fn = ahw->act_pci_func;
+	u16 act_pci_fn = ahw->total_nic_func;
 	u16 count;
 
 	ahw->max_mc_count = QLC_83XX_MAX_MC_COUNT;
@@ -869,7 +905,7 @@ static void qlcnic_83xx_handle_idc_comp_aen(struct qlcnic_adapter *adapter,
 	return;
 }
 
-void __qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter)
+static void __qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter)
 {
 	struct qlcnic_hardware_context *ahw = adapter->ahw;
 	u32 event[QLC_83XX_MBX_AEN_CNT];
@@ -1276,8 +1312,8 @@ int qlcnic_83xx_create_tx_ctx(struct qlcnic_adapter *adapter,
 	/* send the mailbox command*/
 	err = qlcnic_issue_cmd(adapter, &cmd);
 	if (err) {
-		dev_err(&adapter->pdev->dev,
-			"Failed to create Tx ctx in firmware 0x%x\n", err);
+		netdev_err(adapter->netdev,
+			   "Failed to create Tx ctx in firmware 0x%x\n", err);
 		goto out;
 	}
 	mbx_out = (struct qlcnic_tx_mbx_out *)&cmd.rsp.arg[2];
@@ -1288,8 +1324,9 @@ int qlcnic_83xx_create_tx_ctx(struct qlcnic_adapter *adapter,
 		intr_mask = ahw->intr_tbl[adapter->drv_sds_rings + ring].src;
 		tx->crb_intr_mask = ahw->pci_base0 + intr_mask;
 	}
-	dev_info(&adapter->pdev->dev, "Tx Context[0x%x] Created, state:0x%x\n",
-		 tx->ctx_id, mbx_out->state);
+	netdev_info(adapter->netdev,
+		    "Tx Context[0x%x] Created, state:0x%x\n",
+		    tx->ctx_id, mbx_out->state);
 out:
 	qlcnic_free_mbx_args(&cmd);
 	return err;
@@ -1341,7 +1378,7 @@ static int qlcnic_83xx_diag_alloc_res(struct net_device *netdev, int test,
 	if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
 		for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
 			sds_ring = &adapter->recv_ctx->sds_rings[ring];
-			qlcnic_83xx_enable_intr(adapter, sds_ring);
+			qlcnic_enable_sds_intr(adapter, sds_ring);
 		}
 	}
 
@@ -1366,7 +1403,7 @@ static void qlcnic_83xx_diag_free_res(struct net_device *netdev,
 		for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
 			sds_ring = &adapter->recv_ctx->sds_rings[ring];
 			if (adapter->flags & QLCNIC_MSIX_ENABLED)
-				qlcnic_83xx_disable_intr(adapter, sds_ring);
+				qlcnic_disable_sds_intr(adapter, sds_ring);
 		}
 	}
 
@@ -1386,6 +1423,33 @@ out:
 	netif_device_attach(netdev);
 }
 
+static void qlcnic_83xx_get_beacon_state(struct qlcnic_adapter *adapter)
+{
+	struct qlcnic_hardware_context *ahw = adapter->ahw;
+	struct qlcnic_cmd_args cmd;
+	u8 beacon_state;
+	int err = 0;
+
+	err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_LED_CONFIG);
+	if (!err) {
+		err = qlcnic_issue_cmd(adapter, &cmd);
+		if (!err) {
+			beacon_state = cmd.rsp.arg[4];
+			if (beacon_state == QLCNIC_BEACON_DISABLE)
+				ahw->beacon_state = QLC_83XX_BEACON_OFF;
+			else if (beacon_state == QLC_83XX_ENABLE_BEACON)
+				ahw->beacon_state = QLC_83XX_BEACON_ON;
+		}
+	} else {
+		netdev_err(adapter->netdev, "Get beacon state failed, err=%d\n",
+			   err);
+	}
+
+	qlcnic_free_mbx_args(&cmd);
+
+	return;
+}
+
 int qlcnic_83xx_config_led(struct qlcnic_adapter *adapter, u32 state,
 			   u32 beacon)
 {
@@ -1498,8 +1562,7 @@ int  qlcnic_83xx_set_led(struct net_device *netdev,
 	return err;
 }
 
-void qlcnic_83xx_register_nic_idc_func(struct qlcnic_adapter *adapter,
-				       int enable)
+void qlcnic_83xx_initialize_nic(struct qlcnic_adapter *adapter, int enable)
 {
 	struct qlcnic_cmd_args cmd;
 	int status;
@@ -1507,21 +1570,21 @@ void qlcnic_83xx_register_nic_idc_func(struct qlcnic_adapter *adapter,
 	if (qlcnic_sriov_vf_check(adapter))
 		return;
 
-	if (enable) {
+	if (enable)
 		status = qlcnic_alloc_mbx_args(&cmd, adapter,
 					       QLCNIC_CMD_INIT_NIC_FUNC);
-		if (status)
-			return;
-
-		cmd.req.arg[1] = BIT_0 | BIT_31;
-	} else {
+	else
 		status = qlcnic_alloc_mbx_args(&cmd, adapter,
 					       QLCNIC_CMD_STOP_NIC_FUNC);
-		if (status)
-			return;
 
-		cmd.req.arg[1] = BIT_0 | BIT_31;
-	}
+	if (status)
+		return;
+
+	cmd.req.arg[1] = QLC_REGISTER_LB_IDC | QLC_INIT_FW_RESOURCES;
+
+	if (adapter->dcb)
+		cmd.req.arg[1] |= QLC_REGISTER_DCB_AEN;
+
 	status = qlcnic_issue_cmd(adapter, &cmd);
 	if (status)
 		dev_err(&adapter->pdev->dev,
@@ -1531,7 +1594,7 @@ void qlcnic_83xx_register_nic_idc_func(struct qlcnic_adapter *adapter,
 	qlcnic_free_mbx_args(&cmd);
 }
 
-int qlcnic_83xx_set_port_config(struct qlcnic_adapter *adapter)
+static int qlcnic_83xx_set_port_config(struct qlcnic_adapter *adapter)
 {
 	struct qlcnic_cmd_args cmd;
 	int err;
@@ -1548,7 +1611,7 @@ int qlcnic_83xx_set_port_config(struct qlcnic_adapter *adapter)
 	return err;
 }
 
-int qlcnic_83xx_get_port_config(struct qlcnic_adapter *adapter)
+static int qlcnic_83xx_get_port_config(struct qlcnic_adapter *adapter)
 {
 	struct qlcnic_cmd_args cmd;
 	int err;
@@ -1590,7 +1653,9 @@ static void qlcnic_83xx_set_interface_id_promisc(struct qlcnic_adapter *adapter,
 						 u32 *interface_id)
 {
 	if (qlcnic_sriov_pf_check(adapter)) {
+		qlcnic_alloc_lb_filters_mem(adapter);
 		qlcnic_pf_set_interface_id_promisc(adapter, interface_id);
+		adapter->rx_mac_learn = true;
 	} else {
 		if (!qlcnic_sriov_vf_check(adapter))
 			*interface_id = adapter->recv_ctx->context_id << 16;
@@ -1617,7 +1682,11 @@ int qlcnic_83xx_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode)
 
 	cmd->type = QLC_83XX_MBX_CMD_NO_WAIT;
 	qlcnic_83xx_set_interface_id_promisc(adapter, &temp);
-	cmd->req.arg[1] = (mode ? 1 : 0) | temp;
+
+	if (qlcnic_84xx_check(adapter) && qlcnic_sriov_pf_check(adapter))
+		mode = VPORT_MISS_MODE_ACCEPT_ALL;
+
+	cmd->req.arg[1] = mode | temp;
 	err = qlcnic_issue_cmd(adapter, cmd);
 	if (!err)
 		return err;
@@ -1711,7 +1780,7 @@ static void qlcnic_extend_lb_idc_cmpltn_wait(struct qlcnic_adapter *adapter,
 	ahw->extend_lb_time = 0;
 }
 
-int qlcnic_83xx_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
+static int qlcnic_83xx_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
 {
 	struct qlcnic_hardware_context *ahw = adapter->ahw;
 	struct net_device *netdev = adapter->netdev;
@@ -1780,7 +1849,7 @@ int qlcnic_83xx_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
 	return status;
 }
 
-int qlcnic_83xx_clear_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
+static int qlcnic_83xx_clear_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
 {
 	struct qlcnic_hardware_context *ahw = adapter->ahw;
 	u32 config = ahw->port_config, max_wait_count;
@@ -2015,8 +2084,8 @@ void qlcnic_83xx_change_l2_filter(struct qlcnic_adapter *adapter, u64 *addr,
 	qlcnic_83xx_sre_macaddr_change(adapter, mac, vlan_id, QLCNIC_MAC_ADD);
 }
 
-void qlcnic_83xx_configure_mac(struct qlcnic_adapter *adapter, u8 *mac,
-			       u8 type, struct qlcnic_cmd_args *cmd)
+static void qlcnic_83xx_configure_mac(struct qlcnic_adapter *adapter, u8 *mac,
+				      u8 type, struct qlcnic_cmd_args *cmd)
 {
 	switch (type) {
 	case QLCNIC_SET_STATION_MAC:
@@ -2060,37 +2129,130 @@ int qlcnic_83xx_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac,
 	return err;
 }
 
-void qlcnic_83xx_config_intr_coal(struct qlcnic_adapter *adapter)
+static int qlcnic_83xx_set_rx_intr_coal(struct qlcnic_adapter *adapter)
 {
-	int err;
-	u16 temp;
-	struct qlcnic_cmd_args cmd;
 	struct qlcnic_nic_intr_coalesce *coal = &adapter->ahw->coal;
+	struct qlcnic_cmd_args cmd;
+	u16 temp;
+	int err;
 
-	if (adapter->recv_ctx->state == QLCNIC_HOST_CTX_STATE_FREED)
-		return;
+	err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_INTR_COAL);
+	if (err)
+		return err;
+
+	temp = adapter->recv_ctx->context_id;
+	cmd.req.arg[1] = QLCNIC_INTR_COAL_TYPE_RX | temp << 16;
+	temp = coal->rx_time_us;
+	cmd.req.arg[2] = coal->rx_packets | temp << 16;
+	cmd.req.arg[3] = coal->flag;
+
+	err = qlcnic_issue_cmd(adapter, &cmd);
+	if (err != QLCNIC_RCODE_SUCCESS)
+		netdev_err(adapter->netdev,
+			   "failed to set interrupt coalescing parameters\n");
+
+	qlcnic_free_mbx_args(&cmd);
+
+	return err;
+}
+
+static int qlcnic_83xx_set_tx_intr_coal(struct qlcnic_adapter *adapter)
+{
+	struct qlcnic_nic_intr_coalesce *coal = &adapter->ahw->coal;
+	struct qlcnic_cmd_args cmd;
+	u16 temp;
+	int err;
 
 	err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_INTR_COAL);
 	if (err)
-		return;
+		return err;
 
-	if (coal->type == QLCNIC_INTR_COAL_TYPE_RX) {
-		temp = adapter->recv_ctx->context_id;
-		cmd.req.arg[1] = QLCNIC_INTR_COAL_TYPE_RX | temp << 16;
-		temp = coal->rx_time_us;
-		cmd.req.arg[2] = coal->rx_packets | temp << 16;
-	} else if (coal->type == QLCNIC_INTR_COAL_TYPE_TX) {
-		temp = adapter->tx_ring->ctx_id;
-		cmd.req.arg[1] = QLCNIC_INTR_COAL_TYPE_TX | temp << 16;
-		temp = coal->tx_time_us;
-		cmd.req.arg[2] = coal->tx_packets | temp << 16;
-	}
+	temp = adapter->tx_ring->ctx_id;
+	cmd.req.arg[1] = QLCNIC_INTR_COAL_TYPE_TX | temp << 16;
+	temp = coal->tx_time_us;
+	cmd.req.arg[2] = coal->tx_packets | temp << 16;
 	cmd.req.arg[3] = coal->flag;
+
 	err = qlcnic_issue_cmd(adapter, &cmd);
 	if (err != QLCNIC_RCODE_SUCCESS)
-		dev_info(&adapter->pdev->dev,
-			 "Failed to send interrupt coalescence parameters\n");
+		netdev_err(adapter->netdev,
+			   "failed to set interrupt coalescing parameters\n");
+
 	qlcnic_free_mbx_args(&cmd);
+
+	return err;
+}
+
+int qlcnic_83xx_set_rx_tx_intr_coal(struct qlcnic_adapter *adapter)
+{
+	int err = 0;
+
+	err = qlcnic_83xx_set_rx_intr_coal(adapter);
+	if (err)
+		netdev_err(adapter->netdev,
+			   "failed to set Rx coalescing parameters\n");
+
+	err = qlcnic_83xx_set_tx_intr_coal(adapter);
+	if (err)
+		netdev_err(adapter->netdev,
+			   "failed to set Tx coalescing parameters\n");
+
+	return err;
+}
+
+int qlcnic_83xx_config_intr_coal(struct qlcnic_adapter *adapter,
+				 struct ethtool_coalesce *ethcoal)
+{
+	struct qlcnic_nic_intr_coalesce *coal = &adapter->ahw->coal;
+	u32 rx_coalesce_usecs, rx_max_frames;
+	u32 tx_coalesce_usecs, tx_max_frames;
+	int err;
+
+	if (adapter->recv_ctx->state == QLCNIC_HOST_CTX_STATE_FREED)
+		return -EIO;
+
+	tx_coalesce_usecs = ethcoal->tx_coalesce_usecs;
+	tx_max_frames = ethcoal->tx_max_coalesced_frames;
+	rx_coalesce_usecs = ethcoal->rx_coalesce_usecs;
+	rx_max_frames = ethcoal->rx_max_coalesced_frames;
+	coal->flag = QLCNIC_INTR_DEFAULT;
+
+	if ((coal->rx_time_us == rx_coalesce_usecs) &&
+	    (coal->rx_packets == rx_max_frames)) {
+		coal->type = QLCNIC_INTR_COAL_TYPE_TX;
+		coal->tx_time_us = tx_coalesce_usecs;
+		coal->tx_packets = tx_max_frames;
+	} else if ((coal->tx_time_us == tx_coalesce_usecs) &&
+		   (coal->tx_packets == tx_max_frames)) {
+		coal->type = QLCNIC_INTR_COAL_TYPE_RX;
+		coal->rx_time_us = rx_coalesce_usecs;
+		coal->rx_packets = rx_max_frames;
+	} else {
+		coal->type = QLCNIC_INTR_COAL_TYPE_RX_TX;
+		coal->rx_time_us = rx_coalesce_usecs;
+		coal->rx_packets = rx_max_frames;
+		coal->tx_time_us = tx_coalesce_usecs;
+		coal->tx_packets = tx_max_frames;
+	}
+
+	switch (coal->type) {
+	case QLCNIC_INTR_COAL_TYPE_RX:
+		err = qlcnic_83xx_set_rx_intr_coal(adapter);
+		break;
+	case QLCNIC_INTR_COAL_TYPE_TX:
+		err = qlcnic_83xx_set_tx_intr_coal(adapter);
+		break;
+	case QLCNIC_INTR_COAL_TYPE_RX_TX:
+		err = qlcnic_83xx_set_rx_tx_intr_coal(adapter);
+		break;
+	default:
+		err = -EINVAL;
+		netdev_err(adapter->netdev,
+			   "Invalid Interrupt coalescing type\n");
+		break;
+	}
+
+	return err;
 }
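
A small illustrative sketch of the type selection done in qlcnic_83xx_config_intr_coal() above: the requested ethtool values are compared against the cached ones to decide whether only the Rx side, only the Tx side, or both get reprogrammed. The types and names below are invented for the example and are not the driver's structures.

#include <stdio.h>

enum coal_type { COAL_RX = 1, COAL_TX = 2, COAL_RX_TX = 3 };

struct coal_params { unsigned rx_us, rx_frames, tx_us, tx_frames; };

static enum coal_type pick_coal_type(const struct coal_params *cur,
				     const struct coal_params *req)
{
	if (cur->rx_us == req->rx_us && cur->rx_frames == req->rx_frames)
		return COAL_TX;		/* Rx side untouched: program Tx only */
	if (cur->tx_us == req->tx_us && cur->tx_frames == req->tx_frames)
		return COAL_RX;		/* Tx side untouched: program Rx only */
	return COAL_RX_TX;		/* both sides changed */
}

int main(void)
{
	struct coal_params cur = { 3, 256, 64, 64 };
	struct coal_params req = { 3, 256, 32, 16 };	/* only Tx changed */

	printf("type = %d\n", pick_coal_type(&cur, &req));	/* 2 == COAL_TX */
	return 0;
}

Splitting the firmware programming into separate Rx and Tx helpers lets the combined RX_TX case simply call both, instead of duplicating the mailbox plumbing.
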
 
 static void qlcnic_83xx_handle_link_aen(struct qlcnic_adapter *adapter,
@@ -2119,7 +2281,7 @@ static void qlcnic_83xx_handle_link_aen(struct qlcnic_adapter *adapter,
 	qlcnic_advert_link_change(adapter, link_status);
 }
 
-irqreturn_t qlcnic_83xx_handle_aen(int irq, void *data)
+static irqreturn_t qlcnic_83xx_handle_aen(int irq, void *data)
 {
 	struct qlcnic_adapter *adapter = data;
 	struct qlcnic_mailbox *mbx;
@@ -2145,36 +2307,6 @@ out:
 	return IRQ_HANDLED;
 }
 
-int qlcnic_enable_eswitch(struct qlcnic_adapter *adapter, u8 port, u8 enable)
-{
-	int err = -EIO;
-	struct qlcnic_cmd_args cmd;
-
-	if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC) {
-		dev_err(&adapter->pdev->dev,
-			"%s: Error, invoked by non management func\n",
-			__func__);
-		return err;
-	}
-
-	err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_TOGGLE_ESWITCH);
-	if (err)
-		return err;
-
-	cmd.req.arg[1] = (port & 0xf) | BIT_4;
-	err = qlcnic_issue_cmd(adapter, &cmd);
-
-	if (err != QLCNIC_RCODE_SUCCESS) {
-		dev_err(&adapter->pdev->dev, "Failed to enable eswitch%d\n",
-			err);
-		err = -EIO;
-	}
-	qlcnic_free_mbx_args(&cmd);
-
-	return err;
-
-}
-
 int qlcnic_83xx_set_nic_info(struct qlcnic_adapter *adapter,
 			     struct qlcnic_info *nic)
 {
@@ -2268,11 +2400,37 @@ out:
 	return err;
 }
 
+int qlcnic_get_pci_func_type(struct qlcnic_adapter *adapter, u16 type,
+			     u16 *nic, u16 *fcoe, u16 *iscsi)
+{
+	struct device *dev = &adapter->pdev->dev;
+	int err = 0;
+
+	switch (type) {
+	case QLCNIC_TYPE_NIC:
+		(*nic)++;
+		break;
+	case QLCNIC_TYPE_FCOE:
+		(*fcoe)++;
+		break;
+	case QLCNIC_TYPE_ISCSI:
+		(*iscsi)++;
+		break;
+	default:
+		dev_err(dev, "%s: Unknown PCI type[%x]\n",
+			__func__, type);
+		err = -EIO;
+	}
+
+	return err;
+}
+
 int qlcnic_83xx_get_pci_info(struct qlcnic_adapter *adapter,
 			     struct qlcnic_pci_info *pci_info)
 {
 	struct qlcnic_hardware_context *ahw = adapter->ahw;
 	struct device *dev = &adapter->pdev->dev;
+	u16 nic = 0, fcoe = 0, iscsi = 0;
 	struct qlcnic_cmd_args cmd;
 	int i, err = 0, j = 0;
 	u32 temp;
@@ -2283,16 +2441,20 @@ int qlcnic_83xx_get_pci_info(struct qlcnic_adapter *adapter,
 
 	err = qlcnic_issue_cmd(adapter, &cmd);
 
-	ahw->act_pci_func = 0;
+	ahw->total_nic_func = 0;
 	if (err == QLCNIC_RCODE_SUCCESS) {
 		ahw->max_pci_func = cmd.rsp.arg[1] & 0xFF;
-		for (i = 2, j = 0; j < QLCNIC_MAX_PCI_FUNC; j++, pci_info++) {
+		for (i = 2, j = 0; j < ahw->max_vnic_func; j++, pci_info++) {
 			pci_info->id = cmd.rsp.arg[i] & 0xFFFF;
 			pci_info->active = (cmd.rsp.arg[i] & 0xFFFF0000) >> 16;
 			i++;
+			if (!pci_info->active) {
+				i += QLC_SKIP_INACTIVE_PCI_REGS;
+				continue;
+			}
 			pci_info->type = cmd.rsp.arg[i] & 0xFFFF;
-			if (pci_info->type == QLCNIC_TYPE_NIC)
-				ahw->act_pci_func++;
+			err = qlcnic_get_pci_func_type(adapter, pci_info->type,
+						       &nic, &fcoe, &iscsi);
 			temp = (cmd.rsp.arg[i] & 0xFFFF0000) >> 16;
 			pci_info->default_port = temp;
 			i++;
@@ -2310,6 +2472,13 @@ int qlcnic_83xx_get_pci_info(struct qlcnic_adapter *adapter,
 		err = -EIO;
 	}
 
+	ahw->total_nic_func = nic;
+	ahw->total_pci_func = nic + fcoe + iscsi;
+	if (ahw->total_nic_func == 0 || ahw->total_pci_func == 0) {
+		dev_err(dev, "%s: Invalid function count: total nic func[%x], total pci func[%x]\n",
+			__func__, ahw->total_nic_func, ahw->total_pci_func);
+		err = -EIO;
+	}
 	qlcnic_free_mbx_args(&cmd);
 
 	return err;
@@ -3459,7 +3628,7 @@ int qlcnic_83xx_flash_test(struct qlcnic_adapter *adapter)
 	return 0;
 }
 
-int qlcnic_83xx_shutdown(struct pci_dev *pdev)
+static int qlcnic_83xx_shutdown(struct pci_dev *pdev)
 {
 	struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
 	struct net_device *netdev = adapter->netdev;
@@ -3481,7 +3650,7 @@ int qlcnic_83xx_shutdown(struct pci_dev *pdev)
 	return 0;
 }
 
-int qlcnic_83xx_resume(struct qlcnic_adapter *adapter)
+static int qlcnic_83xx_resume(struct qlcnic_adapter *adapter)
 {
 	struct qlcnic_hardware_context *ahw = adapter->ahw;
 	struct qlc_83xx_idc *idc = &ahw->idc;
@@ -3834,8 +4003,8 @@ int qlcnic_83xx_init_mailbox_work(struct qlcnic_adapter *adapter)
 	return 0;
 }
 
-pci_ers_result_t qlcnic_83xx_io_error_detected(struct pci_dev *pdev,
-					       pci_channel_state_t state)
+static pci_ers_result_t qlcnic_83xx_io_error_detected(struct pci_dev *pdev,
+						      pci_channel_state_t state)
 {
 	struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
 
@@ -3856,7 +4025,7 @@ pci_ers_result_t qlcnic_83xx_io_error_detected(struct pci_dev *pdev,
 	return PCI_ERS_RESULT_NEED_RESET;
 }
 
-pci_ers_result_t qlcnic_83xx_io_slot_reset(struct pci_dev *pdev)
+static pci_ers_result_t qlcnic_83xx_io_slot_reset(struct pci_dev *pdev)
 {
 	struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
 	int err = 0;
@@ -3879,7 +4048,7 @@ disconnect:
 	return PCI_ERS_RESULT_DISCONNECT;
 }
 
-void qlcnic_83xx_io_resume(struct pci_dev *pdev)
+static void qlcnic_83xx_io_resume(struct pci_dev *pdev)
 {
 	struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
 
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
index a6a33508e401..f92485ca21d1 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
@@ -324,6 +324,11 @@ struct qlc_83xx_idc {
 	char		**name;
 };
 
+enum qlcnic_vlan_operations {
+	QLC_VLAN_ADD = 0,
+	QLC_VLAN_DELETE
+};
+
 /* Device States */
 enum qlcnic_83xx_states {
 	QLC_83XX_IDC_DEV_UNKNOWN,
@@ -376,6 +381,8 @@ enum qlcnic_83xx_states {
 
 /* LED configuration settings */
 #define QLC_83XX_ENABLE_BEACON		0xe
+#define QLC_83XX_BEACON_ON		1
+#define QLC_83XX_BEACON_OFF		0
 #define QLC_83XX_LED_RATE		0xff
 #define QLC_83XX_LED_ACT		(1 << 10)
 #define QLC_83XX_LED_MOD		(0 << 13)
@@ -518,6 +525,11 @@ enum qlc_83xx_ext_regs {
 	QLC_83XX_ASIC_TEMP,
 };
 
+/* Initialize/Stop NIC command bit definitions */
+#define QLC_REGISTER_DCB_AEN		BIT_1
+#define QLC_REGISTER_LB_IDC		BIT_0
+#define QLC_INIT_FW_RESOURCES		BIT_31
+
 /* 83xx functions */
 int qlcnic_83xx_get_fw_version(struct qlcnic_adapter *);
 int qlcnic_83xx_issue_cmd(struct qlcnic_adapter *, struct qlcnic_cmd_args *);
@@ -532,17 +544,13 @@ void qlcnic_83xx_write_crb(struct qlcnic_adapter *, char *, loff_t, size_t);
 void qlcnic_83xx_read_crb(struct qlcnic_adapter *, char *, loff_t, size_t);
 int qlcnic_83xx_rd_reg_indirect(struct qlcnic_adapter *, ulong, int *);
 int qlcnic_83xx_wrt_reg_indirect(struct qlcnic_adapter *, ulong, u32);
-void qlcnic_83xx_process_rcv_diag(struct qlcnic_adapter *, int, u64 []);
 int qlcnic_83xx_nic_set_promisc(struct qlcnic_adapter *, u32);
-int qlcnic_83xx_set_lb_mode(struct qlcnic_adapter *, u8);
-int qlcnic_83xx_clear_lb_mode(struct qlcnic_adapter *, u8);
 int qlcnic_83xx_config_hw_lro(struct qlcnic_adapter *, int);
 int qlcnic_83xx_config_rss(struct qlcnic_adapter *, int);
-int qlcnic_83xx_config_intr_coalesce(struct qlcnic_adapter *);
 void qlcnic_83xx_change_l2_filter(struct qlcnic_adapter *, u64 *, u16);
 int qlcnic_83xx_get_pci_info(struct qlcnic_adapter *, struct qlcnic_pci_info *);
 int qlcnic_83xx_set_nic_info(struct qlcnic_adapter *, struct qlcnic_info *);
-void qlcnic_83xx_register_nic_idc_func(struct qlcnic_adapter *, int);
+void qlcnic_83xx_initialize_nic(struct qlcnic_adapter *, int);
 
 int qlcnic_83xx_napi_add(struct qlcnic_adapter *, struct net_device *);
 void qlcnic_83xx_napi_del(struct qlcnic_adapter *);
@@ -563,32 +571,22 @@ void qlcnic_83xx_process_rcv_ring_diag(struct qlcnic_host_sds_ring *);
 int qlcnic_83xx_config_intrpt(struct qlcnic_adapter *, bool);
 int qlcnic_83xx_sre_macaddr_change(struct qlcnic_adapter *, u8 *, u16, u8);
 int qlcnic_83xx_get_mac_address(struct qlcnic_adapter *, u8 *, u8);
-void qlcnic_83xx_configure_mac(struct qlcnic_adapter *, u8 *, u8,
-			       struct qlcnic_cmd_args *);
 int qlcnic_83xx_alloc_mbx_args(struct qlcnic_cmd_args *,
 			       struct qlcnic_adapter *, u32);
 void qlcnic_free_mbx_args(struct qlcnic_cmd_args *);
 void qlcnic_set_npar_data(struct qlcnic_adapter *, const struct qlcnic_info *,
 			  struct qlcnic_info *);
-void qlcnic_83xx_config_intr_coal(struct qlcnic_adapter *);
-irqreturn_t qlcnic_83xx_handle_aen(int, void *);
+int qlcnic_83xx_config_intr_coal(struct qlcnic_adapter *,
+				 struct ethtool_coalesce *);
+int qlcnic_83xx_set_rx_tx_intr_coal(struct qlcnic_adapter *);
 int qlcnic_83xx_get_port_info(struct qlcnic_adapter *);
 void qlcnic_83xx_enable_mbx_interrupt(struct qlcnic_adapter *);
 void qlcnic_83xx_disable_mbx_intr(struct qlcnic_adapter *);
 irqreturn_t qlcnic_83xx_clear_legacy_intr(struct qlcnic_adapter *);
 irqreturn_t qlcnic_83xx_intr(int, void *);
 irqreturn_t qlcnic_83xx_tmp_intr(int, void *);
-void qlcnic_83xx_enable_intr(struct qlcnic_adapter *,
-			     struct qlcnic_host_sds_ring *);
-void qlcnic_83xx_disable_intr(struct qlcnic_adapter *,
-			     struct qlcnic_host_sds_ring *);
 void qlcnic_83xx_check_vf(struct qlcnic_adapter *,
 			  const struct pci_device_id *);
-void __qlcnic_83xx_process_aen(struct qlcnic_adapter *);
-int qlcnic_83xx_get_port_config(struct qlcnic_adapter *);
-int qlcnic_83xx_set_port_config(struct qlcnic_adapter *);
-int qlcnic_enable_eswitch(struct qlcnic_adapter *, u8, u8);
-int qlcnic_83xx_get_nic_configuration(struct qlcnic_adapter *);
 int qlcnic_83xx_config_default_opmode(struct qlcnic_adapter *);
 int qlcnic_83xx_setup_mbx_intr(struct qlcnic_adapter *);
 void qlcnic_83xx_free_mbx_intr(struct qlcnic_adapter *);
@@ -610,9 +608,7 @@ int qlcnic_83xx_lockless_flash_read32(struct qlcnic_adapter *,
 				      u32, u8 *, int);
 int qlcnic_83xx_init(struct qlcnic_adapter *, int);
 int qlcnic_83xx_idc_ready_state_entry(struct qlcnic_adapter *);
-int qlcnic_83xx_check_hw_status(struct qlcnic_adapter *p_dev);
 void qlcnic_83xx_idc_poll_dev_state(struct work_struct *);
-int qlcnic_83xx_get_reset_instruction_template(struct qlcnic_adapter *);
 void qlcnic_83xx_idc_exit(struct qlcnic_adapter *);
 void qlcnic_83xx_idc_request_reset(struct qlcnic_adapter *, u32);
 int qlcnic_83xx_lock_driver(struct qlcnic_adapter *);
@@ -620,7 +616,6 @@ void qlcnic_83xx_unlock_driver(struct qlcnic_adapter *);
 int qlcnic_83xx_set_default_offload_settings(struct qlcnic_adapter *);
 int qlcnic_83xx_ms_mem_write128(struct qlcnic_adapter *, u64, u32 *, u32);
 int qlcnic_83xx_idc_vnic_pf_entry(struct qlcnic_adapter *);
-int qlcnic_83xx_enable_vnic_mode(struct qlcnic_adapter *, int);
 int qlcnic_83xx_disable_vnic_mode(struct qlcnic_adapter *, int);
 int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *);
 int qlcnic_83xx_get_vnic_vport_info(struct qlcnic_adapter *,
@@ -648,9 +643,6 @@ int qlcnic_83xx_enable_flash_write(struct qlcnic_adapter *);
 int qlcnic_83xx_disable_flash_write(struct qlcnic_adapter *);
 void qlcnic_83xx_enable_mbx_poll(struct qlcnic_adapter *);
 void qlcnic_83xx_disable_mbx_poll(struct qlcnic_adapter *);
-void qlcnic_83xx_set_mac_filter_count(struct qlcnic_adapter *);
-int qlcnic_83xx_shutdown(struct pci_dev *);
-int qlcnic_83xx_resume(struct qlcnic_adapter *);
 int qlcnic_83xx_idc_init(struct qlcnic_adapter *);
 int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *);
 int qlcnic_83xx_set_vnic_opmode(struct qlcnic_adapter *);
@@ -658,9 +650,4 @@ int qlcnic_83xx_check_vnic_state(struct qlcnic_adapter *);
 void qlcnic_83xx_aer_stop_poll_work(struct qlcnic_adapter *);
 int qlcnic_83xx_aer_reset(struct qlcnic_adapter *);
 void qlcnic_83xx_aer_start_poll_work(struct qlcnic_adapter *);
-pci_ers_result_t qlcnic_83xx_io_error_detected(struct pci_dev *,
-					       pci_channel_state_t);
-pci_ers_result_t qlcnic_83xx_io_slot_reset(struct pci_dev *);
-void qlcnic_83xx_io_resume(struct pci_dev *);
-void qlcnic_83xx_stop_hw(struct qlcnic_adapter *);
 #endif
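
A hedged sketch of how the new INIT/STOP NIC bit definitions above appear to compose into the mailbox argument used by qlcnic_83xx_initialize_nic(): loopback IDC registration and firmware resource initialization are always requested, and the DCB AEN bit is added only when DCB support is present. The FAKE_BIT() macro and helper below are local inventions to keep the example standalone.

#include <stdio.h>
#include <stdbool.h>

#define FAKE_BIT(n)		(1U << (n))
#define REGISTER_LB_IDC		FAKE_BIT(0)	/* loopback IDC events */
#define REGISTER_DCB_AEN	FAKE_BIT(1)	/* DCB async events */
#define INIT_FW_RESOURCES	FAKE_BIT(31)	/* (re)initialize fw resources */

static unsigned int build_init_nic_arg(bool dcb_present)
{
	unsigned int arg = REGISTER_LB_IDC | INIT_FW_RESOURCES;

	if (dcb_present)
		arg |= REGISTER_DCB_AEN;
	return arg;
}

int main(void)
{
	printf("arg[1] = 0x%08x\n", build_init_nic_arg(true));	/* 0x80000003 */
	return 0;
}

Folding the DCB AEN registration into this single command is what allows the separate register_aen DCB op to be removed later in this series of hunks.
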
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index 918e18ddf038..90a2dda351ec 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -39,6 +39,9 @@
 static int qlcnic_83xx_init_default_driver(struct qlcnic_adapter *adapter);
 static int qlcnic_83xx_check_heartbeat(struct qlcnic_adapter *p_dev);
 static int qlcnic_83xx_restart_hw(struct qlcnic_adapter *adapter);
+static int qlcnic_83xx_check_hw_status(struct qlcnic_adapter *p_dev);
+static int qlcnic_83xx_get_reset_instruction_template(struct qlcnic_adapter *);
+static void qlcnic_83xx_stop_hw(struct qlcnic_adapter *);
 
 /* Template header */
 struct qlc_83xx_reset_hdr {
@@ -380,7 +383,7 @@ static int qlcnic_83xx_idc_tx_soft_reset(struct qlcnic_adapter *adapter)
 	qlcnic_up(adapter, netdev);
 	netif_device_attach(netdev);
 	clear_bit(__QLCNIC_RESETTING, &adapter->state);
-	dev_err(&adapter->pdev->dev, "%s:\n", __func__);
+	netdev_info(adapter->netdev, "%s: soft reset complete.\n", __func__);
 
 	return 0;
 }
@@ -614,8 +617,7 @@ int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *adapter)
 	qlcnic_83xx_reinit_mbx_work(adapter->ahw->mailbox);
 	qlcnic_83xx_enable_mbx_interrupt(adapter);
 
-	/* register for NIC IDC AEN Events */
-	qlcnic_83xx_register_nic_idc_func(adapter, 1);
+	qlcnic_83xx_initialize_nic(adapter, 1);
 
 	err = qlcnic_sriov_pf_reinit(adapter);
 	if (err)
@@ -1529,7 +1531,7 @@ static int qlcnic_83xx_check_cmd_peg_status(struct qlcnic_adapter *p_dev)
 	return -EIO;
 }
 
-int qlcnic_83xx_check_hw_status(struct qlcnic_adapter *p_dev)
+static int qlcnic_83xx_check_hw_status(struct qlcnic_adapter *p_dev)
 {
 	int err;
 
@@ -1602,7 +1604,7 @@ static int qlcnic_83xx_reset_template_checksum(struct qlcnic_adapter *p_dev)
 	}
 }
 
-int qlcnic_83xx_get_reset_instruction_template(struct qlcnic_adapter *p_dev)
+static int qlcnic_83xx_get_reset_instruction_template(struct qlcnic_adapter *p_dev)
 {
 	struct qlcnic_hardware_context *ahw = p_dev->ahw;
 	u32 addr, count, prev_ver, curr_ver;
@@ -1946,7 +1948,7 @@ static void qlcnic_83xx_exec_template_cmd(struct qlcnic_adapter *p_dev,
 	p_dev->ahw->reset.seq_index = index;
 }
 
-void qlcnic_83xx_stop_hw(struct qlcnic_adapter *p_dev)
+static void qlcnic_83xx_stop_hw(struct qlcnic_adapter *p_dev)
 {
 	p_dev->ahw->reset.seq_index = 0;
 
@@ -2029,7 +2031,7 @@ static int qlcnic_83xx_restart_hw(struct qlcnic_adapter *adapter)
 	return 0;
 }
 
-int qlcnic_83xx_get_nic_configuration(struct qlcnic_adapter *adapter)
+static int qlcnic_83xx_get_nic_configuration(struct qlcnic_adapter *adapter)
 {
 	int err;
 	struct qlcnic_info nic_info;
@@ -2213,9 +2215,9 @@ static void qlcnic_83xx_init_rings(struct qlcnic_adapter *adapter)
 int qlcnic_83xx_init(struct qlcnic_adapter *adapter, int pci_using_dac)
 {
 	struct qlcnic_hardware_context *ahw = adapter->ahw;
-	struct qlcnic_dcb *dcb;
 	int err = 0;
 
+	adapter->rx_mac_learn = false;
 	ahw->msix_supported = !!qlcnic_use_msi_x;
 
 	qlcnic_83xx_init_rings(adapter);
@@ -2265,8 +2267,7 @@ int qlcnic_83xx_init(struct qlcnic_adapter *adapter, int pci_using_dac)
 
 	INIT_DELAYED_WORK(&adapter->idc_aen_work, qlcnic_83xx_idc_aen_work);
 
-	/* register for NIC IDC AEN Events */
-	qlcnic_83xx_register_nic_idc_func(adapter, 1);
+	qlcnic_83xx_initialize_nic(adapter, 1);
 
 	/* Configure default, SR-IOV or Virtual NIC mode of operation */
 	err = qlcnic_83xx_configure_opmode(adapter);
@@ -2279,11 +2280,6 @@ int qlcnic_83xx_init(struct qlcnic_adapter *adapter, int pci_using_dac)
 	if (err)
 		goto disable_mbx_intr;
 
-	dcb = adapter->dcb;
-
-	if (dcb && qlcnic_dcb_attach(dcb))
-		qlcnic_clear_dcb_ops(dcb);
-
 	/* Periodically monitor device status */
 	qlcnic_83xx_idc_poll_dev_state(&adapter->fw_work.work);
 	return 0;
@@ -2314,7 +2310,7 @@ void qlcnic_83xx_aer_stop_poll_work(struct qlcnic_adapter *adapter)
 		qlcnic_83xx_disable_vnic_mode(adapter, 1);
 
 	qlcnic_83xx_idc_detach_driver(adapter);
-	qlcnic_83xx_register_nic_idc_func(adapter, 0);
+	qlcnic_83xx_initialize_nic(adapter, 0);
 
 	cancel_delayed_work_sync(&adapter->idc_aen_work);
 }
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
index 734d28602ac3..be7d7a62cc0d 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
@@ -8,7 +8,7 @@
 #include "qlcnic.h"
 #include "qlcnic_hw.h"
 
-int qlcnic_83xx_enable_vnic_mode(struct qlcnic_adapter *adapter, int lock)
+static int qlcnic_83xx_enable_vnic_mode(struct qlcnic_adapter *adapter, int lock)
 {
 	if (lock) {
 		if (qlcnic_83xx_lock_driver(adapter))
@@ -107,7 +107,7 @@ static int qlcnic_83xx_init_mgmt_vnic(struct qlcnic_adapter *adapter)
 
 		npar = adapter->npars;
 
-		for (i = 0; i < ahw->act_pci_func; i++, npar++) {
+		for (i = 0; i < ahw->total_nic_func; i++, npar++) {
 			dev_info(dev, "id:%d active:%d type:%d port:%d min_bw:%d max_bw:%d mac_addr:%pM\n",
 				 npar->pci_func, npar->active, npar->type,
 				 npar->phy_port, npar->min_bw, npar->max_bw,
@@ -115,7 +115,7 @@ static int qlcnic_83xx_init_mgmt_vnic(struct qlcnic_adapter *adapter)
 		}
 
 		dev_info(dev, "Max functions = %d, active functions = %d\n",
-			 ahw->max_pci_func, ahw->act_pci_func);
+			 ahw->max_pci_func, ahw->total_nic_func);
 
 		if (qlcnic_83xx_set_vnic_opmode(adapter))
 			return err;
@@ -224,10 +224,14 @@ int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *adapter)
 		return -EIO;
 	}
 
-	if (ahw->capabilities & QLC_83XX_ESWITCH_CAPABILITY)
+	if (ahw->capabilities & QLC_83XX_ESWITCH_CAPABILITY) {
 		adapter->flags |= QLCNIC_ESWITCH_ENABLED;
-	else
+		if (adapter->drv_mac_learn)
+			adapter->rx_mac_learn = true;
+	} else {
 		adapter->flags &= ~QLCNIC_ESWITCH_ENABLED;
+		adapter->rx_mac_learn = false;
+	}
 
 	ahw->idc.vnic_state = QLCNIC_DEV_NPAR_NON_OPER;
 	ahw->idc.vnic_wait_limit = QLCNIC_DEV_NPAR_OPER_TIMEO;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
index 859cb161fc63..64dcbf33d8f0 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
@@ -91,18 +91,6 @@ void qlcnic_free_mbx_args(struct qlcnic_cmd_args *cmd)
 	cmd->rsp.arg = NULL;
 }
 
-static int qlcnic_is_valid_nic_func(struct qlcnic_adapter *adapter, u8 pci_func)
-{
-	int i;
-
-	for (i = 0; i < adapter->ahw->act_pci_func; i++) {
-		if (adapter->npars[i].pci_func == pci_func)
-			return i;
-	}
-
-	return -1;
-}
-
 static u32
 qlcnic_poll_rsp(struct qlcnic_adapter *adapter)
 {
@@ -966,13 +954,15 @@ out_free_dma:
 int qlcnic_82xx_get_pci_info(struct qlcnic_adapter *adapter,
 			     struct qlcnic_pci_info *pci_info)
 {
-	int err = 0, i;
+	struct qlcnic_hardware_context *ahw = adapter->ahw;
+	size_t npar_size = sizeof(struct qlcnic_pci_info_le);
+	size_t pci_size = npar_size * ahw->max_vnic_func;
+	u16 nic = 0, fcoe = 0, iscsi = 0;
+	struct qlcnic_pci_info_le *npar;
 	struct qlcnic_cmd_args cmd;
 	dma_addr_t pci_info_dma_t;
-	struct qlcnic_pci_info_le *npar;
 	void *pci_info_addr;
-	size_t npar_size = sizeof(struct qlcnic_pci_info_le);
-	size_t pci_size = npar_size * QLCNIC_MAX_PCI_FUNC;
+	int err = 0, i;
 
 	pci_info_addr = dma_zalloc_coherent(&adapter->pdev->dev, pci_size,
 					    &pci_info_dma_t, GFP_KERNEL);
@@ -989,14 +979,16 @@ int qlcnic_82xx_get_pci_info(struct qlcnic_adapter *adapter,
 	cmd.req.arg[3] = pci_size;
 	err = qlcnic_issue_cmd(adapter, &cmd);
 
-	adapter->ahw->act_pci_func = 0;
+	ahw->total_nic_func = 0;
 	if (err == QLCNIC_RCODE_SUCCESS) {
-		for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++, npar++, pci_info++) {
+		for (i = 0; i < ahw->max_vnic_func; i++, npar++, pci_info++) {
 			pci_info->id = le16_to_cpu(npar->id);
 			pci_info->active = le16_to_cpu(npar->active);
+			if (!pci_info->active)
+				continue;
 			pci_info->type = le16_to_cpu(npar->type);
-			if (pci_info->type == QLCNIC_TYPE_NIC)
-				adapter->ahw->act_pci_func++;
+			err = qlcnic_get_pci_func_type(adapter, pci_info->type,
+						       &nic, &fcoe, &iscsi);
 			pci_info->default_port =
 				le16_to_cpu(npar->default_port);
 			pci_info->tx_min_bw =
@@ -1011,6 +1003,14 @@ int qlcnic_82xx_get_pci_info(struct qlcnic_adapter *adapter,
 		err = -EIO;
 	}
 
+	ahw->total_nic_func = nic;
+	ahw->total_pci_func = nic + fcoe + iscsi;
+	if (ahw->total_nic_func == 0 || ahw->total_pci_func == 0) {
+		dev_err(&adapter->pdev->dev,
+			"%s: Invalid function count: total nic func[%x], total pci func[%x]\n",
+			__func__, ahw->total_nic_func, ahw->total_pci_func);
+		err = -EIO;
+	}
 	qlcnic_free_mbx_args(&cmd);
 out_free_dma:
 	dma_free_coherent(&adapter->pdev->dev, pci_size, pci_info_addr,
@@ -1203,7 +1203,7 @@ int qlcnic_get_eswitch_stats(struct qlcnic_adapter *adapter, const u8 eswitch,
 	esw_stats->numbytes = QLCNIC_STATS_NOT_AVAIL;
 	esw_stats->context_id = eswitch;
 
-	for (i = 0; i < adapter->ahw->act_pci_func; i++) {
+	for (i = 0; i < adapter->ahw->total_nic_func; i++) {
 		if (adapter->npars[i].phy_port != eswitch)
 			continue;
 
@@ -1236,15 +1236,16 @@ int qlcnic_get_eswitch_stats(struct qlcnic_adapter *adapter, const u8 eswitch,
 int qlcnic_clear_esw_stats(struct qlcnic_adapter *adapter, const u8 func_esw,
 		const u8 port, const u8 rx_tx)
 {
+	struct qlcnic_hardware_context *ahw = adapter->ahw;
+	struct qlcnic_cmd_args cmd;
 	int err;
 	u32 arg1;
-	struct qlcnic_cmd_args cmd;
 
-	if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC)
+	if (ahw->op_mode != QLCNIC_MGMT_FUNC)
 		return -EIO;
 
 	if (func_esw == QLCNIC_STATS_PORT) {
-		if (port >= QLCNIC_MAX_PCI_FUNC)
+		if (port >= ahw->max_vnic_func)
 			goto err_ret;
 	} else if (func_esw == QLCNIC_STATS_ESWITCH) {
 		if (port >= QLCNIC_NIU_MAX_XG_PORTS)
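
An illustrative sketch of the per-type PCI function counting that both get_pci_info paths now share via qlcnic_get_pci_func_type(): NIC, FCoE and iSCSI functions are tallied separately, and a zero NIC or zero total count is rejected as invalid. The enum values, helper and table below are made up for the example; they are not the driver's definitions.

#include <stdio.h>

enum func_type { TYPE_NIC = 1, TYPE_FCOE = 2, TYPE_ISCSI = 3 };

struct func_counts { unsigned nic, fcoe, iscsi; };

static int count_func(enum func_type type, struct func_counts *c)
{
	switch (type) {
	case TYPE_NIC:   c->nic++;   return 0;
	case TYPE_FCOE:  c->fcoe++;  return 0;
	case TYPE_ISCSI: c->iscsi++; return 0;
	default:         return -1;	/* unknown PCI function type */
	}
}

int main(void)
{
	enum func_type table[] = { TYPE_NIC, TYPE_NIC, TYPE_FCOE, TYPE_ISCSI };
	struct func_counts c = { 0, 0, 0 };
	unsigned i, total;

	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		count_func(table[i], &c);

	total = c.nic + c.fcoe + c.iscsi;
	if (c.nic == 0 || total == 0)
		printf("invalid function count\n");
	else
		printf("nic=%u total=%u\n", c.nic, total);	/* nic=2 total=4 */
	return 0;
}

Counting all types, not just NIC functions, is what lets act_pci_func be replaced by the separate total_nic_func and total_pci_func fields seen earlier in qlcnic.h.
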
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
index 86bca7c14f99..77f1bce432d2 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
@@ -15,7 +15,6 @@
 
 #define QLC_DCB_GET_MAP(V)		(1 << V)
 
-#define QLC_DCB_AEN_BIT			0x2
 #define QLC_DCB_FW_VER			0x2
 #define QLC_DCB_MAX_TC			0x8
 #define QLC_DCB_MAX_APP			0x8
@@ -71,7 +70,6 @@ static void qlcnic_82xx_dcb_aen_handler(struct qlcnic_dcb *, void *);
 static int qlcnic_83xx_dcb_get_hw_capability(struct qlcnic_dcb *);
 static int qlcnic_83xx_dcb_query_cee_param(struct qlcnic_dcb *, char *, u8);
 static int qlcnic_83xx_dcb_get_cee_cfg(struct qlcnic_dcb *);
-static int qlcnic_83xx_dcb_register_aen(struct qlcnic_dcb *, bool);
 static void qlcnic_83xx_dcb_aen_handler(struct qlcnic_dcb *, void *);
 
 struct qlcnic_dcb_capability {
@@ -179,7 +177,6 @@ static struct qlcnic_dcb_ops qlcnic_83xx_dcb_ops = {
 	.get_hw_capability	= qlcnic_83xx_dcb_get_hw_capability,
 	.query_cee_param	= qlcnic_83xx_dcb_query_cee_param,
 	.get_cee_cfg		= qlcnic_83xx_dcb_get_cee_cfg,
-	.register_aen		= qlcnic_83xx_dcb_register_aen,
 	.aen_handler		= qlcnic_83xx_dcb_aen_handler,
 };
 
@@ -260,6 +257,9 @@ int qlcnic_register_dcb(struct qlcnic_adapter *adapter)
 {
 	struct qlcnic_dcb *dcb;
 
+	if (qlcnic_sriov_vf_check(adapter))
+		return 0;
+
 	dcb = kzalloc(sizeof(struct qlcnic_dcb), GFP_ATOMIC);
 	if (!dcb)
 		return -ENOMEM;
@@ -280,7 +280,6 @@ static void __qlcnic_dcb_free(struct qlcnic_dcb *dcb)
 		return;
 
 	adapter = dcb->adapter;
-	qlcnic_dcb_register_aen(dcb, 0);
 
 	while (test_bit(QLCNIC_DCB_AEN_MODE, &dcb->state))
 		usleep_range(10000, 11000);
@@ -304,7 +303,6 @@ static void __qlcnic_dcb_get_info(struct qlcnic_dcb *dcb)
 {
 	qlcnic_dcb_get_hw_capability(dcb);
 	qlcnic_dcb_get_cee_cfg(dcb);
-	qlcnic_dcb_register_aen(dcb, 1);
 }
 
 static int __qlcnic_dcb_attach(struct qlcnic_dcb *dcb)
@@ -642,29 +640,6 @@ static int qlcnic_83xx_dcb_get_cee_cfg(struct qlcnic_dcb *dcb)
 	return err;
 }
 
-static int qlcnic_83xx_dcb_register_aen(struct qlcnic_dcb *dcb, bool flag)
-{
-	u8 val = (flag ? QLCNIC_CMD_INIT_NIC_FUNC : QLCNIC_CMD_STOP_NIC_FUNC);
-	struct qlcnic_adapter *adapter = dcb->adapter;
-	struct qlcnic_cmd_args cmd;
-	int err;
-
-	err = qlcnic_alloc_mbx_args(&cmd, adapter, val);
-	if (err)
-		return err;
-
-	cmd.req.arg[1] = QLC_DCB_AEN_BIT;
-
-	err = qlcnic_issue_cmd(adapter, &cmd);
-	if (err)
-		dev_err(&adapter->pdev->dev, "Failed to %s DCBX AEN, err %d\n",
-			(flag ? "register" : "unregister"), err);
-
-	qlcnic_free_mbx_args(&cmd);
-
-	return err;
-}
-
 static void qlcnic_83xx_dcb_aen_handler(struct qlcnic_dcb *dcb, void *data)
 {
 	u32 *val = data;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h
index c04ae0cdc108..3cf4a10fbe1e 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h
@@ -25,7 +25,6 @@ struct qlcnic_dcb_ops {
 	int (*get_hw_capability) (struct qlcnic_dcb *);
 	int (*query_cee_param) (struct qlcnic_dcb *, char *, u8);
 	void (*init_dcbnl_ops) (struct qlcnic_dcb *);
-	int (*register_aen) (struct qlcnic_dcb *, bool);
 	void (*aen_handler) (struct qlcnic_dcb *, void *);
 	int (*get_cee_cfg) (struct qlcnic_dcb *);
 	void (*get_info) (struct qlcnic_dcb *);
@@ -103,13 +102,6 @@ static inline int qlcnic_dcb_get_cee_cfg(struct qlcnic_dcb *dcb)
 	return 0;
 }
 
-static inline void
-qlcnic_dcb_register_aen(struct qlcnic_dcb *dcb, u8 flag)
-{
-	if (dcb && dcb->ops->register_aen)
-		dcb->ops->register_aen(dcb, flag);
-}
-
 static inline void qlcnic_dcb_aen_handler(struct qlcnic_dcb *dcb, void *msg)
 {
 	if (dcb && dcb->ops->aen_handler)
@@ -121,4 +113,10 @@ static inline void qlcnic_dcb_init_dcbnl_ops(struct qlcnic_dcb *dcb)
 	if (dcb && dcb->ops->init_dcbnl_ops)
 		dcb->ops->init_dcbnl_ops(dcb);
 }
+
+static inline void qlcnic_dcb_enable(struct qlcnic_dcb *dcb)
+{
+	if (dcb && qlcnic_dcb_attach(dcb))
+		qlcnic_clear_dcb_ops(dcb);
+}
 #endif
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index 6b08194aa0d4..acee1a5d80c6 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -229,7 +229,7 @@ static const u32 ext_diag_registers[] = {
 	-1
 };
 
-#define QLCNIC_MGMT_API_VERSION	2
+#define QLCNIC_MGMT_API_VERSION	3
 #define QLCNIC_ETHTOOL_REGS_VER	4
 
 static inline int qlcnic_get_ring_regs_len(struct qlcnic_adapter *adapter)
@@ -278,21 +278,8 @@ qlcnic_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
 		sizeof(drvinfo->version));
 }
 
-static int
-qlcnic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
-{
-	struct qlcnic_adapter *adapter = netdev_priv(dev);
-
-	if (qlcnic_82xx_check(adapter))
-		return qlcnic_82xx_get_settings(adapter, ecmd);
-	else if (qlcnic_83xx_check(adapter))
-		return qlcnic_83xx_get_settings(adapter, ecmd);
-
-	return -EIO;
-}
-
-int qlcnic_82xx_get_settings(struct qlcnic_adapter *adapter,
-			     struct ethtool_cmd *ecmd)
+static int qlcnic_82xx_get_settings(struct qlcnic_adapter *adapter,
+				    struct ethtool_cmd *ecmd)
 {
 	struct qlcnic_hardware_context *ahw = adapter->ahw;
 	u32 speed, reg;
@@ -433,6 +420,20 @@ skip:
 	return 0;
 }
 
+static int qlcnic_get_settings(struct net_device *dev,
+			       struct ethtool_cmd *ecmd)
+{
+	struct qlcnic_adapter *adapter = netdev_priv(dev);
+
+	if (qlcnic_82xx_check(adapter))
+		return qlcnic_82xx_get_settings(adapter, ecmd);
+	else if (qlcnic_83xx_check(adapter))
+		return qlcnic_83xx_get_settings(adapter, ecmd);
+
+	return -EIO;
+}
+
+
 static int qlcnic_set_port_config(struct qlcnic_adapter *adapter,
 				  struct ethtool_cmd *ecmd)
 {
@@ -527,6 +528,9 @@ qlcnic_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
 	regs_buff[0] = (0xcafe0000 | (QLCNIC_DEV_INFO_SIZE & 0xffff));
 	regs_buff[1] = QLCNIC_MGMT_API_VERSION;
 
+	if (adapter->ahw->capabilities & QLC_83XX_ESWITCH_CAPABILITY)
+		regs_buff[2] = adapter->ahw->max_vnic_func;
+
 	if (qlcnic_82xx_check(adapter))
 		i = qlcnic_82xx_get_registers(adapter, regs_buff);
 	else
@@ -732,6 +736,7 @@ static int qlcnic_set_channels(struct net_device *dev,
 				   channel->rx_count);
 			return err;
 		}
+		adapter->drv_rss_rings = channel->rx_count;
 	}
 
 	if (channel->tx_count) {
@@ -742,10 +747,12 @@ static int qlcnic_set_channels(struct net_device *dev,
 				   channel->tx_count);
 			return err;
 		}
+		adapter->drv_tss_rings = channel->tx_count;
 	}
 
-	err = qlcnic_setup_rings(adapter, channel->rx_count,
-				 channel->tx_count);
+	adapter->flags |= QLCNIC_TSS_RSS;
+
+	err = qlcnic_setup_rings(adapter);
 	netdev_info(dev, "Allocated %d SDS rings and %d Tx rings\n",
 		    adapter->drv_sds_rings, adapter->drv_tx_rings);
 
@@ -1052,7 +1059,7 @@ int qlcnic_do_lb_test(struct qlcnic_adapter *adapter, u8 mode)
 	return 0;
 }
 
-int qlcnic_loopback_test(struct net_device *netdev, u8 mode)
+static int qlcnic_loopback_test(struct net_device *netdev, u8 mode)
 {
 	struct qlcnic_adapter *adapter = netdev_priv(netdev);
 	int drv_tx_rings = adapter->drv_tx_rings;
@@ -1491,9 +1498,7 @@ static int qlcnic_set_intr_coalesce(struct net_device *netdev,
 			struct ethtool_coalesce *ethcoal)
 {
 	struct qlcnic_adapter *adapter = netdev_priv(netdev);
-	struct qlcnic_nic_intr_coalesce *coal;
-	u32 rx_coalesce_usecs, rx_max_frames;
-	u32 tx_coalesce_usecs, tx_max_frames;
+	int err;
 
 	if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
 		return -EINVAL;
@@ -1503,82 +1508,31 @@ static int qlcnic_set_intr_coalesce(struct net_device *netdev,
 	* unsupported parameters are set.
 	*/
 	if (ethcoal->rx_coalesce_usecs > 0xffff ||
-		ethcoal->rx_max_coalesced_frames > 0xffff ||
-		ethcoal->tx_coalesce_usecs > 0xffff ||
-		ethcoal->tx_max_coalesced_frames > 0xffff ||
-		ethcoal->rx_coalesce_usecs_irq ||
-		ethcoal->rx_max_coalesced_frames_irq ||
-		ethcoal->tx_coalesce_usecs_irq ||
-		ethcoal->tx_max_coalesced_frames_irq ||
-		ethcoal->stats_block_coalesce_usecs ||
-		ethcoal->use_adaptive_rx_coalesce ||
-		ethcoal->use_adaptive_tx_coalesce ||
-		ethcoal->pkt_rate_low ||
-		ethcoal->rx_coalesce_usecs_low ||
-		ethcoal->rx_max_coalesced_frames_low ||
-		ethcoal->tx_coalesce_usecs_low ||
-		ethcoal->tx_max_coalesced_frames_low ||
-		ethcoal->pkt_rate_high ||
-		ethcoal->rx_coalesce_usecs_high ||
-		ethcoal->rx_max_coalesced_frames_high ||
-		ethcoal->tx_coalesce_usecs_high ||
-		ethcoal->tx_max_coalesced_frames_high)
+	    ethcoal->rx_max_coalesced_frames > 0xffff ||
+	    ethcoal->tx_coalesce_usecs > 0xffff ||
+	    ethcoal->tx_max_coalesced_frames > 0xffff ||
+	    ethcoal->rx_coalesce_usecs_irq ||
+	    ethcoal->rx_max_coalesced_frames_irq ||
+	    ethcoal->tx_coalesce_usecs_irq ||
+	    ethcoal->tx_max_coalesced_frames_irq ||
+	    ethcoal->stats_block_coalesce_usecs ||
+	    ethcoal->use_adaptive_rx_coalesce ||
+	    ethcoal->use_adaptive_tx_coalesce ||
+	    ethcoal->pkt_rate_low ||
+	    ethcoal->rx_coalesce_usecs_low ||
+	    ethcoal->rx_max_coalesced_frames_low ||
+	    ethcoal->tx_coalesce_usecs_low ||
+	    ethcoal->tx_max_coalesced_frames_low ||
+	    ethcoal->pkt_rate_high ||
+	    ethcoal->rx_coalesce_usecs_high ||
+	    ethcoal->rx_max_coalesced_frames_high ||
+	    ethcoal->tx_coalesce_usecs_high ||
+	    ethcoal->tx_max_coalesced_frames_high)
 		return -EINVAL;
 
-	coal = &adapter->ahw->coal;
+	err = qlcnic_config_intr_coalesce(adapter, ethcoal);
 
-	if (qlcnic_83xx_check(adapter)) {
-		if (!ethcoal->tx_coalesce_usecs ||
-		    !ethcoal->tx_max_coalesced_frames ||
-		    !ethcoal->rx_coalesce_usecs ||
-		    !ethcoal->rx_max_coalesced_frames) {
-			coal->flag = QLCNIC_INTR_DEFAULT;
-			coal->type = QLCNIC_INTR_COAL_TYPE_RX;
-			coal->rx_time_us = QLCNIC_DEF_INTR_COALESCE_RX_TIME_US;
-			coal->rx_packets = QLCNIC_DEF_INTR_COALESCE_RX_PACKETS;
-			coal->tx_time_us = QLCNIC_DEF_INTR_COALESCE_TX_TIME_US;
-			coal->tx_packets = QLCNIC_DEF_INTR_COALESCE_TX_PACKETS;
-		} else {
-			tx_coalesce_usecs = ethcoal->tx_coalesce_usecs;
-			tx_max_frames = ethcoal->tx_max_coalesced_frames;
-			rx_coalesce_usecs = ethcoal->rx_coalesce_usecs;
-			rx_max_frames = ethcoal->rx_max_coalesced_frames;
-			coal->flag = 0;
-
-			if ((coal->rx_time_us == rx_coalesce_usecs) &&
-			    (coal->rx_packets == rx_max_frames)) {
-				coal->type = QLCNIC_INTR_COAL_TYPE_TX;
-				coal->tx_time_us = tx_coalesce_usecs;
-				coal->tx_packets = tx_max_frames;
-			} else if ((coal->tx_time_us == tx_coalesce_usecs) &&
-				   (coal->tx_packets == tx_max_frames)) {
-				coal->type = QLCNIC_INTR_COAL_TYPE_RX;
-				coal->rx_time_us = rx_coalesce_usecs;
-				coal->rx_packets = rx_max_frames;
-			} else {
-				coal->type = QLCNIC_INTR_COAL_TYPE_RX;
-				coal->rx_time_us = rx_coalesce_usecs;
-				coal->rx_packets = rx_max_frames;
-				coal->tx_time_us = tx_coalesce_usecs;
-				coal->tx_packets = tx_max_frames;
-			}
-		}
-	} else {
-		if (!ethcoal->rx_coalesce_usecs ||
-		    !ethcoal->rx_max_coalesced_frames) {
-			coal->flag = QLCNIC_INTR_DEFAULT;
-			coal->rx_time_us = QLCNIC_DEF_INTR_COALESCE_RX_TIME_US;
-			coal->rx_packets = QLCNIC_DEF_INTR_COALESCE_RX_PACKETS;
-		} else {
-			coal->flag = 0;
-			coal->rx_time_us = ethcoal->rx_coalesce_usecs;
-			coal->rx_packets = ethcoal->rx_max_coalesced_frames;
-		}
-	}
-
-	qlcnic_config_intr_coalesce(adapter);
-
-	return 0;
+	return err;
 }
 
 static int qlcnic_get_intr_coalesce(struct net_device *netdev,
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h
index d262211b03b3..34e467b239a1 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h
@@ -698,7 +698,6 @@ struct qlcnic_legacy_intr_set {
 };
 
 #define QLCNIC_MSIX_BASE	0x132110
-#define QLCNIC_MAX_PCI_FUNC	8
 #define QLCNIC_MAX_VLAN_FILTERS	64
 
 #define FLASH_ROM_WINDOW	0x42110030
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
index 6f7f60c09f07..03d18a0be6ce 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
@@ -455,14 +455,14 @@ int qlcnic_82xx_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr,
 
 int qlcnic_nic_del_mac(struct qlcnic_adapter *adapter, const u8 *addr)
 {
+	struct qlcnic_mac_vlan_list *cur;
 	struct list_head *head;
-	struct qlcnic_mac_list_s *cur;
 	int err = -EINVAL;
 
 	/* Delete MAC from the existing list */
 	list_for_each(head, &adapter->mac_list) {
-		cur = list_entry(head, struct qlcnic_mac_list_s, list);
-		if (memcmp(addr, cur->mac_addr, ETH_ALEN) == 0) {
+		cur = list_entry(head, struct qlcnic_mac_vlan_list, list);
+		if (ether_addr_equal(addr, cur->mac_addr)) {
 			err = qlcnic_sre_macaddr_change(adapter, cur->mac_addr,
 							0, QLCNIC_MAC_DEL);
 			if (err)
@@ -477,17 +477,18 @@ int qlcnic_nic_del_mac(struct qlcnic_adapter *adapter, const u8 *addr)
 
 int qlcnic_nic_add_mac(struct qlcnic_adapter *adapter, const u8 *addr, u16 vlan)
 {
+	struct qlcnic_mac_vlan_list *cur;
 	struct list_head *head;
-	struct qlcnic_mac_list_s *cur;
 
 	/* look up if already exists */
 	list_for_each(head, &adapter->mac_list) {
-		cur = list_entry(head, struct qlcnic_mac_list_s, list);
-		if (memcmp(addr, cur->mac_addr, ETH_ALEN) == 0)
+		cur = list_entry(head, struct qlcnic_mac_vlan_list, list);
+		if (ether_addr_equal(addr, cur->mac_addr) &&
+		    cur->vlan_id == vlan)
 			return 0;
 	}
 
-	cur = kzalloc(sizeof(struct qlcnic_mac_list_s), GFP_ATOMIC);
+	cur = kzalloc(sizeof(*cur), GFP_ATOMIC);
 	if (cur == NULL)
 		return -ENOMEM;
 
@@ -499,11 +500,12 @@ int qlcnic_nic_add_mac(struct qlcnic_adapter *adapter, const u8 *addr, u16 vlan)
 		return -EIO;
 	}
 
+	cur->vlan_id = vlan;
 	list_add_tail(&cur->list, &adapter->mac_list);
 	return 0;
 }
 
-void __qlcnic_set_multi(struct net_device *netdev, u16 vlan)
+static void __qlcnic_set_multi(struct net_device *netdev, u16 vlan)
 {
 	struct qlcnic_adapter *adapter = netdev_priv(netdev);
 	struct qlcnic_hardware_context *ahw = adapter->ahw;
@@ -516,8 +518,7 @@ void __qlcnic_set_multi(struct net_device *netdev, u16 vlan)
 	if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
 		return;
 
-	if (!qlcnic_sriov_vf_check(adapter))
-		qlcnic_nic_add_mac(adapter, adapter->mac_addr, vlan);
+	qlcnic_nic_add_mac(adapter, adapter->mac_addr, vlan);
 	qlcnic_nic_add_mac(adapter, bcast_addr, vlan);
 
 	if (netdev->flags & IFF_PROMISC) {
@@ -526,15 +527,11 @@ void __qlcnic_set_multi(struct net_device *netdev, u16 vlan)
 	} else if ((netdev->flags & IFF_ALLMULTI) ||
 		   (netdev_mc_count(netdev) > ahw->max_mc_count)) {
 		mode = VPORT_MISS_MODE_ACCEPT_MULTI;
-	} else if (!netdev_mc_empty(netdev) &&
-		   !qlcnic_sriov_vf_check(adapter)) {
+	} else if (!netdev_mc_empty(netdev)) {
 		netdev_for_each_mc_addr(ha, netdev)
 			qlcnic_nic_add_mac(adapter, ha->addr, vlan);
 	}
 
-	if (qlcnic_sriov_vf_check(adapter))
-		qlcnic_vf_add_mc_list(netdev, vlan);
-
 	/* configure unicast MAC address, if there is not sufficient space
 	 * to store all the unicast addresses then enable promiscuous mode
 	 */
@@ -545,14 +542,15 @@ void __qlcnic_set_multi(struct net_device *netdev, u16 vlan)
 			qlcnic_nic_add_mac(adapter, ha->addr, vlan);
 	}
 
-	if (!qlcnic_sriov_vf_check(adapter)) {
-		if (mode == VPORT_MISS_MODE_ACCEPT_ALL &&
-		    !adapter->fdb_mac_learn) {
-			qlcnic_alloc_lb_filters_mem(adapter);
-			adapter->drv_mac_learn = true;
-		} else {
-			adapter->drv_mac_learn = false;
-		}
+	if (mode == VPORT_MISS_MODE_ACCEPT_ALL &&
+	    !adapter->fdb_mac_learn) {
+		qlcnic_alloc_lb_filters_mem(adapter);
+		adapter->drv_mac_learn = 1;
+		if (adapter->flags & QLCNIC_ESWITCH_ENABLED)
+			adapter->rx_mac_learn = true;
+	} else {
+		adapter->drv_mac_learn = 0;
+		adapter->rx_mac_learn = false;
 	}
 
 	qlcnic_nic_set_promisc(adapter, mode);
@@ -561,16 +559,17 @@ void __qlcnic_set_multi(struct net_device *netdev, u16 vlan)
 void qlcnic_set_multi(struct net_device *netdev)
 {
 	struct qlcnic_adapter *adapter = netdev_priv(netdev);
+	struct qlcnic_mac_vlan_list *cur;
 	struct netdev_hw_addr *ha;
-	struct qlcnic_mac_list_s *cur;
+	size_t temp;
 
 	if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
 		return;
 	if (qlcnic_sriov_vf_check(adapter)) {
 		if (!netdev_mc_empty(netdev)) {
 			netdev_for_each_mc_addr(ha, netdev) {
-				cur = kzalloc(sizeof(struct qlcnic_mac_list_s),
-					      GFP_ATOMIC);
+				temp = sizeof(struct qlcnic_mac_vlan_list);
+				cur = kzalloc(temp, GFP_ATOMIC);
 				if (cur == NULL)
 					break;
 				memcpy(cur->mac_addr,
@@ -605,11 +604,11 @@ int qlcnic_82xx_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode)
 
 void qlcnic_82xx_free_mac_list(struct qlcnic_adapter *adapter)
 {
-	struct qlcnic_mac_list_s *cur;
 	struct list_head *head = &adapter->mac_list;
+	struct qlcnic_mac_vlan_list *cur;
 
 	while (!list_empty(head)) {
-		cur = list_entry(head->next, struct qlcnic_mac_list_s, list);
+		cur = list_entry(head->next, struct qlcnic_mac_vlan_list, list);
 		qlcnic_sre_macaddr_change(adapter,
 				cur->mac_addr, 0, QLCNIC_MAC_DEL);
 		list_del(&cur->list);
@@ -756,10 +755,7 @@ int qlcnic_82xx_read_phys_port_id(struct qlcnic_adapter *adapter)
 	return 0;
 }
 
-/*
- * Send the interrupt coalescing parameter set by ethtool to the card.
- */
-void qlcnic_82xx_config_intr_coalesce(struct qlcnic_adapter *adapter)
+int qlcnic_82xx_set_rx_coalesce(struct qlcnic_adapter *adapter)
 {
 	struct qlcnic_nic_req req;
 	int rv;
@@ -781,10 +777,32 @@ void qlcnic_82xx_config_intr_coalesce(struct qlcnic_adapter *adapter)
 	if (rv != 0)
 		dev_err(&adapter->netdev->dev,
 			"Could not send interrupt coalescing parameters\n");
+
+	return rv;
+}
+
+/* Send the interrupt coalescing parameter set by ethtool to the card. */
+int qlcnic_82xx_config_intr_coalesce(struct qlcnic_adapter *adapter,
+				     struct ethtool_coalesce *ethcoal)
+{
+	struct qlcnic_nic_intr_coalesce *coal = &adapter->ahw->coal;
+	int rv;
+
+	coal->flag = QLCNIC_INTR_DEFAULT;
+	coal->rx_time_us = ethcoal->rx_coalesce_usecs;
+	coal->rx_packets = ethcoal->rx_max_coalesced_frames;
+
+	rv = qlcnic_82xx_set_rx_coalesce(adapter);
+
+	if (rv)
+		netdev_err(adapter->netdev,
+			   "Failed to set Rx coalescing parameters\n");
+
+	return rv;
 }
 
-#define QLCNIC_ENABLE_IPV4_LRO		1
-#define QLCNIC_ENABLE_IPV6_LRO		2
+#define QLCNIC_ENABLE_IPV4_LRO		BIT_0
+#define QLCNIC_ENABLE_IPV6_LRO		(BIT_1 | BIT_9)
 
 int qlcnic_82xx_config_hw_lro(struct qlcnic_adapter *adapter, int enable)
 {
@@ -948,7 +966,7 @@ int qlcnic_82xx_linkevent_request(struct qlcnic_adapter *adapter, int enable)
 	return rv;
 }
 
-int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter)
+static int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter)
 {
 	struct qlcnic_nic_req req;
 	u64 word;
@@ -1247,7 +1265,7 @@ static int qlcnic_pci_mem_access_direct(struct qlcnic_adapter *adapter,
 	return 0;
 }
 
-void
+static void
 qlcnic_pci_camqm_read_2M(struct qlcnic_adapter *adapter, u64 off, u64 *data)
 {
 	void __iomem *addr = adapter->ahw->pci_base0 +
@@ -1258,7 +1276,7 @@ qlcnic_pci_camqm_read_2M(struct qlcnic_adapter *adapter, u64 off, u64 *data)
 	mutex_unlock(&adapter->ahw->mem_lock);
 }
 
-void
+static void
 qlcnic_pci_camqm_write_2M(struct qlcnic_adapter *adapter, u64 off, u64 data)
 {
 	void __iomem *addr = adapter->ahw->pci_base0 +
@@ -1494,7 +1512,7 @@ int qlcnic_82xx_get_board_info(struct qlcnic_adapter *adapter)
 	return 0;
 }
 
-int
+static int
 qlcnic_wol_supported(struct qlcnic_adapter *adapter)
 {
 	u32 wol_cfg;
@@ -1534,19 +1552,34 @@ int qlcnic_82xx_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate)
 	return rv;
 }
 
-int qlcnic_get_beacon_state(struct qlcnic_adapter *adapter, u8 *h_state)
+void qlcnic_82xx_get_beacon_state(struct qlcnic_adapter *adapter)
 {
+	struct qlcnic_hardware_context *ahw = adapter->ahw;
 	struct qlcnic_cmd_args cmd;
-	int err;
+	u8 beacon_state;
+	int err = 0;
 
-	err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_LED_STATUS);
-	if (!err) {
-		err = qlcnic_issue_cmd(adapter, &cmd);
-		if (!err)
-			*h_state = cmd.rsp.arg[1];
+	if (ahw->extra_capability[0] & QLCNIC_FW_CAPABILITY_2_BEACON) {
+		err = qlcnic_alloc_mbx_args(&cmd, adapter,
+					    QLCNIC_CMD_GET_LED_STATUS);
+		if (!err) {
+			err = qlcnic_issue_cmd(adapter, &cmd);
+			if (err) {
+				netdev_err(adapter->netdev,
+					   "Failed to get current beacon state, err=%d\n",
+					   err);
+			} else {
+				beacon_state = cmd.rsp.arg[1];
+				if (beacon_state == QLCNIC_BEACON_DISABLE)
+					ahw->beacon_state = QLCNIC_BEACON_OFF;
+				else if (beacon_state == QLCNIC_BEACON_EANBLE)
+					ahw->beacon_state = QLCNIC_BEACON_ON;
+			}
+		}
+		qlcnic_free_mbx_args(&cmd);
 	}
-	qlcnic_free_mbx_args(&cmd);
-	return err;
+
+	return;
 }
 
 void qlcnic_82xx_get_func_no(struct qlcnic_adapter *adapter)
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
index 13303e7d1ed7..63d75617d445 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
@@ -162,16 +162,18 @@ struct qlcnic_host_tx_ring;
 struct qlcnic_hardware_context;
 struct qlcnic_adapter;
 
-int qlcnic_82xx_start_firmware(struct qlcnic_adapter *);
 int qlcnic_82xx_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong, int *);
 int qlcnic_82xx_hw_write_wx_2M(struct qlcnic_adapter *, ulong, u32);
 int qlcnic_82xx_config_hw_lro(struct qlcnic_adapter *adapter, int);
 int qlcnic_82xx_nic_set_promisc(struct qlcnic_adapter *adapter, u32);
 int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter,
 			 struct net_device *netdev);
+void qlcnic_82xx_get_beacon_state(struct qlcnic_adapter *);
 void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter,
 			       u64 *uaddr, u16 vlan_id);
-void qlcnic_82xx_config_intr_coalesce(struct qlcnic_adapter *adapter);
+int qlcnic_82xx_config_intr_coalesce(struct qlcnic_adapter *,
+				     struct ethtool_coalesce *);
+int qlcnic_82xx_set_rx_coalesce(struct qlcnic_adapter *);
 int qlcnic_82xx_config_rss(struct qlcnic_adapter *adapter, int);
 void qlcnic_82xx_config_ipaddr(struct qlcnic_adapter *adapter,
 			       __be32, int);
@@ -181,9 +183,6 @@ int qlcnic_82xx_clear_lb_mode(struct qlcnic_adapter *adapter, u8);
 int qlcnic_82xx_set_lb_mode(struct qlcnic_adapter *, u8);
 void qlcnic_82xx_write_crb(struct qlcnic_adapter *, char *, loff_t, size_t);
 void qlcnic_82xx_read_crb(struct qlcnic_adapter *, char *, loff_t, size_t);
-void qlcnic_82xx_dev_request_reset(struct qlcnic_adapter *, u32);
-int qlcnic_82xx_setup_intr(struct qlcnic_adapter *);
-irqreturn_t qlcnic_82xx_clear_legacy_intr(struct qlcnic_adapter *);
 int qlcnic_82xx_issue_cmd(struct qlcnic_adapter *adapter,
 			  struct qlcnic_cmd_args *);
 int qlcnic_82xx_mq_intrpt(struct qlcnic_adapter *, int);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index ad1531ae3aa8..30874cda8476 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -124,41 +124,16 @@
 #define qlcnic_83xx_is_ip_align(sts)	(((sts) >> 46) & 1)
 #define qlcnic_83xx_has_vlan_tag(sts)	(((sts) >> 47) & 1)
 
-struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *,
-				     struct qlcnic_host_rds_ring *, u16, u16);
+static int qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring,
+				   int max);
 
-inline void qlcnic_enable_tx_intr(struct qlcnic_adapter *adapter,
-				  struct qlcnic_host_tx_ring *tx_ring)
-{
-	if (qlcnic_check_multi_tx(adapter) &&
-	    !adapter->ahw->diag_test)
-		writel(0x0, tx_ring->crb_intr_mask);
-}
-
-
-static inline void qlcnic_disable_tx_int(struct qlcnic_adapter *adapter,
-					 struct qlcnic_host_tx_ring *tx_ring)
-{
-	if (qlcnic_check_multi_tx(adapter) &&
-	    !adapter->ahw->diag_test)
-		writel(1, tx_ring->crb_intr_mask);
-}
+static struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *,
+					    struct qlcnic_host_rds_ring *,
+					    u16, u16);
 
-inline void qlcnic_83xx_enable_tx_intr(struct qlcnic_adapter *adapter,
-				       struct qlcnic_host_tx_ring *tx_ring)
+static inline u8 qlcnic_mac_hash(u64 mac, u16 vlan)
 {
-	writel(0, tx_ring->crb_intr_mask);
-}
-
-inline void qlcnic_83xx_disable_tx_intr(struct qlcnic_adapter *adapter,
-					struct qlcnic_host_tx_ring *tx_ring)
-{
-	writel(1, tx_ring->crb_intr_mask);
-}
-
-static inline u8 qlcnic_mac_hash(u64 mac)
-{
-	return (u8)((mac & 0xff) ^ ((mac >> 40) & 0xff));
+	return (u8)((mac & 0xff) ^ ((mac >> 40) & 0xff) ^ (vlan & 0xff));
 }
 
 static inline u32 qlcnic_get_ref_handle(struct qlcnic_adapter *adapter,
@@ -202,7 +177,7 @@ static struct qlcnic_filter *qlcnic_find_mac_filter(struct hlist_head *head,
 	struct hlist_node *n;
 
 	hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
-		if (!memcmp(tmp_fil->faddr, addr, ETH_ALEN) &&
+		if (ether_addr_equal(tmp_fil->faddr, addr) &&
 		    tmp_fil->vlan_id == vlan_id)
 			return tmp_fil;
 	}
@@ -210,8 +185,8 @@ static struct qlcnic_filter *qlcnic_find_mac_filter(struct hlist_head *head,
 	return NULL;
 }
 
-void qlcnic_add_lb_filter(struct qlcnic_adapter *adapter, struct sk_buff *skb,
-			  int loopback_pkt, u16 vlan_id)
+static void qlcnic_add_lb_filter(struct qlcnic_adapter *adapter,
+				 struct sk_buff *skb, int loopback_pkt, u16 vlan_id)
 {
 	struct ethhdr *phdr = (struct ethhdr *)(skb->data);
 	struct qlcnic_filter *fil, *tmp_fil;
@@ -221,8 +196,11 @@ void qlcnic_add_lb_filter(struct qlcnic_adapter *adapter, struct sk_buff *skb,
 	u8 hindex, op;
 	int ret;
 
+	if (!qlcnic_sriov_pf_check(adapter) || (vlan_id == 0xffff))
+		vlan_id = 0;
+
 	memcpy(&src_addr, phdr->h_source, ETH_ALEN);
-	hindex = qlcnic_mac_hash(src_addr) &
+	hindex = qlcnic_mac_hash(src_addr, vlan_id) &
 		 (adapter->fhash.fbucket_size - 1);
 
 	if (loopback_pkt) {
@@ -322,31 +300,47 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
 			       struct cmd_desc_type0 *first_desc,
 			       struct sk_buff *skb)
 {
+	struct vlan_ethhdr *vh = (struct vlan_ethhdr *)(skb->data);
+	struct ethhdr *phdr = (struct ethhdr *)(skb->data);
+	struct net_device *netdev = adapter->netdev;
+	u16 protocol = ntohs(skb->protocol);
 	struct qlcnic_filter *fil, *tmp_fil;
-	struct hlist_node *n;
 	struct hlist_head *head;
-	struct net_device *netdev = adapter->netdev;
-	struct ethhdr *phdr = (struct ethhdr *)(skb->data);
+	struct hlist_node *n;
 	u64 src_addr = 0;
 	u16 vlan_id = 0;
-	u8 hindex;
+	u8 hindex, hval;
 
-	if (ether_addr_equal(phdr->h_source, adapter->mac_addr))
-		return;
+	if (!qlcnic_sriov_pf_check(adapter)) {
+		if (ether_addr_equal(phdr->h_source, adapter->mac_addr))
+			return;
+	} else {
+		if (protocol == ETH_P_8021Q) {
+			vh = (struct vlan_ethhdr *)skb->data;
+			vlan_id = ntohs(vh->h_vlan_TCI);
+		} else if (vlan_tx_tag_present(skb)) {
+			vlan_id = vlan_tx_tag_get(skb);
+		}
+
+		if (ether_addr_equal(phdr->h_source, adapter->mac_addr) &&
+		    !vlan_id)
+			return;
+	}
 
 	if (adapter->fhash.fnum >= adapter->fhash.fmax) {
 		adapter->stats.mac_filter_limit_overrun++;
-		netdev_info(netdev, "Can not add more than %d mac addresses\n",
-			    adapter->fhash.fmax);
+		netdev_info(netdev, "Can not add more than %d mac-vlan filters, configured %d\n",
+			    adapter->fhash.fmax, adapter->fhash.fnum);
 		return;
 	}
 
 	memcpy(&src_addr, phdr->h_source, ETH_ALEN);
-	hindex = qlcnic_mac_hash(src_addr) & (adapter->fhash.fbucket_size - 1);
+	hval = qlcnic_mac_hash(src_addr, vlan_id);
+	hindex = hval & (adapter->fhash.fbucket_size - 1);
 	head = &(adapter->fhash.fhead[hindex]);
 
 	hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
-		if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
+		if (ether_addr_equal(tmp_fil->faddr, (u8 *)&src_addr) &&
 		    tmp_fil->vlan_id == vlan_id) {
 			if (jiffies > (QLCNIC_READD_AGE * HZ + tmp_fil->ftime))
 				qlcnic_change_filter(adapter, &src_addr,
@@ -862,7 +856,7 @@ static int qlcnic_poll(struct napi_struct *napi, int budget)
 	if ((work_done < budget) && tx_complete) {
 		napi_complete(&sds_ring->napi);
 		if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
-			qlcnic_enable_int(sds_ring);
+			qlcnic_enable_sds_intr(adapter, sds_ring);
 			qlcnic_enable_tx_intr(adapter, tx_ring);
 		}
 	}
@@ -903,7 +897,7 @@ static int qlcnic_rx_poll(struct napi_struct *napi, int budget)
 	if (work_done < budget) {
 		napi_complete(&sds_ring->napi);
 		if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
-			qlcnic_enable_int(sds_ring);
+			qlcnic_enable_sds_intr(adapter, sds_ring);
 	}
 
 	return work_done;
@@ -1015,9 +1009,9 @@ static void qlcnic_handle_fw_message(int desc_cnt, int index,
 	}
 }
 
-struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *adapter,
-				     struct qlcnic_host_rds_ring *ring,
-				     u16 index, u16 cksum)
+static struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *adapter,
+					    struct qlcnic_host_rds_ring *ring,
+					    u16 index, u16 cksum)
 {
 	struct qlcnic_rx_buffer *buffer;
 	struct sk_buff *skb;
@@ -1236,7 +1230,7 @@ qlcnic_process_lro(struct qlcnic_adapter *adapter,
 	return buffer;
 }
 
-int qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max)
+static int qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max)
 {
 	struct qlcnic_host_rds_ring *rds_ring;
 	struct qlcnic_adapter *adapter = sds_ring->adapter;
@@ -1466,8 +1460,7 @@ int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter,
 	for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
 		sds_ring = &recv_ctx->sds_rings[ring];
 		if (qlcnic_check_multi_tx(adapter) &&
-		    !adapter->ahw->diag_test &&
-		    (adapter->drv_tx_rings > QLCNIC_SINGLE_RING)) {
+		    !adapter->ahw->diag_test) {
 			netif_napi_add(netdev, &sds_ring->napi, qlcnic_rx_poll,
 				       NAPI_POLL_WEIGHT);
 		} else {
@@ -1535,13 +1528,12 @@ void qlcnic_82xx_napi_enable(struct qlcnic_adapter *adapter)
 	for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
 		sds_ring = &recv_ctx->sds_rings[ring];
 		napi_enable(&sds_ring->napi);
-		qlcnic_enable_int(sds_ring);
+		qlcnic_enable_sds_intr(adapter, sds_ring);
 	}
 
 	if (qlcnic_check_multi_tx(adapter) &&
 	    (adapter->flags & QLCNIC_MSIX_ENABLED) &&
-	    !adapter->ahw->diag_test &&
-	    (adapter->drv_tx_rings > QLCNIC_SINGLE_RING)) {
+	    !adapter->ahw->diag_test) {
 		for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
 			tx_ring = &adapter->tx_ring[ring];
 			napi_enable(&tx_ring->napi);
@@ -1562,7 +1554,7 @@ void qlcnic_82xx_napi_disable(struct qlcnic_adapter *adapter)
 
 	for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
 		sds_ring = &recv_ctx->sds_rings[ring];
-		qlcnic_disable_int(sds_ring);
+		qlcnic_disable_sds_intr(adapter, sds_ring);
 		napi_synchronize(&sds_ring->napi);
 		napi_disable(&sds_ring->napi);
 	}
@@ -1572,7 +1564,7 @@ void qlcnic_82xx_napi_disable(struct qlcnic_adapter *adapter)
 	    qlcnic_check_multi_tx(adapter)) {
 		for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
 			tx_ring = &adapter->tx_ring[ring];
-			qlcnic_disable_tx_int(adapter, tx_ring);
+			qlcnic_disable_tx_intr(adapter, tx_ring);
 			napi_synchronize(&tx_ring->napi);
 			napi_disable(&tx_ring->napi);
 		}
@@ -1601,7 +1593,8 @@ qlcnic_83xx_process_rcv(struct qlcnic_adapter *adapter,
 	struct sk_buff *skb;
 	struct qlcnic_host_rds_ring *rds_ring;
 	int index, length, cksum, is_lb_pkt;
-	u16 vid = 0xffff, t_vid;
+	u16 vid = 0xffff;
+	int err;
 
 	if (unlikely(ring >= adapter->max_rds_rings))
 		return NULL;
@@ -1619,19 +1612,19 @@ qlcnic_83xx_process_rcv(struct qlcnic_adapter *adapter,
 	if (!skb)
 		return buffer;
 
-	if (adapter->drv_mac_learn &&
-	    (adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
-		t_vid = 0;
-		is_lb_pkt = qlcnic_83xx_is_lb_pkt(sts_data[1], 0);
-		qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, t_vid);
-	}
-
 	if (length > rds_ring->skb_size)
 		skb_put(skb, rds_ring->skb_size);
 	else
 		skb_put(skb, length);
 
-	if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
+	err = qlcnic_check_rx_tagging(adapter, skb, &vid);
+
+	if (adapter->rx_mac_learn) {
+		is_lb_pkt = qlcnic_83xx_is_lb_pkt(sts_data[1], 0);
+		qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, vid);
+	}
+
+	if (unlikely(err)) {
 		adapter->stats.rxdropped++;
 		dev_kfree_skb(skb);
 		return buffer;
@@ -1666,7 +1659,8 @@ qlcnic_83xx_process_lro(struct qlcnic_adapter *adapter,
 	int l2_hdr_offset, l4_hdr_offset;
 	int index, is_lb_pkt;
 	u16 lro_length, length, data_offset, gso_size;
-	u16 vid = 0xffff, t_vid;
+	u16 vid = 0xffff;
+	int err;
 
 	if (unlikely(ring > adapter->max_rds_rings))
 		return NULL;
@@ -1688,12 +1682,6 @@ qlcnic_83xx_process_lro(struct qlcnic_adapter *adapter,
 	if (!skb)
 		return buffer;
 
-	if (adapter->drv_mac_learn &&
-	    (adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
-		t_vid = 0;
-		is_lb_pkt = qlcnic_83xx_is_lb_pkt(sts_data[1], 1);
-		qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, t_vid);
-	}
 	if (qlcnic_83xx_is_tstamp(sts_data[1]))
 		data_offset = l4_hdr_offset + QLCNIC_TCP_TS_HDR_SIZE;
 	else
@@ -1702,7 +1690,14 @@ qlcnic_83xx_process_lro(struct qlcnic_adapter *adapter,
 	skb_put(skb, lro_length + data_offset);
 	skb_pull(skb, l2_hdr_offset);
 
-	if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
+	err = qlcnic_check_rx_tagging(adapter, skb, &vid);
+
+	if (adapter->rx_mac_learn) {
+		is_lb_pkt = qlcnic_83xx_is_lb_pkt(sts_data[1], 1);
+		qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, vid);
+	}
+
+	if (unlikely(err)) {
 		adapter->stats.rxdropped++;
 		dev_kfree_skb(skb);
 		return buffer;
@@ -1832,7 +1827,7 @@ static int qlcnic_83xx_msix_sriov_vf_poll(struct napi_struct *napi, int budget)
 	work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget);
 	if ((work_done < budget) && tx_complete) {
 		napi_complete(&sds_ring->napi);
-		qlcnic_83xx_enable_intr(adapter, sds_ring);
+		qlcnic_enable_sds_intr(adapter, sds_ring);
 	}
 
 	return work_done;
@@ -1855,7 +1850,7 @@ static int qlcnic_83xx_poll(struct napi_struct *napi, int budget)
 	work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget);
 	if ((work_done < budget) && tx_complete) {
 		napi_complete(&sds_ring->napi);
-		qlcnic_83xx_enable_intr(adapter, sds_ring);
+		qlcnic_enable_sds_intr(adapter, sds_ring);
 	}
 
 	return work_done;
@@ -1874,7 +1869,7 @@ static int qlcnic_83xx_msix_tx_poll(struct napi_struct *napi, int budget)
 	if (work_done) {
 		napi_complete(&tx_ring->napi);
 		if (test_bit(__QLCNIC_DEV_UP , &adapter->state))
-			qlcnic_83xx_enable_tx_intr(adapter, tx_ring);
+			qlcnic_enable_tx_intr(adapter, tx_ring);
 	}
 
 	return work_done;
@@ -1892,7 +1887,7 @@ static int qlcnic_83xx_rx_poll(struct napi_struct *napi, int budget)
 	if (work_done < budget) {
 		napi_complete(&sds_ring->napi);
 		if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
-			qlcnic_83xx_enable_intr(adapter, sds_ring);
+			qlcnic_enable_sds_intr(adapter, sds_ring);
 	}
 
 	return work_done;
@@ -1912,7 +1907,7 @@ void qlcnic_83xx_napi_enable(struct qlcnic_adapter *adapter)
 		sds_ring = &recv_ctx->sds_rings[ring];
 		napi_enable(&sds_ring->napi);
 		if (adapter->flags & QLCNIC_MSIX_ENABLED)
-			qlcnic_83xx_enable_intr(adapter, sds_ring);
+			qlcnic_enable_sds_intr(adapter, sds_ring);
 	}
 
 	if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
@@ -1920,7 +1915,7 @@ void qlcnic_83xx_napi_enable(struct qlcnic_adapter *adapter)
 		for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
 			tx_ring = &adapter->tx_ring[ring];
 			napi_enable(&tx_ring->napi);
-			qlcnic_83xx_enable_tx_intr(adapter, tx_ring);
+			qlcnic_enable_tx_intr(adapter, tx_ring);
 		}
 	}
 }
@@ -1938,7 +1933,7 @@ void qlcnic_83xx_napi_disable(struct qlcnic_adapter *adapter)
 	for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
 		sds_ring = &recv_ctx->sds_rings[ring];
 		if (adapter->flags & QLCNIC_MSIX_ENABLED)
-			qlcnic_83xx_disable_intr(adapter, sds_ring);
+			qlcnic_disable_sds_intr(adapter, sds_ring);
 		napi_synchronize(&sds_ring->napi);
 		napi_disable(&sds_ring->napi);
 	}
@@ -1947,7 +1942,7 @@ void qlcnic_83xx_napi_disable(struct qlcnic_adapter *adapter)
 	    !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
 		for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
 			tx_ring = &adapter->tx_ring[ring];
-			qlcnic_83xx_disable_tx_intr(adapter, tx_ring);
+			qlcnic_disable_tx_intr(adapter, tx_ring);
 			napi_synchronize(&tx_ring->napi);
 			napi_disable(&tx_ring->napi);
 		}
@@ -2027,8 +2022,8 @@ void qlcnic_83xx_napi_del(struct qlcnic_adapter *adapter)
 	qlcnic_free_tx_rings(adapter);
 }
 
-void qlcnic_83xx_process_rcv_diag(struct qlcnic_adapter *adapter,
-				  int ring, u64 sts_data[])
+static void qlcnic_83xx_process_rcv_diag(struct qlcnic_adapter *adapter,
+					 int ring, u64 sts_data[])
 {
 	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
 	struct sk_buff *skb;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 550791b8fbae..1f79d47c45fa 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -81,6 +81,16 @@ static int qlcnicvf_start_firmware(struct qlcnic_adapter *);
 static int qlcnic_vlan_rx_add(struct net_device *, __be16, u16);
 static int qlcnic_vlan_rx_del(struct net_device *, __be16, u16);
 
+static int qlcnic_82xx_setup_intr(struct qlcnic_adapter *);
+static void qlcnic_82xx_dev_request_reset(struct qlcnic_adapter *, u32);
+static irqreturn_t qlcnic_82xx_clear_legacy_intr(struct qlcnic_adapter *);
+static pci_ers_result_t qlcnic_82xx_io_slot_reset(struct pci_dev *);
+static int qlcnic_82xx_start_firmware(struct qlcnic_adapter *);
+static void qlcnic_82xx_io_resume(struct pci_dev *);
+static void qlcnic_82xx_set_mac_filter_count(struct qlcnic_adapter *);
+static pci_ers_result_t qlcnic_82xx_io_error_detected(struct pci_dev *,
+						      pci_channel_state_t);
+
 static u32 qlcnic_vlan_tx_check(struct qlcnic_adapter *adapter)
 {
 	struct qlcnic_hardware_context *ahw = adapter->ahw;
@@ -308,12 +318,12 @@ int qlcnic_read_mac_addr(struct qlcnic_adapter *adapter)
 
 static void qlcnic_delete_adapter_mac(struct qlcnic_adapter *adapter)
 {
-	struct qlcnic_mac_list_s *cur;
+	struct qlcnic_mac_vlan_list *cur;
 	struct list_head *head;
 
 	list_for_each(head, &adapter->mac_list) {
-		cur = list_entry(head, struct qlcnic_mac_list_s, list);
-		if (!memcmp(adapter->mac_addr, cur->mac_addr, ETH_ALEN)) {
+		cur = list_entry(head, struct qlcnic_mac_vlan_list, list);
+		if (ether_addr_equal_unaligned(adapter->mac_addr, cur->mac_addr)) {
 			qlcnic_sre_macaddr_change(adapter, cur->mac_addr,
 						  0, QLCNIC_MAC_DEL);
 			list_del(&cur->list);
@@ -337,7 +347,7 @@ static int qlcnic_set_mac(struct net_device *netdev, void *p)
 	if (!is_valid_ether_addr(addr->sa_data))
 		return -EINVAL;
 
-	if (!memcmp(adapter->mac_addr, addr->sa_data, ETH_ALEN))
+	if (ether_addr_equal_unaligned(adapter->mac_addr, addr->sa_data))
 		return 0;
 
 	if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
@@ -546,6 +556,11 @@ static struct qlcnic_hardware_ops qlcnic_hw_ops = {
 	.io_error_detected		= qlcnic_82xx_io_error_detected,
 	.io_slot_reset			= qlcnic_82xx_io_slot_reset,
 	.io_resume			= qlcnic_82xx_io_resume,
+	.get_beacon_state		= qlcnic_82xx_get_beacon_state,
+	.enable_sds_intr		= qlcnic_82xx_enable_sds_intr,
+	.disable_sds_intr		= qlcnic_82xx_disable_sds_intr,
+	.enable_tx_intr			= qlcnic_82xx_enable_tx_intr,
+	.disable_tx_intr		= qlcnic_82xx_disable_tx_intr,
 };
 
 static int qlcnic_check_multi_tx_capability(struct qlcnic_adapter *adapter)
@@ -588,9 +603,6 @@ void qlcnic_set_tx_ring_count(struct qlcnic_adapter *adapter, u8 tx_cnt)
 							 QLCNIC_TX_QUEUE);
 	else
 		adapter->drv_tx_rings = tx_cnt;
-
-	dev_info(&adapter->pdev->dev, "Set %d Tx rings\n",
-		 adapter->drv_tx_rings);
 }
 
 void qlcnic_set_sds_ring_count(struct qlcnic_adapter *adapter, u8 rx_cnt)
@@ -601,25 +613,79 @@ void qlcnic_set_sds_ring_count(struct qlcnic_adapter *adapter, u8 rx_cnt)
 							  QLCNIC_RX_QUEUE);
 	else
 		adapter->drv_sds_rings = rx_cnt;
-
-	dev_info(&adapter->pdev->dev, "Set %d SDS rings\n",
-		 adapter->drv_sds_rings);
 }
 
-int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix)
+int qlcnic_setup_tss_rss_intr(struct qlcnic_adapter *adapter)
 {
 	struct pci_dev *pdev = adapter->pdev;
-	int drv_tx_rings, drv_sds_rings, tx_vector;
-	int err = -1, i;
+	int num_msix = 0, err = 0, vector;
+
+	adapter->flags &= ~QLCNIC_TSS_RSS;
+
+	if (adapter->drv_tss_rings > 0)
+		num_msix += adapter->drv_tss_rings;
+	else
+		num_msix += adapter->drv_tx_rings;
+
+	if (adapter->drv_rss_rings  > 0)
+		num_msix += adapter->drv_rss_rings;
+	else
+		num_msix += adapter->drv_sds_rings;
+
+	if (qlcnic_83xx_check(adapter))
+		num_msix += 1;
+
+	if (!adapter->msix_entries) {
+		adapter->msix_entries = kcalloc(num_msix,
+						sizeof(struct msix_entry),
+						GFP_KERNEL);
+		if (!adapter->msix_entries)
+			return -ENOMEM;
+	}
+
+restore:
+	for (vector = 0; vector < num_msix; vector++)
+		adapter->msix_entries[vector].entry = vector;
+
+	err = pci_enable_msix(pdev, adapter->msix_entries, num_msix);
+	if (err == 0) {
+		adapter->ahw->num_msix = num_msix;
+		if (adapter->drv_tss_rings > 0)
+			adapter->drv_tx_rings = adapter->drv_tss_rings;
 
-	if (adapter->flags & QLCNIC_TX_INTR_SHARED) {
-		drv_tx_rings = 0;
-		tx_vector = 0;
+		if (adapter->drv_rss_rings > 0)
+			adapter->drv_sds_rings = adapter->drv_rss_rings;
 	} else {
-		drv_tx_rings = adapter->drv_tx_rings;
-		tx_vector = 1;
+		netdev_info(adapter->netdev,
+			    "Unable to allocate %d MSI-X vectors, Available vectors %d\n",
+			    num_msix, err);
+
+		num_msix = adapter->drv_tx_rings + adapter->drv_sds_rings;
+
+		/* Set rings to 0 so we can restore original TSS/RSS count */
+		adapter->drv_tss_rings = 0;
+		adapter->drv_rss_rings = 0;
+
+		if (qlcnic_83xx_check(adapter))
+			num_msix += 1;
+
+		netdev_info(adapter->netdev,
+			    "Restoring %d Tx, %d SDS rings for total %d vectors.\n",
+			    adapter->drv_tx_rings, adapter->drv_sds_rings,
+			    num_msix);
+		goto restore;
+
+		err = -EIO;
 	}
 
+	return err;
+}
+
+int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix)
+{
+	struct pci_dev *pdev = adapter->pdev;
+	int err = -1, vector;
+
 	if (!adapter->msix_entries) {
 		adapter->msix_entries = kcalloc(num_msix,
 						sizeof(struct msix_entry),
@@ -628,48 +694,43 @@ int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix)
 			return -ENOMEM;
 	}
 
-	adapter->drv_sds_rings = QLCNIC_SINGLE_RING;
 	adapter->flags &= ~(QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED);
 
 	if (adapter->ahw->msix_supported) {
- enable_msix:
-		for (i = 0; i < num_msix; i++)
-			adapter->msix_entries[i].entry = i;
+enable_msix:
+		for (vector = 0; vector < num_msix; vector++)
+			adapter->msix_entries[vector].entry = vector;
+
 		err = pci_enable_msix(pdev, adapter->msix_entries, num_msix);
 		if (err == 0) {
 			adapter->flags |= QLCNIC_MSIX_ENABLED;
-			if (qlcnic_83xx_check(adapter)) {
-				adapter->ahw->num_msix = num_msix;
-				/* subtract mail box and tx ring vectors */
-				adapter->drv_sds_rings = num_msix -
-							 drv_tx_rings - 1;
-			} else {
-				adapter->ahw->num_msix = num_msix;
-				if (qlcnic_check_multi_tx(adapter) &&
-				    !adapter->ahw->diag_test &&
-				    (adapter->drv_tx_rings > 1))
-					drv_sds_rings = num_msix - drv_tx_rings;
-				else
-					drv_sds_rings = num_msix;
-
-				adapter->drv_sds_rings = drv_sds_rings;
-			}
+			adapter->ahw->num_msix = num_msix;
 			dev_info(&pdev->dev, "using msi-x interrupts\n");
 			return err;
 		} else if (err > 0) {
 			dev_info(&pdev->dev,
-				 "Unable to allocate %d MSI-X interrupt vectors\n",
-				 num_msix);
-			if (qlcnic_83xx_check(adapter)) {
-				if (err < (QLC_83XX_MINIMUM_VECTOR - tx_vector))
-					return err;
-				err -= drv_tx_rings + 1;
+				 "Unable to allocate %d MSI-X vectors, Available vectors %d\n",
+				 num_msix, err);
+
+			if (qlcnic_82xx_check(adapter)) {
 				num_msix = rounddown_pow_of_two(err);
-				num_msix += drv_tx_rings + 1;
+				if (err < QLCNIC_82XX_MINIMUM_VECTOR)
+					return -EIO;
 			} else {
-				num_msix = rounddown_pow_of_two(err);
-				if (qlcnic_check_multi_tx(adapter))
-					num_msix += drv_tx_rings;
+				num_msix = rounddown_pow_of_two(err - 1);
+				num_msix += 1;
+				if (err < QLCNIC_83XX_MINIMUM_VECTOR)
+					return -EIO;
+			}
+
+			if (qlcnic_82xx_check(adapter) &&
+			    !qlcnic_check_multi_tx(adapter)) {
+				adapter->drv_sds_rings = num_msix;
+				adapter->drv_tx_rings = QLCNIC_SINGLE_RING;
+			} else {
+				/* Distribute vectors equally */
+				adapter->drv_tx_rings = num_msix / 2;
+				adapter->drv_sds_rings = adapter->drv_tx_rings;
 			}
 
 			if (num_msix) {
@@ -680,14 +741,29 @@ int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix)
 			}
 		} else {
 			dev_info(&pdev->dev,
-				 "Unable to allocate %d MSI-X interrupt vectors\n",
-				 num_msix);
+				 "Unable to allocate %d MSI-X vectors, err=%d\n",
+				 num_msix, err);
+			return err;
 		}
 	}
 
 	return err;
 }
 
+static int qlcnic_82xx_calculate_msix_vector(struct qlcnic_adapter *adapter)
+{
+	int num_msix;
+
+	num_msix = adapter->drv_sds_rings;
+
+	if (qlcnic_check_multi_tx(adapter))
+		num_msix += adapter->drv_tx_rings;
+	else
+		num_msix += QLCNIC_SINGLE_RING;
+
+	return num_msix;
+}
+
 static int qlcnic_enable_msi_legacy(struct qlcnic_adapter *adapter)
 {
 	int err = 0;
@@ -722,25 +798,29 @@ static int qlcnic_enable_msi_legacy(struct qlcnic_adapter *adapter)
 	return err;
 }
 
-int qlcnic_82xx_setup_intr(struct qlcnic_adapter *adapter)
+static int qlcnic_82xx_setup_intr(struct qlcnic_adapter *adapter)
 {
 	int num_msix, err = 0;
 
-	num_msix = adapter->drv_sds_rings;
-
-	if (qlcnic_check_multi_tx(adapter))
-		num_msix += adapter->drv_tx_rings;
+	if (adapter->flags & QLCNIC_TSS_RSS) {
+		err = qlcnic_setup_tss_rss_intr(adapter);
+		if (err < 0)
+			return err;
+		num_msix = adapter->ahw->num_msix;
+	} else {
+		num_msix = qlcnic_82xx_calculate_msix_vector(adapter);
 
-	err = qlcnic_enable_msix(adapter, num_msix);
-	if (err == -ENOMEM)
-		return err;
+		err = qlcnic_enable_msix(adapter, num_msix);
+		if (err == -ENOMEM)
+			return err;
 
-	if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) {
-		qlcnic_disable_multi_tx(adapter);
+		if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) {
+			qlcnic_disable_multi_tx(adapter);
 
-		err = qlcnic_enable_msi_legacy(adapter);
-		if (!err)
-			return err;
+			err = qlcnic_enable_msi_legacy(adapter);
+			if (!err)
+				return err;
+		}
 	}
 
 	return 0;
@@ -800,25 +880,26 @@ static void qlcnic_cleanup_pci_map(struct qlcnic_hardware_context *ahw)
 
 static int qlcnic_get_act_pci_func(struct qlcnic_adapter *adapter)
 {
+	struct qlcnic_hardware_context *ahw = adapter->ahw;
 	struct qlcnic_pci_info *pci_info;
 	int ret;
 
 	if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
-		switch (adapter->ahw->port_type) {
+		switch (ahw->port_type) {
 		case QLCNIC_GBE:
-			adapter->ahw->act_pci_func = QLCNIC_NIU_MAX_GBE_PORTS;
+			ahw->total_nic_func = QLCNIC_NIU_MAX_GBE_PORTS;
 			break;
 		case QLCNIC_XGBE:
-			adapter->ahw->act_pci_func = QLCNIC_NIU_MAX_XG_PORTS;
+			ahw->total_nic_func = QLCNIC_NIU_MAX_XG_PORTS;
 			break;
 		}
 		return 0;
 	}
 
-	if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC)
+	if (ahw->op_mode == QLCNIC_MGMT_FUNC)
 		return 0;
 
-	pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL);
+	pci_info = kcalloc(ahw->max_vnic_func, sizeof(*pci_info), GFP_KERNEL);
 	if (!pci_info)
 		return -ENOMEM;
 
@@ -846,12 +927,13 @@ static bool qlcnic_port_eswitch_cfg_capability(struct qlcnic_adapter *adapter)
 
 int qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
 {
+	struct qlcnic_hardware_context *ahw = adapter->ahw;
 	struct qlcnic_pci_info *pci_info;
 	int i, id = 0, ret = 0, j = 0;
 	u16 act_pci_func;
 	u8 pfn;
 
-	pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL);
+	pci_info = kcalloc(ahw->max_vnic_func, sizeof(*pci_info), GFP_KERNEL);
 	if (!pci_info)
 		return -ENOMEM;
 
@@ -859,7 +941,7 @@ int qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
 	if (ret)
 		goto err_pci_info;
 
-	act_pci_func = adapter->ahw->act_pci_func;
+	act_pci_func = ahw->total_nic_func;
 
 	adapter->npars = kzalloc(sizeof(struct qlcnic_npar_info) *
 				 act_pci_func, GFP_KERNEL);
@@ -875,10 +957,10 @@ int qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
 		goto err_npars;
 	}
 
-	for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
+	for (i = 0; i < ahw->max_vnic_func; i++) {
 		pfn = pci_info[i].id;
 
-		if (pfn >= QLCNIC_MAX_PCI_FUNC) {
+		if (pfn >= ahw->max_vnic_func) {
 			ret = QL_STATUS_INVALID_PARAM;
 			goto err_eswitch;
 		}
@@ -1346,7 +1428,7 @@ int qlcnic_set_default_offload_settings(struct qlcnic_adapter *adapter)
 	if (adapter->need_fw_reset)
 		return 0;
 
-	for (i = 0; i < adapter->ahw->act_pci_func; i++) {
+	for (i = 0; i < adapter->ahw->total_nic_func; i++) {
 		if (!adapter->npars[i].eswitch_status)
 			continue;
 
@@ -1409,7 +1491,7 @@ int qlcnic_reset_npar_config(struct qlcnic_adapter *adapter)
 			return 0;
 
 	/* Set the NPAR config data after FW reset */
-	for (i = 0; i < adapter->ahw->act_pci_func; i++) {
+	for (i = 0; i < adapter->ahw->total_nic_func; i++) {
 		npar = &adapter->npars[i];
 		pci_func = npar->pci_func;
 		if (!adapter->npars[i].eswitch_status)
@@ -1484,7 +1566,7 @@ qlcnic_set_mgmt_operations(struct qlcnic_adapter *adapter)
 	return err;
 }
 
-int qlcnic_82xx_start_firmware(struct qlcnic_adapter *adapter)
+static int qlcnic_82xx_start_firmware(struct qlcnic_adapter *adapter)
 {
 	int err;
 
@@ -1685,6 +1767,33 @@ static void qlcnic_get_lro_mss_capability(struct qlcnic_adapter *adapter)
 	}
 }
 
+static int qlcnic_config_def_intr_coalesce(struct qlcnic_adapter *adapter)
+{
+	struct qlcnic_hardware_context *ahw = adapter->ahw;
+	int err;
+
+	/* Initialize interrupt coalesce parameters */
+	ahw->coal.flag = QLCNIC_INTR_DEFAULT;
+
+	if (qlcnic_83xx_check(adapter)) {
+		ahw->coal.type = QLCNIC_INTR_COAL_TYPE_RX_TX;
+		ahw->coal.tx_time_us = QLCNIC_DEF_INTR_COALESCE_TX_TIME_US;
+		ahw->coal.tx_packets = QLCNIC_DEF_INTR_COALESCE_TX_PACKETS;
+		ahw->coal.rx_time_us = QLCNIC_DEF_INTR_COALESCE_RX_TIME_US;
+		ahw->coal.rx_packets = QLCNIC_DEF_INTR_COALESCE_RX_PACKETS;
+
+		err = qlcnic_83xx_set_rx_tx_intr_coal(adapter);
+	} else {
+		ahw->coal.type = QLCNIC_INTR_COAL_TYPE_RX;
+		ahw->coal.rx_time_us = QLCNIC_DEF_INTR_COALESCE_RX_TIME_US;
+		ahw->coal.rx_packets = QLCNIC_DEF_INTR_COALESCE_RX_PACKETS;
+
+		err = qlcnic_82xx_set_rx_coalesce(adapter);
+	}
+
+	return err;
+}
+
 int __qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
 {
 	int ring;
@@ -1717,7 +1826,7 @@ int __qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
 	if (adapter->drv_sds_rings > 1)
 		qlcnic_config_rss(adapter, 1);
 
-	qlcnic_config_intr_coalesce(adapter);
+	qlcnic_config_def_intr_coalesce(adapter);
 
 	if (netdev->features & NETIF_F_LRO)
 		qlcnic_config_hw_lro(adapter, QLCNIC_LRO_ENABLED);
@@ -1862,7 +1971,7 @@ void qlcnic_diag_free_res(struct net_device *netdev, int drv_sds_rings)
 	if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
 		for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
 			sds_ring = &adapter->recv_ctx->sds_rings[ring];
-			qlcnic_disable_int(sds_ring);
+			qlcnic_disable_sds_intr(adapter, sds_ring);
 		}
 	}
 
@@ -1885,7 +1994,6 @@ out:
 
 static int qlcnic_alloc_adapter_resources(struct qlcnic_adapter *adapter)
 {
-	struct qlcnic_hardware_context *ahw = adapter->ahw;
 	int err = 0;
 
 	adapter->recv_ctx = kzalloc(sizeof(struct qlcnic_recv_context),
@@ -1894,15 +2002,7 @@ static int qlcnic_alloc_adapter_resources(struct qlcnic_adapter *adapter)
 		err = -ENOMEM;
 		goto err_out;
 	}
-	/* Initialize interrupt coalesce parameters */
-	ahw->coal.flag = QLCNIC_INTR_DEFAULT;
-	ahw->coal.type = QLCNIC_INTR_COAL_TYPE_RX;
-	ahw->coal.rx_time_us = QLCNIC_DEF_INTR_COALESCE_RX_TIME_US;
-	ahw->coal.rx_packets = QLCNIC_DEF_INTR_COALESCE_RX_PACKETS;
-	if (qlcnic_83xx_check(adapter)) {
-		ahw->coal.tx_time_us = QLCNIC_DEF_INTR_COALESCE_TX_TIME_US;
-		ahw->coal.tx_packets = QLCNIC_DEF_INTR_COALESCE_TX_PACKETS;
-	}
+
 	/* clear stats */
 	memset(&adapter->stats, 0, sizeof(adapter->stats));
 err_out:
@@ -1963,7 +2063,7 @@ int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
 	if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
 		for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
 			sds_ring = &adapter->recv_ctx->sds_rings[ring];
-			qlcnic_enable_int(sds_ring);
+			qlcnic_enable_sds_intr(adapter, sds_ring);
 		}
 	}
 
@@ -1995,7 +2095,7 @@ qlcnic_reset_hw_context(struct qlcnic_adapter *adapter)
 	netif_device_attach(netdev);
 
 	clear_bit(__QLCNIC_RESETTING, &adapter->state);
-	dev_err(&adapter->pdev->dev, "%s:\n", __func__);
+	netdev_info(adapter->netdev, "%s: soft reset complete\n", __func__);
 	return 0;
 }
 
@@ -2032,10 +2132,10 @@ qlcnic_reset_context(struct qlcnic_adapter *adapter)
 	return err;
 }
 
-void qlcnic_82xx_set_mac_filter_count(struct qlcnic_adapter *adapter)
+static void qlcnic_82xx_set_mac_filter_count(struct qlcnic_adapter *adapter)
 {
 	struct qlcnic_hardware_context *ahw = adapter->ahw;
-	u16 act_pci_fn = ahw->act_pci_func;
+	u16 act_pci_fn = ahw->total_nic_func;
 	u16 count;
 
 	ahw->max_mc_count = QLCNIC_MAX_MC_COUNT;
@@ -2211,7 +2311,6 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	struct qlcnic_hardware_context *ahw;
 	int err, pci_using_dac = -1;
 	char board_name[QLCNIC_MAX_BOARD_NAME_LEN + 19]; /* MAC + ": " + name */
-	struct qlcnic_dcb *dcb;
 
 	if (pdev->is_virtfn)
 		return -ENODEV;
@@ -2289,7 +2388,8 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		goto err_out_free_wq;
 
 	adapter->dev_rst_time = jiffies;
-	adapter->ahw->revision_id = pdev->revision;
+	ahw->revision_id = pdev->revision;
+	ahw->max_vnic_func = qlcnic_get_vnic_func_count(adapter);
 	if (qlcnic_mac_learn == FDB_MAC_LEARN)
 		adapter->fdb_mac_learn = true;
 	else if (qlcnic_mac_learn == DRV_MAC_LEARN)
@@ -2333,10 +2433,6 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 		adapter->flags |= QLCNIC_NEED_FLR;
 
-		dcb = adapter->dcb;
-
-		if (dcb && qlcnic_dcb_attach(dcb))
-			qlcnic_clear_dcb_ops(dcb);
 	} else if (qlcnic_83xx_check(adapter)) {
 		qlcnic_83xx_check_vf(adapter, ent);
 		adapter->portnum = adapter->ahw->pci_func;
@@ -2365,6 +2461,8 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		goto err_out_free_hw;
 	}
 
+	qlcnic_dcb_enable(adapter->dcb);
+
 	if (qlcnic_read_mac_addr(adapter))
 		dev_warn(&pdev->dev, "failed to read mac addr\n");
 
@@ -2498,13 +2596,11 @@ static void qlcnic_remove(struct pci_dev *pdev)
 	qlcnic_cancel_idc_work(adapter);
 	ahw = adapter->ahw;
 
-	qlcnic_dcb_free(adapter->dcb);
-
 	unregister_netdev(netdev);
 	qlcnic_sriov_cleanup(adapter);
 
 	if (qlcnic_83xx_check(adapter)) {
-		qlcnic_83xx_register_nic_idc_func(adapter, 0);
+		qlcnic_83xx_initialize_nic(adapter, 0);
 		cancel_delayed_work_sync(&adapter->idc_aen_work);
 		qlcnic_83xx_free_mbx_intr(adapter);
 		qlcnic_83xx_detach_mailbox_work(adapter);
@@ -2512,6 +2608,8 @@ static void qlcnic_remove(struct pci_dev *pdev)
 		kfree(ahw->fw_info);
 	}
 
+	qlcnic_dcb_free(adapter->dcb);
+
 	qlcnic_detach(adapter);
 
 	if (adapter->npars != NULL)
@@ -2640,7 +2738,7 @@ void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter)
 	if (adapter->fhash.fmax && adapter->fhash.fhead)
 		return;
 
-	act_pci_func = adapter->ahw->act_pci_func;
+	act_pci_func = adapter->ahw->total_nic_func;
 	spin_lock_init(&adapter->mac_learn_lock);
 	spin_lock_init(&adapter->rx_mac_learn_lock);
 
@@ -2737,12 +2835,58 @@ int qlcnic_check_temp(struct qlcnic_adapter *adapter)
 	return rv;
 }
 
-static void qlcnic_tx_timeout(struct net_device *netdev)
+static inline void dump_tx_ring_desc(struct qlcnic_host_tx_ring *tx_ring)
 {
-	struct qlcnic_adapter *adapter = netdev_priv(netdev);
+	int i;
+	struct cmd_desc_type0 *tx_desc_info;
+
+	for (i = 0; i < tx_ring->num_desc; i++) {
+		tx_desc_info = &tx_ring->desc_head[i];
+		pr_info("TX Desc: %d\n", i);
+		print_hex_dump(KERN_INFO, "TX: ", DUMP_PREFIX_OFFSET, 16, 1,
+			       &tx_ring->desc_head[i],
+			       sizeof(struct cmd_desc_type0), true);
+	}
+}
+
+static void qlcnic_dump_tx_rings(struct qlcnic_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
 	struct qlcnic_host_tx_ring *tx_ring;
 	int ring;
 
+	if (!netdev || !netif_running(netdev))
+		return;
+
+	for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
+		tx_ring = &adapter->tx_ring[ring];
+		netdev_info(netdev, "Tx ring=%d Context Id=0x%x\n",
+			    ring, tx_ring->ctx_id);
+		netdev_info(netdev,
+			    "xmit_finished=%llu, xmit_called=%llu, xmit_on=%llu, xmit_off=%llu\n",
+			    tx_ring->tx_stats.xmit_finished,
+			    tx_ring->tx_stats.xmit_called,
+			    tx_ring->tx_stats.xmit_on,
+			    tx_ring->tx_stats.xmit_off);
+		netdev_info(netdev,
+			    "crb_intr_mask=%d, hw_producer=%d, sw_producer=%d, sw_consumer=%d, hw_consumer=%d\n",
+			    readl(tx_ring->crb_intr_mask),
+			    readl(tx_ring->crb_cmd_producer),
+			    tx_ring->producer, tx_ring->sw_consumer,
+			    le32_to_cpu(*(tx_ring->hw_consumer)));
+
+		netdev_info(netdev, "Total desc=%d, Available desc=%d\n",
+			    tx_ring->num_desc, qlcnic_tx_avail(tx_ring));
+
+		if (netif_msg_tx_done(adapter->ahw))
+			dump_tx_ring_desc(tx_ring);
+	}
+}
+
+static void qlcnic_tx_timeout(struct net_device *netdev)
+{
+	struct qlcnic_adapter *adapter = netdev_priv(netdev);
+
 	if (test_bit(__QLCNIC_RESETTING, &adapter->state))
 		return;
 
@@ -2755,22 +2899,7 @@ static void qlcnic_tx_timeout(struct net_device *netdev)
 						      QLCNIC_FORCE_FW_DUMP_KEY);
 	} else {
 		netdev_info(netdev, "Tx timeout, reset adapter context.\n");
-		for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
-			tx_ring = &adapter->tx_ring[ring];
-			netdev_info(netdev, "Tx ring=%d\n", ring);
-			netdev_info(netdev,
-				    "crb_intr_mask=%d, producer=%d, sw_consumer=%d, hw_consumer=%d\n",
-				    readl(tx_ring->crb_intr_mask),
-				    readl(tx_ring->crb_cmd_producer),
-				    tx_ring->sw_consumer,
-				    le32_to_cpu(*(tx_ring->hw_consumer)));
-			netdev_info(netdev,
-				    "xmit_finished=%llu, xmit_called=%llu, xmit_on=%llu, xmit_off=%llu\n",
-				    tx_ring->tx_stats.xmit_finished,
-				    tx_ring->tx_stats.xmit_called,
-				    tx_ring->tx_stats.xmit_on,
-				    tx_ring->tx_stats.xmit_off);
-		}
+		qlcnic_dump_tx_rings(adapter);
 		adapter->ahw->reset_context = 1;
 	}
 }
@@ -2793,7 +2922,7 @@ static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev)
 	return stats;
 }
 
-irqreturn_t qlcnic_82xx_clear_legacy_intr(struct qlcnic_adapter *adapter)
+static irqreturn_t qlcnic_82xx_clear_legacy_intr(struct qlcnic_adapter *adapter)
 {
 	u32 status;
 
@@ -2832,7 +2961,7 @@ static irqreturn_t qlcnic_tmp_intr(int irq, void *data)
 
 done:
 	adapter->ahw->diag_cnt++;
-	qlcnic_enable_int(sds_ring);
+	qlcnic_enable_sds_intr(adapter, sds_ring);
 	return IRQ_HANDLED;
 }
 
@@ -2880,17 +3009,39 @@ static irqreturn_t qlcnic_msix_tx_intr(int irq, void *data)
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void qlcnic_poll_controller(struct net_device *netdev)
 {
-	int ring;
-	struct qlcnic_host_sds_ring *sds_ring;
 	struct qlcnic_adapter *adapter = netdev_priv(netdev);
-	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+	struct qlcnic_host_sds_ring *sds_ring;
+	struct qlcnic_recv_context *recv_ctx;
+	struct qlcnic_host_tx_ring *tx_ring;
+	int ring;
+
+	if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
+		return;
+
+	recv_ctx = adapter->recv_ctx;
 
-	disable_irq(adapter->irq);
 	for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
 		sds_ring = &recv_ctx->sds_rings[ring];
-		qlcnic_intr(adapter->irq, sds_ring);
+		qlcnic_disable_sds_intr(adapter, sds_ring);
+		napi_schedule(&sds_ring->napi);
+	}
+
+	if (adapter->flags & QLCNIC_MSIX_ENABLED) {
+		/* Only Multi-Tx queue capable devices need to
+		 * schedule NAPI for TX rings
+		 */
+		if ((qlcnic_83xx_check(adapter) &&
+		     (adapter->flags & QLCNIC_TX_INTR_SHARED)) ||
+		    (qlcnic_82xx_check(adapter) &&
+		     !qlcnic_check_multi_tx(adapter)))
+			return;
+
+		for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
+			tx_ring = &adapter->tx_ring[ring];
+			qlcnic_disable_tx_intr(adapter, tx_ring);
+			napi_schedule(&tx_ring->napi);
+		}
 	}
-	enable_irq(adapter->irq);
 }
 #endif
 
@@ -3286,7 +3437,8 @@ qlcnic_set_npar_non_operational(struct qlcnic_adapter *adapter)
 	qlcnic_api_unlock(adapter);
 }
 
-void qlcnic_82xx_dev_request_reset(struct qlcnic_adapter *adapter, u32 key)
+static void qlcnic_82xx_dev_request_reset(struct qlcnic_adapter *adapter,
+					  u32 key)
 {
 	u32 state, xg_val = 0, gb_val = 0;
 
@@ -3581,8 +3733,8 @@ static int qlcnic_attach_func(struct pci_dev *pdev)
 	return err;
 }
 
-pci_ers_result_t qlcnic_82xx_io_error_detected(struct pci_dev *pdev,
-					       pci_channel_state_t state)
+static pci_ers_result_t qlcnic_82xx_io_error_detected(struct pci_dev *pdev,
+						      pci_channel_state_t state)
 {
 	struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
 	struct net_device *netdev = adapter->netdev;
@@ -3612,13 +3764,13 @@ pci_ers_result_t qlcnic_82xx_io_error_detected(struct pci_dev *pdev,
 	return PCI_ERS_RESULT_NEED_RESET;
 }
 
-pci_ers_result_t qlcnic_82xx_io_slot_reset(struct pci_dev *pdev)
+static pci_ers_result_t qlcnic_82xx_io_slot_reset(struct pci_dev *pdev)
 {
 	return qlcnic_attach_func(pdev) ? PCI_ERS_RESULT_DISCONNECT :
 				PCI_ERS_RESULT_RECOVERED;
 }
 
-void qlcnic_82xx_io_resume(struct pci_dev *pdev)
+static void qlcnic_82xx_io_resume(struct pci_dev *pdev)
 {
 	u32 state;
 	struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
@@ -3726,12 +3878,6 @@ int qlcnic_validate_rings(struct qlcnic_adapter *adapter, __u32 ring_cnt,
 		return -EINVAL;
 	}
 
-	if (ring_cnt < 2) {
-		netdev_err(netdev,
-			   "%s rings value should not be lower than 2\n", buf);
-		return -EINVAL;
-	}
-
 	if (!is_power_of_2(ring_cnt)) {
 		netdev_err(netdev, "%s rings value should be a power of 2\n",
 			   buf);
@@ -3754,7 +3900,7 @@ int qlcnic_validate_rings(struct qlcnic_adapter *adapter, __u32 ring_cnt,
 	return 0;
 }
 
-int qlcnic_setup_rings(struct qlcnic_adapter *adapter, u8 rx_cnt, u8 tx_cnt)
+int qlcnic_setup_rings(struct qlcnic_adapter *adapter)
 {
 	struct net_device *netdev = adapter->netdev;
 	int err;
@@ -3775,12 +3921,6 @@ int qlcnic_setup_rings(struct qlcnic_adapter *adapter, u8 rx_cnt, u8 tx_cnt)
 
 	qlcnic_teardown_intr(adapter);
 
-	/* compute and set default and max tx/sds rings */
-	qlcnic_set_tx_ring_count(adapter, tx_cnt);
-	qlcnic_set_sds_ring_count(adapter, rx_cnt);
-
-	netif_set_real_num_tx_queues(netdev, adapter->drv_tx_rings);
-
 	err = qlcnic_setup_intr(adapter);
 	if (err) {
 		kfree(adapter->msix_entries);
@@ -3788,9 +3928,10 @@ int qlcnic_setup_rings(struct qlcnic_adapter *adapter, u8 rx_cnt, u8 tx_cnt)
 		return err;
 	}
 
+	netif_set_real_num_tx_queues(netdev, adapter->drv_tx_rings);
+
 	if (qlcnic_83xx_check(adapter)) {
-		/* register for NIC IDC AEN Events */
-		qlcnic_83xx_register_nic_idc_func(adapter, 1);
+		qlcnic_83xx_initialize_nic(adapter, 1);
 		err = qlcnic_83xx_setup_mbx_intr(adapter);
 		qlcnic_83xx_disable_mbx_poll(adapter);
 		if (err) {
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
index 0daf660e12a1..396bd1fd1d27 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
@@ -126,8 +126,8 @@ struct qlcnic_vport {
 	u16			handle;
 	u16			max_tx_bw;
 	u16			min_tx_bw;
+	u16			pvid;
 	u8			vlan_mode;
-	u16			vlan;
 	u8			qos;
 	bool			spoofchk;
 	u8			mac[6];
@@ -137,6 +137,8 @@ struct qlcnic_vf_info {
 	u8				pci_func;
 	u16				rx_ctx_id;
 	u16				tx_ctx_id;
+	u16				*sriov_vlans;
+	int				num_vlan;
 	unsigned long			state;
 	struct completion		ch_free_cmpl;
 	struct work_struct		trans_work;
@@ -149,6 +151,7 @@ struct qlcnic_vf_info {
 	struct qlcnic_trans_list	rcv_pend;
 	struct qlcnic_adapter		*adapter;
 	struct qlcnic_vport		*vp;
+	struct mutex			vlan_list_lock;	/* Lock for VLAN list */
 };
 
 struct qlcnic_async_work_list {
@@ -185,7 +188,6 @@ void qlcnic_sriov_vf_register_map(struct qlcnic_hardware_context *);
 int qlcnic_sriov_vf_init(struct qlcnic_adapter *, int);
 void qlcnic_sriov_vf_set_ops(struct qlcnic_adapter *);
 int qlcnic_sriov_func_to_index(struct qlcnic_adapter *, u8);
-int qlcnic_sriov_channel_cfg_cmd(struct qlcnic_adapter *, u8);
 void qlcnic_sriov_handle_bc_event(struct qlcnic_adapter *, u32);
 int qlcnic_sriov_cfg_bc_intr(struct qlcnic_adapter *, u8);
 void qlcnic_sriov_cleanup_async_list(struct qlcnic_back_channel *);
@@ -195,8 +197,13 @@ int __qlcnic_sriov_add_act_list(struct qlcnic_sriov *, struct qlcnic_vf_info *,
 int qlcnic_sriov_get_vf_vport_info(struct qlcnic_adapter *,
 				   struct qlcnic_info *, u16);
 int qlcnic_sriov_cfg_vf_guest_vlan(struct qlcnic_adapter *, u16, u8);
-int qlcnic_sriov_vf_shutdown(struct pci_dev *);
-int qlcnic_sriov_vf_resume(struct qlcnic_adapter *);
+void qlcnic_sriov_free_vlans(struct qlcnic_adapter *);
+void qlcnic_sriov_alloc_vlans(struct qlcnic_adapter *);
+bool qlcnic_sriov_check_any_vlan(struct qlcnic_vf_info *);
+void qlcnic_sriov_del_vlan_id(struct qlcnic_sriov *,
+			      struct qlcnic_vf_info *, u16);
+void qlcnic_sriov_add_vlan_id(struct qlcnic_sriov *,
+			      struct qlcnic_vf_info *, u16);
 
 static inline bool qlcnic_sriov_enable_check(struct qlcnic_adapter *adapter)
 {
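
Aside: the header changes above replace the single per-vport VLAN field with a per-VF table (sriov_vlans, num_vlan) guarded by vlan_list_lock, while pvid now carries the port-VLAN case separately. A minimal userspace analogue of that bookkeeping, with invented names and a pthread mutex standing in for the kernel mutex (a sketch of the semantics, not the driver API):

#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

struct vf_vlan_table {
	uint16_t *vlans;	/* fixed number of slots, 0 means "free" */
	int num_allowed;	/* capacity, decided at init time */
	int num_vlan;		/* slots currently in use */
	pthread_mutex_t lock;	/* plays the role of vlan_list_lock */
};

static int vf_vlan_table_init(struct vf_vlan_table *t, int num_allowed)
{
	t->vlans = calloc(num_allowed, sizeof(*t->vlans));
	if (!t->vlans)
		return -1;
	t->num_allowed = num_allowed;
	t->num_vlan = 0;
	pthread_mutex_init(&t->lock, NULL);
	return 0;
}

/* Record a VLAN in the first free slot; fails once the table is full. */
static bool vf_vlan_add(struct vf_vlan_table *t, uint16_t vid)
{
	bool added = false;

	pthread_mutex_lock(&t->lock);
	for (int i = 0; i < t->num_allowed; i++) {
		if (!t->vlans[i]) {
			t->vlans[i] = vid;
			t->num_vlan++;
			added = true;
			break;
		}
	}
	pthread_mutex_unlock(&t->lock);
	return added;
}

/* Clear a recorded VLAN; the slot becomes reusable. */
static bool vf_vlan_del(struct vf_vlan_table *t, uint16_t vid)
{
	bool removed = false;

	pthread_mutex_lock(&t->lock);
	for (int i = 0; i < t->num_allowed; i++) {
		if (t->vlans[i] == vid) {
			t->vlans[i] = 0;
			t->num_vlan--;
			removed = true;
			break;
		}
	}
	pthread_mutex_unlock(&t->lock);
	return removed;
}

int main(void)
{
	struct vf_vlan_table t;

	if (vf_vlan_table_init(&t, 4))
		return 1;
	vf_vlan_add(&t, 100);
	vf_vlan_add(&t, 200);
	vf_vlan_del(&t, 100);	/* VLAN 200 remains recorded */
	free(t.vlans);
	return 0;
}
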
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
index 21a4b274d2e4..17a1ca2050f4 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
@@ -35,7 +35,10 @@ static void qlcnic_sriov_vf_cancel_fw_work(struct qlcnic_adapter *);
 static void qlcnic_sriov_cleanup_transaction(struct qlcnic_bc_trans *);
 static int qlcnic_sriov_issue_cmd(struct qlcnic_adapter *,
 				  struct qlcnic_cmd_args *);
+static int qlcnic_sriov_channel_cfg_cmd(struct qlcnic_adapter *, u8);
 static void qlcnic_sriov_process_bc_cmd(struct work_struct *);
+static int qlcnic_sriov_vf_shutdown(struct pci_dev *);
+static int qlcnic_sriov_vf_resume(struct qlcnic_adapter *);
 
 static struct qlcnic_hardware_ops qlcnic_sriov_vf_hw_ops = {
 	.read_crb			= qlcnic_83xx_read_crb,
@@ -68,6 +71,8 @@ static struct qlcnic_hardware_ops qlcnic_sriov_vf_hw_ops = {
 	.change_l2_filter		= qlcnic_83xx_change_l2_filter,
 	.get_board_info			= qlcnic_83xx_get_port_info,
 	.free_mac_list			= qlcnic_sriov_vf_free_mac_list,
+	.enable_sds_intr		= qlcnic_83xx_enable_sds_intr,
+	.disable_sds_intr		= qlcnic_83xx_disable_sds_intr,
 };
 
 static struct qlcnic_nic_template qlcnic_sriov_vf_ops = {
@@ -176,6 +181,7 @@ int qlcnic_sriov_init(struct qlcnic_adapter *adapter, int num_vfs)
 		vf->adapter = adapter;
 		vf->pci_func = qlcnic_sriov_virtid_fn(adapter, i);
 		mutex_init(&vf->send_cmd_lock);
+		mutex_init(&vf->vlan_list_lock);
 		INIT_LIST_HEAD(&vf->rcv_act.wait_list);
 		INIT_LIST_HEAD(&vf->rcv_pend.wait_list);
 		spin_lock_init(&vf->rcv_act.lock);
@@ -276,6 +282,11 @@ static void qlcnic_sriov_vf_cleanup(struct qlcnic_adapter *adapter)
 
 void qlcnic_sriov_cleanup(struct qlcnic_adapter *adapter)
 {
+	if (!test_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state))
+		return;
+
+	qlcnic_sriov_free_vlans(adapter);
+
 	if (qlcnic_sriov_pf_check(adapter))
 		qlcnic_sriov_pf_cleanup(adapter);
 
@@ -416,10 +427,15 @@ static int qlcnic_sriov_set_guest_vlan_mode(struct qlcnic_adapter *adapter,
 		return 0;
 
 	sriov->any_vlan = cmd->rsp.arg[2] & 0xf;
+	sriov->num_allowed_vlans = cmd->rsp.arg[2] >> 16;
+	dev_info(&adapter->pdev->dev, "Number of allowed Guest VLANs = %d\n",
+		 sriov->num_allowed_vlans);
+
+	qlcnic_sriov_alloc_vlans(adapter);
+
 	if (!sriov->any_vlan)
 		return 0;
 
-	sriov->num_allowed_vlans = cmd->rsp.arg[2] >> 16;
 	num_vlans = sriov->num_allowed_vlans;
 	sriov->allowed_vlans = kzalloc(sizeof(u16) * num_vlans, GFP_KERNEL);
 	if (!sriov->allowed_vlans)
@@ -473,6 +489,8 @@ static int qlcnic_sriov_vf_init_driver(struct qlcnic_adapter *adapter)
 	if (err)
 		return err;
 
+	ahw->max_mc_count = nic_info.max_rx_mcast_mac_filters;
+
 	err = qlcnic_get_nic_info(adapter, &nic_info, ahw->pci_func);
 	if (err)
 		return -EIO;
@@ -500,7 +518,6 @@ static int qlcnic_sriov_vf_init_driver(struct qlcnic_adapter *adapter)
 static int qlcnic_sriov_setup_vf(struct qlcnic_adapter *adapter,
 				 int pci_using_dac)
 {
-	struct qlcnic_dcb *dcb;
 	int err;
 
 	INIT_LIST_HEAD(&adapter->vf_mc_list);
@@ -538,11 +555,6 @@ static int qlcnic_sriov_setup_vf(struct qlcnic_adapter *adapter,
 	if (err)
 		goto err_out_send_channel_term;
 
-	dcb = adapter->dcb;
-
-	if (dcb && qlcnic_dcb_attach(dcb))
-		qlcnic_clear_dcb_ops(dcb);
-
 	err = qlcnic_setup_netdev(adapter, adapter->netdev, pci_using_dac);
 	if (err)
 		goto err_out_send_channel_term;
@@ -1417,7 +1429,7 @@ cleanup_transaction:
 	return rsp;
 }
 
-int qlcnic_sriov_channel_cfg_cmd(struct qlcnic_adapter *adapter, u8 cmd_op)
+static int qlcnic_sriov_channel_cfg_cmd(struct qlcnic_adapter *adapter, u8 cmd_op)
 {
 	struct qlcnic_cmd_args cmd;
 	struct qlcnic_vf_info *vf = &adapter->ahw->sriov->vf_info[0];
@@ -1447,18 +1459,27 @@ out:
 	return ret;
 }
 
-void qlcnic_vf_add_mc_list(struct net_device *netdev, u16 vlan)
+static void qlcnic_vf_add_mc_list(struct net_device *netdev)
 {
 	struct qlcnic_adapter *adapter = netdev_priv(netdev);
-	struct qlcnic_mac_list_s *cur;
+	struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+	struct qlcnic_mac_vlan_list *cur;
 	struct list_head *head, tmp_list;
+	struct qlcnic_vf_info *vf;
+	u16 vlan_id;
+	int i;
 
+	static const u8 bcast_addr[ETH_ALEN] = {
+		0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+	};
+
+	vf = &adapter->ahw->sriov->vf_info[0];
 	INIT_LIST_HEAD(&tmp_list);
 	head = &adapter->vf_mc_list;
 	netif_addr_lock_bh(netdev);
 
 	while (!list_empty(head)) {
-		cur = list_entry(head->next, struct qlcnic_mac_list_s, list);
+		cur = list_entry(head->next, struct qlcnic_mac_vlan_list, list);
 		list_move(&cur->list, &tmp_list);
 	}
 
@@ -1466,8 +1487,28 @@ void qlcnic_vf_add_mc_list(struct net_device *netdev, u16 vlan)
 
 	while (!list_empty(&tmp_list)) {
 		cur = list_entry((&tmp_list)->next,
-				 struct qlcnic_mac_list_s, list);
-		qlcnic_nic_add_mac(adapter, cur->mac_addr, vlan);
+				 struct qlcnic_mac_vlan_list, list);
+		if (!qlcnic_sriov_check_any_vlan(vf)) {
+			qlcnic_nic_add_mac(adapter, bcast_addr, 0);
+			qlcnic_nic_add_mac(adapter, cur->mac_addr, 0);
+		} else {
+			mutex_lock(&vf->vlan_list_lock);
+			for (i = 0; i < sriov->num_allowed_vlans; i++) {
+				vlan_id = vf->sriov_vlans[i];
+				if (vlan_id) {
+					qlcnic_nic_add_mac(adapter, bcast_addr,
+							   vlan_id);
+					qlcnic_nic_add_mac(adapter,
+							   cur->mac_addr,
+							   vlan_id);
+				}
+			}
+			mutex_unlock(&vf->vlan_list_lock);
+			if (qlcnic_84xx_check(adapter)) {
+				qlcnic_nic_add_mac(adapter, bcast_addr, 0);
+				qlcnic_nic_add_mac(adapter, cur->mac_addr, 0);
+			}
+		}
 		list_del(&cur->list);
 		kfree(cur);
 	}
@@ -1490,13 +1531,24 @@ void qlcnic_sriov_cleanup_async_list(struct qlcnic_back_channel *bc)
 static void qlcnic_sriov_vf_set_multi(struct net_device *netdev)
 {
 	struct qlcnic_adapter *adapter = netdev_priv(netdev);
-	u16 vlan;
+	struct qlcnic_hardware_context *ahw = adapter->ahw;
+	u32 mode = VPORT_MISS_MODE_DROP;
 
 	if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
 		return;
 
-	vlan = adapter->ahw->sriov->vlan;
-	__qlcnic_set_multi(netdev, vlan);
+	if (netdev->flags & IFF_PROMISC) {
+		if (!(adapter->flags & QLCNIC_PROMISC_DISABLED))
+			mode = VPORT_MISS_MODE_ACCEPT_ALL;
+	} else if ((netdev->flags & IFF_ALLMULTI) ||
+		   (netdev_mc_count(netdev) > ahw->max_mc_count)) {
+		mode = VPORT_MISS_MODE_ACCEPT_MULTI;
+	}
+
+	if (qlcnic_sriov_vf_check(adapter))
+		qlcnic_vf_add_mc_list(netdev);
+
+	qlcnic_nic_set_promisc(adapter, mode);
 }
 
 static void qlcnic_sriov_handle_async_multi(struct work_struct *work)
@@ -1584,8 +1636,6 @@ static int qlcnic_sriov_vf_reinit_driver(struct qlcnic_adapter *adapter)
 	if (err)
 		goto err_out_term_channel;
 
-	qlcnic_dcb_get_info(adapter->dcb);
-
 	return 0;
 
 err_out_term_channel:
@@ -1833,18 +1883,60 @@ static void qlcnic_sriov_vf_cancel_fw_work(struct qlcnic_adapter *adapter)
 	cancel_delayed_work_sync(&adapter->fw_work);
 }
 
-static int qlcnic_sriov_validate_vlan_cfg(struct qlcnic_sriov *sriov,
+static int qlcnic_sriov_check_vlan_id(struct qlcnic_sriov *sriov,
+				      struct qlcnic_vf_info *vf, u16 vlan_id)
+{
+	int i, err = -EINVAL;
+
+	if (!vf->sriov_vlans)
+		return err;
+
+	mutex_lock(&vf->vlan_list_lock);
+
+	for (i = 0; i < sriov->num_allowed_vlans; i++) {
+		if (vf->sriov_vlans[i] == vlan_id) {
+			err = 0;
+			break;
+		}
+	}
+
+	mutex_unlock(&vf->vlan_list_lock);
+	return err;
+}
+
+static int qlcnic_sriov_validate_num_vlans(struct qlcnic_sriov *sriov,
+					   struct qlcnic_vf_info *vf)
+{
+	int err = 0;
+
+	mutex_lock(&vf->vlan_list_lock);
+
+	if (vf->num_vlan >= sriov->num_allowed_vlans)
+		err = -EINVAL;
+
+	mutex_unlock(&vf->vlan_list_lock);
+	return err;
+}
+
+static int qlcnic_sriov_validate_vlan_cfg(struct qlcnic_adapter *adapter,
 					  u16 vid, u8 enable)
 {
-	u16 vlan = sriov->vlan;
+	struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+	struct qlcnic_vf_info *vf;
+	bool vlan_exist;
 	u8 allowed = 0;
 	int i;
 
+	vf = &adapter->ahw->sriov->vf_info[0];
+	vlan_exist = qlcnic_sriov_check_any_vlan(vf);
 	if (sriov->vlan_mode != QLC_GUEST_VLAN_MODE)
 		return -EINVAL;
 
 	if (enable) {
-		if (vlan)
+		if (qlcnic_83xx_vf_check(adapter) && vlan_exist)
+			return -EINVAL;
+
+		if (qlcnic_sriov_validate_num_vlans(sriov, vf))
 			return -EINVAL;
 
 		if (sriov->any_vlan) {
@@ -1857,24 +1949,54 @@ static int qlcnic_sriov_validate_vlan_cfg(struct qlcnic_sriov *sriov,
 				return -EINVAL;
 		}
 	} else {
-		if (!vlan || vlan != vid)
+		if (!vlan_exist || qlcnic_sriov_check_vlan_id(sriov, vf, vid))
 			return -EINVAL;
 	}
 
 	return 0;
 }
 
+static void qlcnic_sriov_vlan_operation(struct qlcnic_vf_info *vf, u16 vlan_id,
+					enum qlcnic_vlan_operations opcode)
+{
+	struct qlcnic_adapter *adapter = vf->adapter;
+	struct qlcnic_sriov *sriov;
+
+	sriov = adapter->ahw->sriov;
+
+	if (!vf->sriov_vlans)
+		return;
+
+	mutex_lock(&vf->vlan_list_lock);
+
+	switch (opcode) {
+	case QLC_VLAN_ADD:
+		qlcnic_sriov_add_vlan_id(sriov, vf, vlan_id);
+		break;
+	case QLC_VLAN_DELETE:
+		qlcnic_sriov_del_vlan_id(sriov, vf, vlan_id);
+		break;
+	default:
+		netdev_err(adapter->netdev, "Invalid VLAN operation\n");
+	}
+
+	mutex_unlock(&vf->vlan_list_lock);
+	return;
+}
+
 int qlcnic_sriov_cfg_vf_guest_vlan(struct qlcnic_adapter *adapter,
 				   u16 vid, u8 enable)
 {
 	struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+	struct qlcnic_vf_info *vf;
 	struct qlcnic_cmd_args cmd;
 	int ret;
 
 	if (vid == 0)
 		return 0;
 
-	ret = qlcnic_sriov_validate_vlan_cfg(sriov, vid, enable);
+	vf = &adapter->ahw->sriov->vf_info[0];
+	ret = qlcnic_sriov_validate_vlan_cfg(adapter, vid, enable);
 	if (ret)
 		return ret;
 
@@ -1894,11 +2016,11 @@ int qlcnic_sriov_cfg_vf_guest_vlan(struct qlcnic_adapter *adapter,
 		qlcnic_free_mac_list(adapter);
 
 		if (enable)
-			sriov->vlan = vid;
+			qlcnic_sriov_vlan_operation(vf, vid, QLC_VLAN_ADD);
 		else
-			sriov->vlan = 0;
+			qlcnic_sriov_vlan_operation(vf, vid, QLC_VLAN_DELETE);
 
-		qlcnic_sriov_vf_set_multi(adapter->netdev);
+		qlcnic_set_multi(adapter->netdev);
 	}
 
 	qlcnic_free_mbx_args(&cmd);
@@ -1908,21 +2030,19 @@ int qlcnic_sriov_cfg_vf_guest_vlan(struct qlcnic_adapter *adapter,
 static void qlcnic_sriov_vf_free_mac_list(struct qlcnic_adapter *adapter)
 {
 	struct list_head *head = &adapter->mac_list;
-	struct qlcnic_mac_list_s *cur;
-	u16 vlan;
-
-	vlan = adapter->ahw->sriov->vlan;
+	struct qlcnic_mac_vlan_list *cur;
 
 	while (!list_empty(head)) {
-		cur = list_entry(head->next, struct qlcnic_mac_list_s, list);
-		qlcnic_sre_macaddr_change(adapter, cur->mac_addr,
-					  vlan, QLCNIC_MAC_DEL);
+		cur = list_entry(head->next, struct qlcnic_mac_vlan_list, list);
+		qlcnic_sre_macaddr_change(adapter, cur->mac_addr, cur->vlan_id,
+					  QLCNIC_MAC_DEL);
 		list_del(&cur->list);
 		kfree(cur);
 	}
 }
 
-int qlcnic_sriov_vf_shutdown(struct pci_dev *pdev)
+
+static int qlcnic_sriov_vf_shutdown(struct pci_dev *pdev)
 {
 	struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
 	struct net_device *netdev = adapter->netdev;
@@ -1946,7 +2066,7 @@ int qlcnic_sriov_vf_shutdown(struct pci_dev *pdev)
 	return 0;
 }
 
-int qlcnic_sriov_vf_resume(struct qlcnic_adapter *adapter)
+static int qlcnic_sriov_vf_resume(struct qlcnic_adapter *adapter)
 {
 	struct qlc_83xx_idc *idc = &adapter->ahw->idc;
 	struct net_device *netdev = adapter->netdev;
@@ -1972,3 +2092,70 @@ int qlcnic_sriov_vf_resume(struct qlcnic_adapter *adapter)
 			     idc->delay);
 	return err;
 }
+
+void qlcnic_sriov_alloc_vlans(struct qlcnic_adapter *adapter)
+{
+	struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+	struct qlcnic_vf_info *vf;
+	int i;
+
+	for (i = 0; i < sriov->num_vfs; i++) {
+		vf = &sriov->vf_info[i];
+		vf->sriov_vlans = kcalloc(sriov->num_allowed_vlans,
+					  sizeof(*vf->sriov_vlans), GFP_KERNEL);
+	}
+}
+
+void qlcnic_sriov_free_vlans(struct qlcnic_adapter *adapter)
+{
+	struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+	struct qlcnic_vf_info *vf;
+	int i;
+
+	for (i = 0; i < sriov->num_vfs; i++) {
+		vf = &sriov->vf_info[i];
+		kfree(vf->sriov_vlans);
+		vf->sriov_vlans = NULL;
+	}
+}
+
+void qlcnic_sriov_add_vlan_id(struct qlcnic_sriov *sriov,
+			      struct qlcnic_vf_info *vf, u16 vlan_id)
+{
+	int i;
+
+	for (i = 0; i < sriov->num_allowed_vlans; i++) {
+		if (!vf->sriov_vlans[i]) {
+			vf->sriov_vlans[i] = vlan_id;
+			vf->num_vlan++;
+			return;
+		}
+	}
+}
+
+void qlcnic_sriov_del_vlan_id(struct qlcnic_sriov *sriov,
+			      struct qlcnic_vf_info *vf, u16 vlan_id)
+{
+	int i;
+
+	for (i = 0; i < sriov->num_allowed_vlans; i++) {
+		if (vf->sriov_vlans[i] == vlan_id) {
+			vf->sriov_vlans[i] = 0;
+			vf->num_vlan--;
+			return;
+		}
+	}
+}
+
+bool qlcnic_sriov_check_any_vlan(struct qlcnic_vf_info *vf)
+{
+	bool err = false;
+
+	mutex_lock(&vf->vlan_list_lock);
+
+	if (vf->num_vlan)
+		err = true;
+
+	mutex_unlock(&vf->vlan_list_lock);
+	return err;
+}
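
Aside: with the per-VF VLAN table in place, qlcnic_vf_add_mc_list() above programs each deferred MAC once per recorded guest VLAN and falls back to the untagged (VLAN 0) filter when none is set. The walk-the-table pattern reduced to a hypothetical sketch (program_filter() is an invented stand-in for qlcnic_nic_add_mac()):

#include <stdio.h>
#include <stdint.h>

#define NUM_ALLOWED_VLANS 4

/* Hypothetical stand-in for programming one MAC+VLAN filter in hardware. */
static void program_filter(const uint8_t mac[6], uint16_t vlan_id)
{
	printf("add filter %02x:%02x:%02x:%02x:%02x:%02x vlan %u\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], vlan_id);
}

/* Program one MAC for every VLAN recorded in the per-VF table; slot value 0
 * means "unused", mirroring the sriov_vlans[] convention in the hunks above. */
static void add_mac_for_all_vlans(const uint8_t mac[6],
				  const uint16_t vlans[NUM_ALLOWED_VLANS])
{
	int programmed = 0;

	for (int i = 0; i < NUM_ALLOWED_VLANS; i++) {
		if (vlans[i]) {
			program_filter(mac, vlans[i]);
			programmed++;
		}
	}
	if (!programmed)	/* no guest VLAN: untagged filter only */
		program_filter(mac, 0);
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x0e, 0x1e, 0x12, 0x34, 0x56 };
	const uint16_t vlans[NUM_ALLOWED_VLANS] = { 100, 0, 200, 0 };

	add_mac_for_all_vlans(mac, vlans);
	return 0;
}
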
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
index 024f8161d2fe..09acf15c3a56 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
@@ -9,9 +9,14 @@
 #include "qlcnic.h"
 #include <linux/types.h>
 
-#define QLCNIC_SRIOV_VF_MAX_MAC 1
+#define QLCNIC_SRIOV_VF_MAX_MAC 7
 #define QLC_VF_MIN_TX_RATE	100
 #define QLC_VF_MAX_TX_RATE	9999
+#define QLC_MAC_OPCODE_MASK	0x7
+#define QLC_MAC_STAR_ADD	6
+#define QLC_MAC_STAR_DEL	7
+#define QLC_VF_FLOOD_BIT	BIT_16
+#define QLC_FLOOD_MODE		0x5
 
 static int qlcnic_sriov_pf_get_vport_handle(struct qlcnic_adapter *, u8);
 
@@ -64,9 +69,10 @@ static int qlcnic_sriov_pf_cal_res_limit(struct qlcnic_adapter *adapter,
 {
 	struct qlcnic_sriov *sriov = adapter->ahw->sriov;
 	struct qlcnic_resources *res = &sriov->ff_max;
-	u32 temp, num_vf_macs, num_vfs, max;
+	u16 num_macs = sriov->num_allowed_vlans + 1;
 	int ret = -EIO, vpid, id;
 	struct qlcnic_vport *vp;
+	u32 num_vfs, max, temp;
 
 	vpid = qlcnic_sriov_pf_get_vport_handle(adapter, func);
 	if (vpid < 0)
@@ -75,16 +81,25 @@ static int qlcnic_sriov_pf_cal_res_limit(struct qlcnic_adapter *adapter,
 	num_vfs = sriov->num_vfs;
 	max = num_vfs + 1;
 	info->bit_offsets = 0xffff;
+	info->max_tx_ques = res->num_tx_queues / max;
+
+	if (qlcnic_83xx_pf_check(adapter))
+		num_macs = 1;
+
 	info->max_rx_mcast_mac_filters = res->num_rx_mcast_mac_filters;
-	num_vf_macs = QLCNIC_SRIOV_VF_MAX_MAC;
 
 	if (adapter->ahw->pci_func == func) {
-		temp = res->num_rx_mcast_mac_filters - (num_vfs * num_vf_macs);
-		info->max_rx_ucast_mac_filters = temp;
-		temp = res->num_tx_mac_filters - (num_vfs * num_vf_macs);
-		info->max_tx_mac_filters = temp;
 		info->min_tx_bw = 0;
 		info->max_tx_bw = MAX_BW;
+
+		temp = res->num_rx_ucast_mac_filters - num_macs * num_vfs;
+		info->max_rx_ucast_mac_filters = temp;
+		temp = res->num_tx_mac_filters - num_macs * num_vfs;
+		info->max_tx_mac_filters = temp;
+		temp = num_macs * num_vfs * QLCNIC_SRIOV_VF_MAX_MAC;
+		temp = res->num_rx_mcast_mac_filters - temp;
+		info->max_rx_mcast_mac_filters = temp;
+
 		info->max_tx_ques = res->num_tx_queues - sriov->num_vfs;
 	} else {
 		id = qlcnic_sriov_func_to_index(adapter, func);
@@ -93,8 +108,12 @@ static int qlcnic_sriov_pf_cal_res_limit(struct qlcnic_adapter *adapter,
 		vp = sriov->vf_info[id].vp;
 		info->min_tx_bw = vp->min_tx_bw;
 		info->max_tx_bw = vp->max_tx_bw;
-		info->max_rx_ucast_mac_filters = num_vf_macs;
-		info->max_tx_mac_filters = num_vf_macs;
+
+		info->max_rx_ucast_mac_filters = num_macs;
+		info->max_tx_mac_filters = num_macs;
+		temp = num_macs * QLCNIC_SRIOV_VF_MAX_MAC;
+		info->max_rx_mcast_mac_filters = temp;
+
 		info->max_tx_ques = QLCNIC_SINGLE_RING;
 	}
 
@@ -133,6 +152,25 @@ static void qlcnic_sriov_pf_set_ff_max_res(struct qlcnic_adapter *adapter,
 	ff_max->max_local_ipv6_addrs = info->max_local_ipv6_addrs;
 }
 
+static void qlcnic_sriov_set_vf_max_vlan(struct qlcnic_adapter *adapter,
+					 struct qlcnic_info *npar_info)
+{
+	struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+	int temp, total_fn;
+
+	temp = npar_info->max_rx_mcast_mac_filters;
+	total_fn = sriov->num_vfs + 1;
+
+	temp = temp / (QLCNIC_SRIOV_VF_MAX_MAC * total_fn);
+	sriov->num_allowed_vlans = temp - 1;
+
+	if (qlcnic_83xx_pf_check(adapter))
+		sriov->num_allowed_vlans = 1;
+
+	netdev_info(adapter->netdev, "Max Guest VLANs supported per VF = %d\n",
+		    sriov->num_allowed_vlans);
+}
+
 static int qlcnic_sriov_get_pf_info(struct qlcnic_adapter *adapter,
 				    struct qlcnic_info *npar_info)
 {
@@ -166,6 +204,7 @@ static int qlcnic_sriov_get_pf_info(struct qlcnic_adapter *adapter,
 	npar_info->max_local_ipv6_addrs = LSW(cmd.rsp.arg[8]);
 	npar_info->max_remote_ipv6_addrs = MSW(cmd.rsp.arg[8]);
 
+	qlcnic_sriov_set_vf_max_vlan(adapter, npar_info);
 	qlcnic_sriov_pf_set_ff_max_res(adapter, npar_info);
 	dev_info(&adapter->pdev->dev,
 		 "\n\ttotal_pf: %d,\n"
@@ -310,6 +349,28 @@ static int qlcnic_sriov_pf_cfg_vlan_filtering(struct qlcnic_adapter *adapter,
 	return err;
 }
 
+/* On configuring the VF flood bit, the PF will receive traffic from all VFs */
+static int qlcnic_sriov_pf_cfg_flood(struct qlcnic_adapter *adapter)
+{
+	struct qlcnic_cmd_args cmd;
+	int err;
+
+	err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_NIC_INFO);
+	if (err)
+		return err;
+
+	cmd.req.arg[1] = QLC_FLOOD_MODE | QLC_VF_FLOOD_BIT;
+
+	err = qlcnic_issue_cmd(adapter, &cmd);
+	if (err)
+		dev_err(&adapter->pdev->dev,
+			"Failed to configure VF Flood bit on PF, err=%d\n",
+			err);
+
+	qlcnic_free_mbx_args(&cmd);
+	return err;
+}
+
 static int qlcnic_sriov_pf_cfg_eswitch(struct qlcnic_adapter *adapter,
 				       u8 func, u8 enable)
 {
@@ -404,6 +465,8 @@ static int qlcnic_pci_sriov_disable(struct qlcnic_adapter *adapter)
 
 	qlcnic_sriov_pf_disable(adapter);
 
+	qlcnic_sriov_free_vlans(adapter);
+
 	qlcnic_sriov_pf_cleanup(adapter);
 
 	/* After disabling SRIOV re-init the driver in default mode
@@ -435,6 +498,12 @@ static int qlcnic_sriov_pf_init(struct qlcnic_adapter *adapter)
 	if (err)
 		return err;
 
+	if (qlcnic_84xx_check(adapter)) {
+		err = qlcnic_sriov_pf_cfg_flood(adapter);
+		if (err)
+			goto disable_vlan_filtering;
+	}
+
 	err = qlcnic_sriov_pf_cfg_eswitch(adapter, func, 1);
 	if (err)
 		goto disable_vlan_filtering;
@@ -512,6 +581,8 @@ static int __qlcnic_pci_sriov_enable(struct qlcnic_adapter *adapter,
 	if (err)
 		goto del_flr_queue;
 
+	qlcnic_sriov_alloc_vlans(adapter);
+
 	err = qlcnic_sriov_pf_enable(adapter, num_vfs);
 	return err;
 
@@ -609,7 +680,7 @@ static int qlcnic_sriov_set_vf_acl(struct qlcnic_adapter *adapter, u8 func)
 
 	if (vp->vlan_mode == QLC_PVID_MODE) {
 		cmd.req.arg[2] |= BIT_6;
-		cmd.req.arg[3] |= vp->vlan << 8;
+		cmd.req.arg[3] |= vp->pvid << 8;
 	}
 
 	err = qlcnic_issue_cmd(adapter, &cmd);
@@ -644,10 +715,13 @@ static int qlcnic_sriov_pf_channel_cfg_cmd(struct qlcnic_bc_trans *trans,
 	struct qlcnic_vf_info *vf = trans->vf;
 	struct qlcnic_vport *vp = vf->vp;
 	struct qlcnic_adapter *adapter;
+	struct qlcnic_sriov *sriov;
 	u16 func = vf->pci_func;
+	size_t size;
 	int err;
 
 	adapter = vf->adapter;
+	sriov = adapter->ahw->sriov;
 
 	if (trans->req_hdr->cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT) {
 		err = qlcnic_sriov_pf_config_vport(adapter, 1, func);
@@ -657,8 +731,12 @@ static int qlcnic_sriov_pf_channel_cfg_cmd(struct qlcnic_bc_trans *trans,
 				qlcnic_sriov_pf_config_vport(adapter, 0, func);
 		}
 	} else {
-		if (vp->vlan_mode == QLC_GUEST_VLAN_MODE)
-			vp->vlan = 0;
+		if (vp->vlan_mode == QLC_GUEST_VLAN_MODE) {
+			size = sizeof(*vf->sriov_vlans);
+			size = size * sriov->num_allowed_vlans;
+			memset(vf->sriov_vlans, 0, size);
+		}
+
 		err = qlcnic_sriov_pf_config_vport(adapter, 0, func);
 	}
 
@@ -680,20 +758,23 @@ err_out:
 }
 
 static int qlcnic_sriov_cfg_vf_def_mac(struct qlcnic_adapter *adapter,
-				       struct qlcnic_vport *vp,
-				       u16 func, u16 vlan, u8 op)
+				       struct qlcnic_vf_info *vf,
+				       u16 vlan, u8 op)
 {
 	struct qlcnic_cmd_args cmd;
 	struct qlcnic_macvlan_mbx mv;
+	struct qlcnic_vport *vp;
 	u8 *addr;
 	int err;
 	u32 *buf;
 	int vpid;
 
+	vp = vf->vp;
+
 	if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_MAC_VLAN))
 		return -ENOMEM;
 
-	vpid = qlcnic_sriov_pf_get_vport_handle(adapter, func);
+	vpid = qlcnic_sriov_pf_get_vport_handle(adapter, vf->pci_func);
 	if (vpid < 0) {
 		err = -EINVAL;
 		goto out;
@@ -737,6 +818,35 @@ static int qlcnic_sriov_validate_create_rx_ctx(struct qlcnic_cmd_args *cmd)
 	return 0;
 }
 
+static void qlcnic_83xx_cfg_default_mac_vlan(struct qlcnic_adapter *adapter,
+					     struct qlcnic_vf_info *vf,
+					     int opcode)
+{
+	struct qlcnic_sriov *sriov;
+	u16 vlan;
+	int i;
+
+	sriov = adapter->ahw->sriov;
+
+	mutex_lock(&vf->vlan_list_lock);
+	if (vf->num_vlan) {
+		for (i = 0; i < sriov->num_allowed_vlans; i++) {
+			vlan = vf->sriov_vlans[i];
+			if (vlan)
+				qlcnic_sriov_cfg_vf_def_mac(adapter, vf, vlan,
+							    opcode);
+		}
+	}
+	mutex_unlock(&vf->vlan_list_lock);
+
+	if (vf->vp->vlan_mode != QLC_PVID_MODE) {
+		if (qlcnic_83xx_pf_check(adapter) &&
+		    qlcnic_sriov_check_any_vlan(vf))
+			return;
+		qlcnic_sriov_cfg_vf_def_mac(adapter, vf, 0, opcode);
+	}
+}
+
 static int qlcnic_sriov_pf_create_rx_ctx_cmd(struct qlcnic_bc_trans *tran,
 					     struct qlcnic_cmd_args *cmd)
 {
@@ -744,7 +854,6 @@ static int qlcnic_sriov_pf_create_rx_ctx_cmd(struct qlcnic_bc_trans *tran,
 	struct qlcnic_adapter *adapter = vf->adapter;
 	struct qlcnic_rcv_mbx_out *mbx_out;
 	int err;
-	u16 vlan;
 
 	err = qlcnic_sriov_validate_create_rx_ctx(cmd);
 	if (err) {
@@ -755,12 +864,10 @@ static int qlcnic_sriov_pf_create_rx_ctx_cmd(struct qlcnic_bc_trans *tran,
 	cmd->req.arg[6] = vf->vp->handle;
 	err = qlcnic_issue_cmd(adapter, cmd);
 
-	vlan = vf->vp->vlan;
 	if (!err) {
 		mbx_out = (struct qlcnic_rcv_mbx_out *)&cmd->rsp.arg[1];
 		vf->rx_ctx_id = mbx_out->ctx_id;
-		qlcnic_sriov_cfg_vf_def_mac(adapter, vf->vp, vf->pci_func,
-					    vlan, QLCNIC_MAC_ADD);
+		qlcnic_83xx_cfg_default_mac_vlan(adapter, vf, QLCNIC_MAC_ADD);
 	} else {
 		vf->rx_ctx_id = 0;
 	}
@@ -844,7 +951,6 @@ static int qlcnic_sriov_pf_del_rx_ctx_cmd(struct qlcnic_bc_trans *trans,
 	struct qlcnic_vf_info *vf = trans->vf;
 	struct qlcnic_adapter *adapter = vf->adapter;
 	int err;
-	u16 vlan;
 
 	err = qlcnic_sriov_validate_del_rx_ctx(vf, cmd);
 	if (err) {
@@ -852,9 +958,7 @@ static int qlcnic_sriov_pf_del_rx_ctx_cmd(struct qlcnic_bc_trans *trans,
 		return err;
 	}
 
-	vlan = vf->vp->vlan;
-	qlcnic_sriov_cfg_vf_def_mac(adapter, vf->vp, vf->pci_func,
-				    vlan, QLCNIC_MAC_DEL);
+	qlcnic_83xx_cfg_default_mac_vlan(adapter, vf, QLCNIC_MAC_DEL);
 	cmd->req.arg[1] |= vf->vp->handle << 16;
 	err = qlcnic_issue_cmd(adapter, cmd);
 
@@ -1102,6 +1206,13 @@ static int qlcnic_sriov_validate_cfg_macvlan(struct qlcnic_adapter *adapter,
 	struct qlcnic_vport *vp = vf->vp;
 	u8 op, new_op;
 
+	if (((cmd->req.arg[1] & QLC_MAC_OPCODE_MASK) == QLC_MAC_STAR_ADD) ||
+	    ((cmd->req.arg[1] & QLC_MAC_OPCODE_MASK) == QLC_MAC_STAR_DEL)) {
+		netdev_err(adapter->netdev, "MAC + any VLAN filter not allowed from VF %d\n",
+			   vf->pci_func);
+		return -EINVAL;
+	}
+
 	if (!(cmd->req.arg[1] & BIT_8))
 		return -EINVAL;
 
@@ -1121,7 +1232,7 @@ static int qlcnic_sriov_validate_cfg_macvlan(struct qlcnic_adapter *adapter,
 		cmd->req.arg[1] &= ~0x7;
 		new_op = (op == QLCNIC_MAC_ADD || op == QLCNIC_MAC_VLAN_ADD) ?
 			 QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_VLAN_DEL;
-		cmd->req.arg[3] |= vp->vlan << 16;
+		cmd->req.arg[3] |= vp->pvid << 16;
 		cmd->req.arg[1] |= new_op;
 	}
 
@@ -1191,8 +1302,10 @@ static int qlcnic_sriov_pf_get_acl_cmd(struct qlcnic_bc_trans *trans,
 	struct qlcnic_vport *vp = vf->vp;
 	u8 cmd_op, mode = vp->vlan_mode;
 	struct qlcnic_adapter *adapter;
+	struct qlcnic_sriov *sriov;
 
 	adapter = vf->adapter;
+	sriov = adapter->ahw->sriov;
 
 	cmd_op = trans->req_hdr->cmd_op;
 	cmd->rsp.arg[0] |= 1 << 25;
@@ -1206,10 +1319,10 @@ static int qlcnic_sriov_pf_get_acl_cmd(struct qlcnic_bc_trans *trans,
 	switch (mode) {
 	case QLC_GUEST_VLAN_MODE:
 		cmd->rsp.arg[1] = mode | 1 << 8;
-		cmd->rsp.arg[2] = 1 << 16;
+		cmd->rsp.arg[2] = sriov->num_allowed_vlans << 16;
 		break;
 	case QLC_PVID_MODE:
-		cmd->rsp.arg[1] = mode | 1 << 8 | vp->vlan << 16;
+		cmd->rsp.arg[1] = mode | 1 << 8 | vp->pvid << 16;
 		break;
 	}
 
@@ -1217,24 +1330,27 @@ static int qlcnic_sriov_pf_get_acl_cmd(struct qlcnic_bc_trans *trans,
 }
 
 static int qlcnic_sriov_pf_del_guest_vlan(struct qlcnic_adapter *adapter,
-					  struct qlcnic_vf_info *vf)
-
+					  struct qlcnic_vf_info *vf,
+					  struct qlcnic_cmd_args *cmd)
 {
-	struct qlcnic_vport *vp = vf->vp;
+	struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+	u16 vlan;
 
-	if (!vp->vlan)
+	if (!qlcnic_sriov_check_any_vlan(vf))
 		return -EINVAL;
 
+	vlan = cmd->req.arg[1] >> 16;
 	if (!vf->rx_ctx_id) {
-		vp->vlan = 0;
+		qlcnic_sriov_del_vlan_id(sriov, vf, vlan);
 		return 0;
 	}
 
-	qlcnic_sriov_cfg_vf_def_mac(adapter, vp, vf->pci_func,
-				    vp->vlan, QLCNIC_MAC_DEL);
-	vp->vlan = 0;
-	qlcnic_sriov_cfg_vf_def_mac(adapter, vp, vf->pci_func,
-				    0, QLCNIC_MAC_ADD);
+	qlcnic_sriov_cfg_vf_def_mac(adapter, vf, vlan, QLCNIC_MAC_DEL);
+	qlcnic_sriov_del_vlan_id(sriov, vf, vlan);
+
+	if (qlcnic_83xx_pf_check(adapter))
+		qlcnic_sriov_cfg_vf_def_mac(adapter, vf,
+					    0, QLCNIC_MAC_ADD);
 	return 0;
 }
 
@@ -1242,32 +1358,37 @@ static int qlcnic_sriov_pf_add_guest_vlan(struct qlcnic_adapter *adapter,
 					  struct qlcnic_vf_info *vf,
 					  struct qlcnic_cmd_args *cmd)
 {
-	struct qlcnic_vport *vp = vf->vp;
+	struct qlcnic_sriov *sriov = adapter->ahw->sriov;
 	int err = -EIO;
+	u16 vlan;
 
-	if (vp->vlan)
+	if (qlcnic_83xx_pf_check(adapter) && qlcnic_sriov_check_any_vlan(vf))
 		return err;
 
+	vlan = cmd->req.arg[1] >> 16;
+
 	if (!vf->rx_ctx_id) {
-		vp->vlan = cmd->req.arg[1] >> 16;
+		qlcnic_sriov_add_vlan_id(sriov, vf, vlan);
 		return 0;
 	}
 
-	err = qlcnic_sriov_cfg_vf_def_mac(adapter, vp, vf->pci_func,
-					  0, QLCNIC_MAC_DEL);
-	if (err)
-		return err;
+	if (qlcnic_83xx_pf_check(adapter)) {
+		err = qlcnic_sriov_cfg_vf_def_mac(adapter, vf, 0,
+						  QLCNIC_MAC_DEL);
+		if (err)
+			return err;
+	}
 
-	vp->vlan = cmd->req.arg[1] >> 16;
-	err = qlcnic_sriov_cfg_vf_def_mac(adapter, vp, vf->pci_func,
-					  vp->vlan, QLCNIC_MAC_ADD);
+	err = qlcnic_sriov_cfg_vf_def_mac(adapter, vf, vlan, QLCNIC_MAC_ADD);
 
 	if (err) {
-		qlcnic_sriov_cfg_vf_def_mac(adapter, vp, vf->pci_func,
-					    0, QLCNIC_MAC_ADD);
-		vp->vlan = 0;
+		if (qlcnic_83xx_pf_check(adapter))
+			qlcnic_sriov_cfg_vf_def_mac(adapter, vf, 0,
+						    QLCNIC_MAC_ADD);
+		return err;
 	}
 
+	qlcnic_sriov_add_vlan_id(sriov, vf, vlan);
 	return err;
 }
 
@@ -1290,7 +1411,7 @@ static int qlcnic_sriov_pf_cfg_guest_vlan_cmd(struct qlcnic_bc_trans *tran,
 	if (op)
 		err = qlcnic_sriov_pf_add_guest_vlan(adapter, vf, cmd);
 	else
-		err = qlcnic_sriov_pf_del_guest_vlan(adapter, vf);
+		err = qlcnic_sriov_pf_del_guest_vlan(adapter, vf, cmd);
 
 	cmd->rsp.arg[0] |= err ? 2 << 25 : 1 << 25;
 	return err;
@@ -1300,8 +1421,6 @@ static const int qlcnic_pf_passthru_supp_cmds[] = {
 	QLCNIC_CMD_GET_STATISTICS,
 	QLCNIC_CMD_GET_PORT_CONFIG,
 	QLCNIC_CMD_GET_LINK_STATUS,
-	QLCNIC_CMD_DCB_QUERY_CAP,
-	QLCNIC_CMD_DCB_QUERY_PARAM,
 	QLCNIC_CMD_INIT_NIC_FUNC,
 	QLCNIC_CMD_STOP_NIC_FUNC,
 };
@@ -1597,7 +1716,8 @@ void qlcnic_sriov_pf_handle_flr(struct qlcnic_sriov *sriov,
 	}
 
 	if (vp->vlan_mode == QLC_GUEST_VLAN_MODE)
-		vp->vlan = 0;
+		memset(vf->sriov_vlans, 0,
+		       sizeof(*vf->sriov_vlans) * sriov->num_allowed_vlans);
 
 	qlcnic_sriov_schedule_flr(sriov, vf, qlcnic_sriov_pf_process_flr);
 	netdev_info(dev, "FLR received for PCI func %d\n", vf->pci_func);
@@ -1767,20 +1887,22 @@ int qlcnic_sriov_set_vf_vlan(struct net_device *netdev, int vf,
 		return -EOPNOTSUPP;
 	}
 
+	memset(vf_info->sriov_vlans, 0,
+	       sizeof(*vf_info->sriov_vlans) * sriov->num_allowed_vlans);
+
 	switch (vlan) {
 	case 4095:
-		vp->vlan = 0;
 		vp->vlan_mode = QLC_GUEST_VLAN_MODE;
 		break;
 	case 0:
 		vp->vlan_mode = QLC_NO_VLAN_MODE;
-		vp->vlan = 0;
 		vp->qos = 0;
 		break;
 	default:
 		vp->vlan_mode = QLC_PVID_MODE;
-		vp->vlan = vlan;
+		qlcnic_sriov_add_vlan_id(sriov, vf_info, vlan);
 		vp->qos = qos;
+		vp->pvid = vlan;
 	}
 
 	netdev_info(netdev, "Setting VLAN %d, QoS %d, for VF %d\n",
@@ -1795,7 +1917,7 @@ static __u32 qlcnic_sriov_get_vf_vlan(struct qlcnic_adapter *adapter,
 
 	switch (vp->vlan_mode) {
 	case QLC_PVID_MODE:
-		vlan = vp->vlan;
+		vlan = vp->pvid;
 		break;
 	case QLC_GUEST_VLAN_MODE:
 		vlan = MAX_VLAN_ID;
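
Aside: the PF-side hunks above budget MAC filters per function: each VF gets num_allowed_vlans + 1 unicast filters, QLCNIC_SRIOV_VF_MAX_MAC times that many multicast filters, and the PF keeps the remainder, with the allowed-VLAN count itself derived from the multicast pool. The arithmetic condensed into a sketch with made-up resource numbers (illustration only, not driver code):

#include <stdio.h>

#define VF_MAX_MAC 7	/* mirrors QLCNIC_SRIOV_VF_MAX_MAC after this patch */

int main(void)
{
	/* Hypothetical firmware resource pool and VF count. */
	unsigned int mcast_filters = 448, ucast_filters = 128, num_vfs = 7;
	unsigned int total_fn = num_vfs + 1;		/* VFs plus the PF */

	/* Guest VLANs allowed per VF, derived from the multicast pool. */
	unsigned int allowed_vlans = mcast_filters / (VF_MAX_MAC * total_fn) - 1;
	unsigned int num_macs = allowed_vlans + 1;	/* filters per VF */

	printf("allowed guest VLANs per VF : %u\n", allowed_vlans);
	printf("per-VF unicast filters     : %u\n", num_macs);
	printf("per-VF multicast filters   : %u\n", num_macs * VF_MAX_MAC);
	printf("PF unicast filters left    : %u\n",
	       ucast_filters - num_macs * num_vfs);
	printf("PF multicast filters left  : %u\n",
	       mcast_filters - num_macs * VF_MAX_MAC * num_vfs);
	return 0;
}
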
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
index 1a9f8a400e50..3d64113a35af 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
@@ -6,7 +6,6 @@
  */
 
 #include <linux/slab.h>
-#include <linux/vmalloc.h>
 #include <linux/interrupt.h>
 
 #include "qlcnic.h"
@@ -127,6 +126,8 @@ static int qlcnic_83xx_store_beacon(struct qlcnic_adapter *adapter,
 	if (kstrtoul(buf, 2, &h_beacon))
 		return -EINVAL;
 
+	qlcnic_get_beacon_state(adapter);
+
 	if (ahw->beacon_state == h_beacon)
 		return len;
 
@@ -158,7 +159,7 @@ static int qlcnic_82xx_store_beacon(struct qlcnic_adapter *adapter,
 	struct qlcnic_hardware_context *ahw = adapter->ahw;
 	int err, drv_sds_rings = adapter->drv_sds_rings;
 	u16 beacon;
-	u8 h_beacon_state, b_state, b_rate;
+	u8 b_state, b_rate;
 
 	if (len != sizeof(u16))
 		return QL_STATUS_INVALID_PARAM;
@@ -168,18 +169,7 @@ static int qlcnic_82xx_store_beacon(struct qlcnic_adapter *adapter,
 	if (err)
 		return err;
 
-	if (ahw->extra_capability[0] & QLCNIC_FW_CAPABILITY_2_BEACON) {
-		err = qlcnic_get_beacon_state(adapter, &h_beacon_state);
-		if (err) {
-			netdev_err(adapter->netdev,
-				   "Failed to get current beacon state\n");
-		} else {
-			if (h_beacon_state == QLCNIC_BEACON_DISABLE)
-				ahw->beacon_state = 0;
-			else if (h_beacon_state == QLCNIC_BEACON_EANBLE)
-				ahw->beacon_state = 2;
-		}
-	}
+	qlcnic_get_beacon_state(adapter);
 
 	if (ahw->beacon_state == b_state)
 		return len;
@@ -360,10 +350,28 @@ static ssize_t qlcnic_sysfs_write_mem(struct file *filp, struct kobject *kobj,
 	return size;
 }
 
-static int qlcnic_is_valid_nic_func(struct qlcnic_adapter *adapter, u8 pci_func)
+static u32 qlcnic_get_pci_func_count(struct qlcnic_adapter *adapter)
 {
+	struct qlcnic_hardware_context *ahw = adapter->ahw;
+	u32 count = 0;
+
+	if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
+		return ahw->total_nic_func;
+
+	if (ahw->total_pci_func <= QLC_DEFAULT_VNIC_COUNT)
+		count = QLC_DEFAULT_VNIC_COUNT;
+	else
+		count = ahw->max_vnic_func;
+
+	return count;
+}
+
+int qlcnic_is_valid_nic_func(struct qlcnic_adapter *adapter, u8 pci_func)
+{
+	u32 pci_func_count = qlcnic_get_pci_func_count(adapter);
 	int i;
-	for (i = 0; i < adapter->ahw->act_pci_func; i++) {
+
+	for (i = 0; i < pci_func_count; i++) {
 		if (adapter->npars[i].pci_func == pci_func)
 			return i;
 	}
@@ -382,7 +390,6 @@ static int validate_pm_config(struct qlcnic_adapter *adapter,
 		src_pci_func = pm_cfg[i].pci_func;
 		dest_pci_func = pm_cfg[i].dest_npar;
 		src_index = qlcnic_is_valid_nic_func(adapter, src_pci_func);
-
 		if (src_index < 0)
 			return QL_STATUS_INVALID_PARAM;
 
@@ -439,6 +446,8 @@ static ssize_t qlcnic_sysfs_write_pm_config(struct file *filp,
 	for (i = 0; i < count; i++) {
 		pci_func = pm_cfg[i].pci_func;
 		index = qlcnic_is_valid_nic_func(adapter, pci_func);
+		if (index < 0)
+			return QL_STATUS_INVALID_PARAM;
 		id = adapter->npars[index].phy_port;
 		adapter->npars[index].enable_pm = !!pm_cfg[i].action;
 		adapter->npars[index].dest_npar = id;
@@ -455,17 +464,19 @@ static ssize_t qlcnic_sysfs_read_pm_config(struct file *filp,
 {
 	struct device *dev = container_of(kobj, struct device, kobj);
 	struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
-	struct qlcnic_pm_func_cfg pm_cfg[QLCNIC_MAX_PCI_FUNC];
-	int i;
+	u32 pci_func_count = qlcnic_get_pci_func_count(adapter);
+	struct qlcnic_pm_func_cfg *pm_cfg;
+	int i, pm_cfg_size;
 	u8 pci_func;
 
-	if (size != sizeof(pm_cfg))
+	pm_cfg_size = pci_func_count * sizeof(*pm_cfg);
+	if (size != pm_cfg_size)
 		return QL_STATUS_INVALID_PARAM;
 
-	memset(&pm_cfg, 0,
-	       sizeof(struct qlcnic_pm_func_cfg) * QLCNIC_MAX_PCI_FUNC);
+	memset(buf, 0, pm_cfg_size);
+	pm_cfg = (struct qlcnic_pm_func_cfg *)buf;
 
-	for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
+	for (i = 0; i < pci_func_count; i++) {
 		pci_func = adapter->npars[i].pci_func;
 		if (!adapter->npars[i].active)
 			continue;
@@ -477,26 +488,26 @@ static ssize_t qlcnic_sysfs_read_pm_config(struct file *filp,
 		pm_cfg[pci_func].dest_npar = 0;
 		pm_cfg[pci_func].pci_func = i;
 	}
-	memcpy(buf, &pm_cfg, size);
-
 	return size;
 }
 
 static int validate_esw_config(struct qlcnic_adapter *adapter,
 			       struct qlcnic_esw_func_cfg *esw_cfg, int count)
 {
+	u32 pci_func_count = qlcnic_get_pci_func_count(adapter);
+	struct qlcnic_hardware_context *ahw = adapter->ahw;
+	int i, ret;
 	u32 op_mode;
 	u8 pci_func;
-	int i, ret;
 
 	if (qlcnic_82xx_check(adapter))
-		op_mode = readl(adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE);
+		op_mode = readl(ahw->pci_base0 + QLCNIC_DRV_OP_MODE);
 	else
-		op_mode = QLCRDX(adapter->ahw, QLC_83XX_DRV_OP_MODE);
+		op_mode = QLCRDX(ahw, QLC_83XX_DRV_OP_MODE);
 
 	for (i = 0; i < count; i++) {
 		pci_func = esw_cfg[i].pci_func;
-		if (pci_func >= QLCNIC_MAX_PCI_FUNC)
+		if (pci_func >= pci_func_count)
 			return QL_STATUS_INVALID_PARAM;
 
 		if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC)
@@ -600,6 +611,8 @@ static ssize_t qlcnic_sysfs_write_esw_config(struct file *file,
 	for (i = 0; i < count; i++) {
 		pci_func = esw_cfg[i].pci_func;
 		index = qlcnic_is_valid_nic_func(adapter, pci_func);
+		if (index < 0)
+			return QL_STATUS_INVALID_PARAM;
 		npar = &adapter->npars[index];
 		switch (esw_cfg[i].op_mode) {
 		case QLCNIC_PORT_DEFAULTS:
@@ -629,16 +642,19 @@ static ssize_t qlcnic_sysfs_read_esw_config(struct file *file,
 {
 	struct device *dev = container_of(kobj, struct device, kobj);
 	struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
-	struct qlcnic_esw_func_cfg esw_cfg[QLCNIC_MAX_PCI_FUNC];
+	u32 pci_func_count = qlcnic_get_pci_func_count(adapter);
+	struct qlcnic_esw_func_cfg *esw_cfg;
+	size_t esw_cfg_size;
 	u8 i, pci_func;
 
-	if (size != sizeof(esw_cfg))
+	esw_cfg_size = pci_func_count * sizeof(*esw_cfg);
+	if (size != esw_cfg_size)
 		return QL_STATUS_INVALID_PARAM;
 
-	memset(&esw_cfg, 0,
-	       sizeof(struct qlcnic_esw_func_cfg) * QLCNIC_MAX_PCI_FUNC);
+	memset(buf, 0, esw_cfg_size);
+	esw_cfg = (struct qlcnic_esw_func_cfg *)buf;
 
-	for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
+	for (i = 0; i < pci_func_count; i++) {
 		pci_func = adapter->npars[i].pci_func;
 		if (!adapter->npars[i].active)
 			continue;
@@ -650,9 +666,6 @@ static ssize_t qlcnic_sysfs_read_esw_config(struct file *file,
 		if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg[pci_func]))
 			return QL_STATUS_INVALID_PARAM;
 	}
-
-	memcpy(buf, &esw_cfg, size);
-
 	return size;
 }
 
@@ -711,6 +724,8 @@ static ssize_t qlcnic_sysfs_write_npar_config(struct file *file,
 		if (ret)
 			return ret;
 		index = qlcnic_is_valid_nic_func(adapter, pci_func);
+		if (index < 0)
+			return QL_STATUS_INVALID_PARAM;
 		adapter->npars[index].min_bw = nic_info.min_tx_bw;
 		adapter->npars[index].max_bw = nic_info.max_tx_bw;
 	}
@@ -726,27 +741,28 @@ static ssize_t qlcnic_sysfs_read_npar_config(struct file *file,
 {
 	struct device *dev = container_of(kobj, struct device, kobj);
 	struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+	u32 pci_func_count = qlcnic_get_pci_func_count(adapter);
+	struct qlcnic_npar_func_cfg *np_cfg;
 	struct qlcnic_info nic_info;
-	struct qlcnic_npar_func_cfg np_cfg[QLCNIC_MAX_PCI_FUNC];
+	size_t np_cfg_size;
 	int i, ret;
 
-	if (size != sizeof(np_cfg))
+	np_cfg_size = pci_func_count * sizeof(*np_cfg);
+	if (size != np_cfg_size)
 		return QL_STATUS_INVALID_PARAM;
 
 	memset(&nic_info, 0, sizeof(struct qlcnic_info));
-	memset(&np_cfg, 0,
-	       sizeof(struct qlcnic_npar_func_cfg) * QLCNIC_MAX_PCI_FUNC);
+	memset(buf, 0, np_cfg_size);
+	np_cfg = (struct qlcnic_npar_func_cfg *)buf;
 
-	for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
+	for (i = 0; i < pci_func_count; i++) {
 		if (qlcnic_is_valid_nic_func(adapter, i) < 0)
 			continue;
 		ret = qlcnic_get_nic_info(adapter, &nic_info, i);
 		if (ret)
 			return ret;
-
 		if (!adapter->npars[i].eswitch_status)
 			continue;
-
 		np_cfg[i].pci_func = i;
 		np_cfg[i].op_mode = (u8)nic_info.op_mode;
 		np_cfg[i].port_num = nic_info.phys_port;
@@ -756,8 +772,6 @@ static ssize_t qlcnic_sysfs_read_npar_config(struct file *file,
 		np_cfg[i].max_tx_queues = nic_info.max_tx_ques;
 		np_cfg[i].max_rx_queues = nic_info.max_rx_ques;
 	}
-
-	memcpy(buf, &np_cfg, size);
 	return size;
 }
 
@@ -769,6 +783,7 @@ static ssize_t qlcnic_sysfs_get_port_stats(struct file *file,
 {
 	struct device *dev = container_of(kobj, struct device, kobj);
 	struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+	u32 pci_func_count = qlcnic_get_pci_func_count(adapter);
 	struct qlcnic_esw_statistics port_stats;
 	int ret;
 
@@ -778,7 +793,7 @@ static ssize_t qlcnic_sysfs_get_port_stats(struct file *file,
 	if (size != sizeof(struct qlcnic_esw_statistics))
 		return QL_STATUS_INVALID_PARAM;
 
-	if (offset >= QLCNIC_MAX_PCI_FUNC)
+	if (offset >= pci_func_count)
 		return QL_STATUS_INVALID_PARAM;
 
 	memset(&port_stats, 0, size);
@@ -869,12 +884,13 @@ static ssize_t qlcnic_sysfs_clear_port_stats(struct file *file,
 
 	struct device *dev = container_of(kobj, struct device, kobj);
 	struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+	u32 pci_func_count = qlcnic_get_pci_func_count(adapter);
 	int ret;
 
 	if (qlcnic_83xx_check(adapter))
 		return QLC_STATUS_UNSUPPORTED_CMD;
 
-	if (offset >= QLCNIC_MAX_PCI_FUNC)
+	if (offset >= pci_func_count)
 		return QL_STATUS_INVALID_PARAM;
 
 	ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset,
@@ -898,14 +914,17 @@ static ssize_t qlcnic_sysfs_read_pci_config(struct file *file,
 {
 	struct device *dev = container_of(kobj, struct device, kobj);
 	struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
-	struct qlcnic_pci_func_cfg pci_cfg[QLCNIC_MAX_PCI_FUNC];
+	u32 pci_func_count = qlcnic_get_pci_func_count(adapter);
+	struct qlcnic_pci_func_cfg *pci_cfg;
 	struct qlcnic_pci_info *pci_info;
+	size_t pci_cfg_sz;
 	int i, ret;
 
-	if (size != sizeof(pci_cfg))
+	pci_cfg_sz = pci_func_count * sizeof(*pci_cfg);
+	if (size != pci_cfg_sz)
 		return QL_STATUS_INVALID_PARAM;
 
-	pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL);
+	pci_info = kcalloc(pci_func_count, sizeof(*pci_info), GFP_KERNEL);
 	if (!pci_info)
 		return -ENOMEM;
 
@@ -915,19 +934,17 @@ static ssize_t qlcnic_sysfs_read_pci_config(struct file *file,
 		return ret;
 	}
 
-	memset(&pci_cfg, 0,
-	       sizeof(struct qlcnic_pci_func_cfg) * QLCNIC_MAX_PCI_FUNC);
-
-	for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
+	pci_cfg = (struct qlcnic_pci_func_cfg *)buf;
+	for (i = 0; i < pci_func_count; i++) {
 		pci_cfg[i].pci_func = pci_info[i].id;
 		pci_cfg[i].func_type = pci_info[i].type;
+		pci_cfg[i].func_state = 0;
 		pci_cfg[i].port_num = pci_info[i].default_port;
 		pci_cfg[i].min_bw = pci_info[i].tx_min_bw;
 		pci_cfg[i].max_bw = pci_info[i].tx_max_bw;
 		memcpy(&pci_cfg[i].def_mac_addr, &pci_info[i].mac, ETH_ALEN);
 	}
 
-	memcpy(buf, &pci_cfg, size);
 	kfree(pci_info);
 	return size;
 }
@@ -1269,7 +1286,7 @@ void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter)
 		device_remove_file(dev, &dev_attr_bridged_mode);
 }
 
-void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
+static void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
 {
 	struct device *dev = &adapter->pdev->dev;
 
@@ -1308,7 +1325,7 @@ void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
 		dev_info(dev, "failed to create eswitch stats sysfs entry");
 }
 
-void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
+static void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
 {
 	struct device *dev = &adapter->pdev->dev;
 
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge.h b/drivers/net/ethernet/qlogic/qlge/qlge.h
index 03517478e589..ef332708e5f2 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge.h
+++ b/drivers/net/ethernet/qlogic/qlge/qlge.h
@@ -2248,7 +2248,6 @@ int ql_mb_get_port_cfg(struct ql_adapter *qdev);
 int ql_mb_set_port_cfg(struct ql_adapter *qdev);
 int ql_wait_fifo_empty(struct ql_adapter *qdev);
 void ql_get_dump(struct ql_adapter *qdev, void *buff);
-void ql_gen_reg_dump(struct ql_adapter *qdev, struct ql_reg_dump *mpi_coredump);
 netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev);
 void ql_check_lb_frame(struct ql_adapter *, struct sk_buff *);
 int ql_own_firmware(struct ql_adapter *qdev);
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
index 6bc5db703920..829be21f97b2 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
@@ -1242,8 +1242,8 @@ static void ql_get_core_dump(struct ql_adapter *qdev)
 	ql_queue_fw_error(qdev);
 }
 
-void ql_gen_reg_dump(struct ql_adapter *qdev,
-			struct ql_reg_dump *mpi_coredump)
+static void ql_gen_reg_dump(struct ql_adapter *qdev,
+			    struct ql_reg_dump *mpi_coredump)
 {
 	int i, status;
 
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c b/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
index 8dee1beb9854..c3c514e332b5 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
@@ -1,5 +1,4 @@
 #include <linux/kernel.h>
-#include <linux/init.h>
 #include <linux/types.h>
 #include <linux/module.h>
 #include <linux/list.h>
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index f705aeeba767..ce2cfddbed50 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -6,7 +6,6 @@
  *                      Ron Mercer <ron.mercer@qlogic.com>
  */
 #include <linux/kernel.h>
-#include <linux/init.h>
 #include <linux/bitops.h>
 #include <linux/types.h>
 #include <linux/module.h>
diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c
index 1e49ec5b2232..819b74cefd64 100644
--- a/drivers/net/ethernet/rdc/r6040.c
+++ b/drivers/net/ethernet/rdc/r6040.c
@@ -34,7 +34,6 @@
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/skbuff.h>
-#include <linux/init.h>
 #include <linux/delay.h>
 #include <linux/mii.h>
 #include <linux/ethtool.h>
@@ -222,6 +221,7 @@ static int r6040_phy_read(void __iomem *ioaddr, int phy_addr, int reg)
 		cmd = ioread16(ioaddr + MMDIO);
 		if (!(cmd & MDIO_READ))
 			break;
+		udelay(1);
 	}
 
 	if (limit < 0)
@@ -245,6 +245,7 @@ static int r6040_phy_write(void __iomem *ioaddr,
 		cmd = ioread16(ioaddr + MMDIO);
 		if (!(cmd & MDIO_WRITE))
 			break;
+		udelay(1);
 	}
 
 	return (limit < 0) ? -ETIMEDOUT : 0;
@@ -834,8 +835,8 @@ static netdev_tx_t r6040_start_xmit(struct sk_buff *skb,
 	/* Set TX descriptor & Transmit it */
 	lp->tx_free_desc--;
 	descptr = lp->tx_insert_ptr;
-	if (skb->len < MISR)
-		descptr->len = MISR;
+	if (skb->len < ETH_ZLEN)
+		descptr->len = ETH_ZLEN;
 	else
 		descptr->len = skb->len;
 
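
Aside: the r6040 change above replaces a misused register constant (MISR) with ETH_ZLEN, so short frames are padded up to the 60-byte Ethernet minimum before the descriptor length is written. The clamp in isolation (plain C; 60 mirrors the kernel's ETH_ZLEN):

#include <stdio.h>

#define ETH_MIN_FRAME_LEN 60	/* same value as the kernel's ETH_ZLEN */

/* Descriptor length for a TX frame: never below the Ethernet minimum,
 * otherwise the MAC would transmit a runt frame. */
static unsigned int tx_desc_len(unsigned int skb_len)
{
	return skb_len < ETH_MIN_FRAME_LEN ? ETH_MIN_FRAME_LEN : skb_len;
}

int main(void)
{
	printf("len(42)   -> %u\n", tx_desc_len(42));	/* padded to 60 */
	printf("len(1514) -> %u\n", tx_desc_len(1514));	/* unchanged */
	return 0;
}
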
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index c737f0ea5de7..91a67ae8f17b 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -21,7 +21,6 @@
 #include <linux/in.h>
 #include <linux/ip.h>
 #include <linux/tcp.h>
-#include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/dma-mapping.h>
 #include <linux/pm_runtime.h>
diff --git a/drivers/net/ethernet/renesas/Kconfig b/drivers/net/ethernet/renesas/Kconfig
index a30c4395b232..9e757c792d84 100644
--- a/drivers/net/ethernet/renesas/Kconfig
+++ b/drivers/net/ethernet/renesas/Kconfig
@@ -13,4 +13,4 @@ config SH_ETH
 	  Renesas SuperH Ethernet device driver.
 	  This driver supporting CPUs are:
 		- SH7619, SH7710, SH7712, SH7724, SH7734, SH7763, SH7757,
-		  R8A7740, R8A777x and R8A7790.
+		  R8A7740, R8A777x and R8A779x.
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index d256ce19d4de..040cb94e8219 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -1,5 +1,4 @@
-/*
- *  SuperH Ethernet device driver
+/*  SuperH Ethernet device driver
  *
  *  Copyright (C) 2006-2012 Nobuhiro Iwamatsu
  *  Copyright (C) 2008-2013 Renesas Solutions Corp.
@@ -13,15 +12,11 @@
  *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  *  more details.
- *  You should have received a copy of the GNU General Public License along with
- *  this program; if not, write to the Free Software Foundation, Inc.,
- *  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
  *
  *  The full GNU General Public License is included in this distribution in
  *  the file called "COPYING".
  */
 
-#include <linux/init.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/spinlock.h>
@@ -148,6 +143,65 @@ static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = {
 	[FWALCR1]	= 0x00b4,
 };
 
+static const u16 sh_eth_offset_fast_rz[SH_ETH_MAX_REGISTER_OFFSET] = {
+	[EDSR]		= 0x0000,
+	[EDMR]		= 0x0400,
+	[EDTRR]		= 0x0408,
+	[EDRRR]		= 0x0410,
+	[EESR]		= 0x0428,
+	[EESIPR]	= 0x0430,
+	[TDLAR]		= 0x0010,
+	[TDFAR]		= 0x0014,
+	[TDFXR]		= 0x0018,
+	[TDFFR]		= 0x001c,
+	[RDLAR]		= 0x0030,
+	[RDFAR]		= 0x0034,
+	[RDFXR]		= 0x0038,
+	[RDFFR]		= 0x003c,
+	[TRSCER]	= 0x0438,
+	[RMFCR]		= 0x0440,
+	[TFTR]		= 0x0448,
+	[FDR]		= 0x0450,
+	[RMCR]		= 0x0458,
+	[RPADIR]	= 0x0460,
+	[FCFTR]		= 0x0468,
+	[CSMR]		= 0x04E4,
+
+	[ECMR]		= 0x0500,
+	[RFLR]		= 0x0508,
+	[ECSR]		= 0x0510,
+	[ECSIPR]	= 0x0518,
+	[PIR]		= 0x0520,
+	[APR]		= 0x0554,
+	[MPR]		= 0x0558,
+	[PFTCR]		= 0x055c,
+	[PFRCR]		= 0x0560,
+	[TPAUSER]	= 0x0564,
+	[MAHR]		= 0x05c0,
+	[MALR]		= 0x05c8,
+	[CEFCR]		= 0x0740,
+	[FRECR]		= 0x0748,
+	[TSFRCR]	= 0x0750,
+	[TLFRCR]	= 0x0758,
+	[RFCR]		= 0x0760,
+	[MAFCR]		= 0x0778,
+
+	[ARSTR]		= 0x0000,
+	[TSU_CTRST]	= 0x0004,
+	[TSU_VTAG0]	= 0x0058,
+	[TSU_ADSBSY]	= 0x0060,
+	[TSU_TEN]	= 0x0064,
+	[TSU_ADRH0]	= 0x0100,
+	[TSU_ADRL0]	= 0x0104,
+	[TSU_ADRH31]	= 0x01f8,
+	[TSU_ADRL31]	= 0x01fc,
+
+	[TXNLCR0]	= 0x0080,
+	[TXALCR0]	= 0x0084,
+	[RXNLCR0]	= 0x0088,
+	[RXALCR0]	= 0x008C,
+};
+
 static const u16 sh_eth_offset_fast_rcar[SH_ETH_MAX_REGISTER_OFFSET] = {
 	[ECMR]		= 0x0300,
 	[RFLR]		= 0x0308,
@@ -314,12 +368,14 @@ static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
 	[TSU_ADRL31]	= 0x01fc,
 };
 
-static int sh_eth_is_gether(struct sh_eth_private *mdp)
+static bool sh_eth_is_gether(struct sh_eth_private *mdp)
 {
-	if (mdp->reg_offset == sh_eth_offset_gigabit)
-		return 1;
-	else
-		return 0;
+	return mdp->reg_offset == sh_eth_offset_gigabit;
+}
+
+static bool sh_eth_is_rz_fast_ether(struct sh_eth_private *mdp)
+{
+	return mdp->reg_offset == sh_eth_offset_fast_rz;
 }
 
 static void sh_eth_select_mii(struct net_device *ndev)
@@ -395,8 +451,8 @@ static struct sh_eth_cpu_data r8a777x_data = {
 	.hw_swap	= 1,
 };
 
-/* R8A7790 */
-static struct sh_eth_cpu_data r8a7790_data = {
+/* R8A7790/1 */
+static struct sh_eth_cpu_data r8a779x_data = {
 	.set_duplex	= sh_eth_set_duplex,
 	.set_rate	= sh_eth_set_rate_r8a777x,
 
@@ -646,8 +702,8 @@ static struct sh_eth_cpu_data sh7763_data = {
 	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
 
 	.tx_check	= EESR_TC1 | EESR_FTC,
-	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \
-			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \
+	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
+			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
 			  EESR_ECI,
 
 	.apr		= 1,
@@ -705,6 +761,38 @@ static struct sh_eth_cpu_data r8a7740_data = {
 	.shift_rd0	= 1,
 };
 
+/* R7S72100 */
+static struct sh_eth_cpu_data r7s72100_data = {
+	.chip_reset	= sh_eth_chip_reset,
+	.set_duplex	= sh_eth_set_duplex,
+
+	.register_type	= SH_ETH_REG_FAST_RZ,
+
+	.ecsr_value	= ECSR_ICD,
+	.ecsipr_value	= ECSIPR_ICDIP,
+	.eesipr_value	= 0xff7f009f,
+
+	.tx_check	= EESR_TC1 | EESR_FTC,
+	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
+			  EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
+			  EESR_TDE | EESR_ECI,
+	.fdr_value	= 0x0000070f,
+	.rmcr_value	= RMCR_RNC,
+
+	.no_psr		= 1,
+	.apr		= 1,
+	.mpr		= 1,
+	.tpauser	= 1,
+	.hw_swap	= 1,
+	.rpadir		= 1,
+	.rpadir_value   = 2 << 16,
+	.no_trimd	= 1,
+	.no_ade		= 1,
+	.hw_crc		= 1,
+	.tsu		= 1,
+	.shift_rd0	= 1,
+};
+
 static struct sh_eth_cpu_data sh7619_data = {
 	.register_type	= SH_ETH_REG_FAST_SH3_SH2,
 
@@ -732,7 +820,7 @@ static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
 		cd->ecsipr_value = DEFAULT_ECSIPR_INIT;
 
 	if (!cd->fcftr_value)
-		cd->fcftr_value = DEFAULT_FIFO_F_D_RFF | \
+		cd->fcftr_value = DEFAULT_FIFO_F_D_RFF |
 				  DEFAULT_FIFO_F_D_RFD;
 
 	if (!cd->fdr_value)
@@ -771,7 +859,7 @@ static int sh_eth_reset(struct net_device *ndev)
 	struct sh_eth_private *mdp = netdev_priv(ndev);
 	int ret = 0;
 
-	if (sh_eth_is_gether(mdp)) {
+	if (sh_eth_is_gether(mdp) || sh_eth_is_rz_fast_ether(mdp)) {
 		sh_eth_write(ndev, EDSR_ENALL, EDSR);
 		sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER,
 			     EDMR);
@@ -849,20 +937,17 @@ static inline __u32 edmac_to_cpu(struct sh_eth_private *mdp, u32 x)
 	return x;
 }
 
-/*
- * Program the hardware MAC address from dev->dev_addr.
- */
+/* Program the hardware MAC address from dev->dev_addr. */
 static void update_mac_address(struct net_device *ndev)
 {
 	sh_eth_write(ndev,
-		(ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
-		(ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR);
+		     (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
+		     (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR);
 	sh_eth_write(ndev,
-		(ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR);
+		     (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR);
 }
 
-/*
- * Get MAC address from SuperH MAC address register
+/* Get MAC address from SuperH MAC address register
  *
  * SuperH's Ethernet device doesn't have 'ROM' to MAC address.
  * This driver get MAC address that use by bootloader(U-boot or sh-ipl+g).
@@ -885,7 +970,7 @@ static void read_mac_address(struct net_device *ndev, unsigned char *mac)
 
 static unsigned long sh_eth_get_edtrr_trns(struct sh_eth_private *mdp)
 {
-	if (sh_eth_is_gether(mdp))
+	if (sh_eth_is_gether(mdp) || sh_eth_is_rz_fast_ether(mdp))
 		return EDTRR_TRNS_GETHER;
 	else
 		return EDTRR_TRNS_ETHER;
@@ -1019,8 +1104,10 @@ static void sh_eth_ring_format(struct net_device *ndev)
 	int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring;
 	int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring;
 
-	mdp->cur_rx = mdp->cur_tx = 0;
-	mdp->dirty_rx = mdp->dirty_tx = 0;
+	mdp->cur_rx = 0;
+	mdp->cur_tx = 0;
+	mdp->dirty_rx = 0;
+	mdp->dirty_tx = 0;
 
 	memset(mdp->rx_ring, 0, rx_ringsize);
 
@@ -1033,7 +1120,7 @@ static void sh_eth_ring_format(struct net_device *ndev)
 		if (skb == NULL)
 			break;
 		dma_map_single(&ndev->dev, skb->data, mdp->rx_buf_sz,
-				DMA_FROM_DEVICE);
+			       DMA_FROM_DEVICE);
 		sh_eth_set_receive_align(skb);
 
 		/* RX descriptor */
@@ -1046,7 +1133,8 @@ static void sh_eth_ring_format(struct net_device *ndev)
 		/* Rx descriptor address set */
 		if (i == 0) {
 			sh_eth_write(ndev, mdp->rx_desc_dma, RDLAR);
-			if (sh_eth_is_gether(mdp))
+			if (sh_eth_is_gether(mdp) ||
+			    sh_eth_is_rz_fast_ether(mdp))
 				sh_eth_write(ndev, mdp->rx_desc_dma, RDFAR);
 		}
 	}
@@ -1067,7 +1155,8 @@ static void sh_eth_ring_format(struct net_device *ndev)
 		if (i == 0) {
 			/* Tx descriptor address set */
 			sh_eth_write(ndev, mdp->tx_desc_dma, TDLAR);
-			if (sh_eth_is_gether(mdp))
+			if (sh_eth_is_gether(mdp) ||
+			    sh_eth_is_rz_fast_ether(mdp))
 				sh_eth_write(ndev, mdp->tx_desc_dma, TDFAR);
 		}
 	}
@@ -1081,8 +1170,7 @@ static int sh_eth_ring_init(struct net_device *ndev)
 	struct sh_eth_private *mdp = netdev_priv(ndev);
 	int rx_ringsize, tx_ringsize, ret = 0;
 
-	/*
-	 * +26 gets the maximum ethernet encapsulation, +7 & ~7 because the
+	/* +26 gets the maximum ethernet encapsulation, +7 & ~7 because the
 	 * card needs room to do 8 byte alignment, +2 so we can reserve
 	 * the first 2 bytes, and +16 gets room for the status word from the
 	 * card.
@@ -1257,7 +1345,7 @@ static int sh_eth_txfree(struct net_device *ndev)
 {
 	struct sh_eth_private *mdp = netdev_priv(ndev);
 	struct sh_eth_txdesc *txdesc;
-	int freeNum = 0;
+	int free_num = 0;
 	int entry = 0;
 
 	for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
@@ -1271,7 +1359,7 @@ static int sh_eth_txfree(struct net_device *ndev)
 					 txdesc->buffer_length, DMA_TO_DEVICE);
 			dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
 			mdp->tx_skbuff[entry] = NULL;
-			freeNum++;
+			free_num++;
 		}
 		txdesc->status = cpu_to_edmac(mdp, TD_TFP);
 		if (entry >= mdp->num_tx_ring - 1)
@@ -1280,7 +1368,7 @@ static int sh_eth_txfree(struct net_device *ndev)
 		ndev->stats.tx_packets++;
 		ndev->stats.tx_bytes += txdesc->buffer_length;
 	}
-	return freeNum;
+	return free_num;
 }
 
 /* Packet receive function */
@@ -1313,12 +1401,11 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
 		if (!(desc_status & RDFEND))
 			ndev->stats.rx_length_errors++;
 
-		/*
-		 * In case of almost all GETHER/ETHERs, the Receive Frame State
+		/* In case of almost all GETHER/ETHERs, the Receive Frame State
 		 * (RFS) bits in the Receive Descriptor 0 are from bit 9 to
-		 * bit 0. However, in case of the R8A7740's GETHER, the RFS
-		 * bits are from bit 25 to bit 16. So, the driver needs right
-		 * shifting by 16.
+		 * bit 0. However, in case of the R8A7740, R8A779x, and
+		 * R7S72100 the RFS bits are from bit 25 to bit 16. So, the
+		 * driver needs right shifting by 16.
 		 */
 		if (mdp->cd->shift_rd0)
 			desc_status >>= 16;
@@ -1374,7 +1461,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
 			if (skb == NULL)
 				break;	/* Better luck next round. */
 			dma_map_single(&ndev->dev, skb->data, mdp->rx_buf_sz,
-					DMA_FROM_DEVICE);
+				       DMA_FROM_DEVICE);
 			sh_eth_set_receive_align(skb);
 
 			skb_checksum_none_assert(skb);
@@ -1392,10 +1479,13 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
 	/* If we don't need to check status, don't. -KDU */
 	if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R)) {
 		/* fix the values for the next receiving if RDE is set */
-		if (intr_status & EESR_RDE)
-			mdp->cur_rx = mdp->dirty_rx =
-				(sh_eth_read(ndev, RDFAR) -
-				 sh_eth_read(ndev, RDLAR)) >> 4;
+		if (intr_status & EESR_RDE) {
+			u32 count = (sh_eth_read(ndev, RDFAR) -
+				     sh_eth_read(ndev, RDLAR)) >> 4;
+
+			mdp->cur_rx = count;
+			mdp->dirty_rx = count;
+		}
 		sh_eth_write(ndev, EDRRR_R, EDRRR);
 	}
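The recovery path just above re-derives the ring indices from hardware: shifting the RDFAR-RDLAR byte offset right by 4 treats the descriptors as 16-byte strides, so the byte offset divided by 16 is the descriptor index. A small illustration (register values are invented):

/* Illustrative only: with 16-byte descriptor strides, a fetch address
 * 0x80 bytes past the ring base corresponds to descriptor entry 8.
 */
u32 rdlar = 0x20001000;			/* ring base (made-up value) */
u32 rdfar = 0x20001080;			/* current fetch address     */
u32 count = (rdfar - rdlar) >> 4;	/* = 8 */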
 
@@ -1438,17 +1528,17 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
 				if (mdp->ether_link_active_low)
 					link_stat = ~link_stat;
 			}
-			if (!(link_stat & PHY_ST_LINK))
+			if (!(link_stat & PHY_ST_LINK)) {
 				sh_eth_rcv_snd_disable(ndev);
-			else {
+			} else {
 				/* Link Up */
 				sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) &
-					  ~DMAC_M_ECI, EESIPR);
-				/*clear int */
+						   ~DMAC_M_ECI, EESIPR);
+				/* clear int */
 				sh_eth_write(ndev, sh_eth_read(ndev, ECSR),
-					  ECSR);
+					     ECSR);
 				sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) |
-					  DMAC_M_ECI, EESIPR);
+						   DMAC_M_ECI, EESIPR);
 				/* enable tx and rx */
 				sh_eth_rcv_snd_enable(ndev);
 			}
@@ -1517,11 +1607,11 @@ ignore_link:
 	if (intr_status & mask) {
 		/* Tx error */
 		u32 edtrr = sh_eth_read(ndev, EDTRR);
+
 		/* dmesg */
-		dev_err(&ndev->dev, "TX error. status=%8.8x cur_tx=%8.8x ",
-				intr_status, mdp->cur_tx);
-		dev_err(&ndev->dev, "dirty_tx=%8.8x state=%8.8x EDTRR=%8.8x.\n",
-				mdp->dirty_tx, (u32) ndev->state, edtrr);
+		dev_err(&ndev->dev, "TX error. status=%8.8x cur_tx=%8.8x dirty_tx=%8.8x state=%8.8x EDTRR=%8.8x.\n",
+			intr_status, mdp->cur_tx, mdp->dirty_tx,
+			(u32)ndev->state, edtrr);
 		/* dirty buffer free */
 		sh_eth_txfree(ndev);
 
@@ -1644,7 +1734,8 @@ static void sh_eth_adjust_link(struct net_device *ndev)
 		}
 		if (!mdp->link) {
 			sh_eth_write(ndev,
-				(sh_eth_read(ndev, ECMR) & ~ECMR_TXF), ECMR);
+				     sh_eth_read(ndev, ECMR) & ~ECMR_TXF,
+				     ECMR);
 			new_state = 1;
 			mdp->link = phydev->link;
 			if (mdp->cd->no_psr || mdp->no_ether_link)
@@ -1671,7 +1762,7 @@ static int sh_eth_phy_init(struct net_device *ndev)
 	struct phy_device *phydev = NULL;
 
 	snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
-		mdp->mii_bus->id , mdp->phy_id);
+		 mdp->mii_bus->id, mdp->phy_id);
 
 	mdp->link = 0;
 	mdp->speed = 0;
@@ -1685,8 +1776,8 @@ static int sh_eth_phy_init(struct net_device *ndev)
 		return PTR_ERR(phydev);
 	}
 
-	dev_info(&ndev->dev, "attached phy %i to driver %s\n",
-		phydev->addr, phydev->drv->name);
+	dev_info(&ndev->dev, "attached PHY %d (IRQ %d) to driver %s\n",
+		 phydev->addr, phydev->irq, phydev->drv->name);
 
 	mdp->phydev = phydev;
 
@@ -1703,15 +1794,13 @@ static int sh_eth_phy_start(struct net_device *ndev)
 	if (ret)
 		return ret;
 
-	/* reset phy - this also wakes it from PDOWN */
-	phy_write(mdp->phydev, MII_BMCR, BMCR_RESET);
 	phy_start(mdp->phydev);
 
 	return 0;
 }
 
 static int sh_eth_get_settings(struct net_device *ndev,
-			struct ethtool_cmd *ecmd)
+			       struct ethtool_cmd *ecmd)
 {
 	struct sh_eth_private *mdp = netdev_priv(ndev);
 	unsigned long flags;
@@ -1725,7 +1814,7 @@ static int sh_eth_get_settings(struct net_device *ndev,
 }
 
 static int sh_eth_set_settings(struct net_device *ndev,
-		struct ethtool_cmd *ecmd)
+			       struct ethtool_cmd *ecmd)
 {
 	struct sh_eth_private *mdp = netdev_priv(ndev);
 	unsigned long flags;
@@ -1801,7 +1890,7 @@ static int sh_eth_get_sset_count(struct net_device *netdev, int sset)
 }
 
 static void sh_eth_get_ethtool_stats(struct net_device *ndev,
-			struct ethtool_stats *stats, u64 *data)
+				     struct ethtool_stats *stats, u64 *data)
 {
 	struct sh_eth_private *mdp = netdev_priv(ndev);
 	int i = 0;
@@ -1818,7 +1907,7 @@ static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
 	switch (stringset) {
 	case ETH_SS_STATS:
 		memcpy(data, *sh_eth_gstrings_stats,
-					sizeof(sh_eth_gstrings_stats));
+		       sizeof(sh_eth_gstrings_stats));
 		break;
 	}
 }
@@ -1953,9 +2042,10 @@ static void sh_eth_tx_timeout(struct net_device *ndev)
 
 	netif_stop_queue(ndev);
 
-	if (netif_msg_timer(mdp))
-		dev_err(&ndev->dev, "%s: transmit timed out, status %8.8x,"
-	       " resetting...\n", ndev->name, (int)sh_eth_read(ndev, EESR));
+	if (netif_msg_timer(mdp)) {
+		dev_err(&ndev->dev, "%s: transmit timed out, status %8.8x, resetting...\n",
+			ndev->name, (int)sh_eth_read(ndev, EESR));
+	}
 
 	/* tx_errors count up */
 	ndev->stats.tx_errors++;
@@ -2065,6 +2155,9 @@ static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
 {
 	struct sh_eth_private *mdp = netdev_priv(ndev);
 
+	if (sh_eth_is_rz_fast_ether(mdp))
+		return &ndev->stats;
+
 	pm_runtime_get_sync(&mdp->pdev->dev);
 
 	ndev->stats.tx_dropped += sh_eth_read(ndev, TROCR);
@@ -2088,8 +2181,7 @@ static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
 }
 
 /* ioctl to device function */
-static int sh_eth_do_ioctl(struct net_device *ndev, struct ifreq *rq,
-				int cmd)
+static int sh_eth_do_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
 {
 	struct sh_eth_private *mdp = netdev_priv(ndev);
 	struct phy_device *phydev = mdp->phydev;
@@ -2209,7 +2301,7 @@ static int sh_eth_tsu_find_entry(struct net_device *ndev, const u8 *addr)
 
 	for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) {
 		sh_eth_tsu_read_entry(reg_offset, c_addr);
-		if (memcmp(addr, c_addr, ETH_ALEN) == 0)
+		if (ether_addr_equal(addr, c_addr))
 			return i;
 	}
 
@@ -2344,8 +2436,7 @@ static void sh_eth_set_multicast_list(struct net_device *ndev)
 	unsigned long flags;
 
 	spin_lock_irqsave(&mdp->lock, flags);
-	/*
-	 * Initial condition is MCT = 1, PRM = 0.
+	/* Initial condition is MCT = 1, PRM = 0.
 	 * Depending on ndev->flags, set PRM or clear MCT
 	 */
 	ecmr_bits = (sh_eth_read(ndev, ECMR) & ~ECMR_PRM) | ECMR_MCT;
@@ -2411,8 +2502,7 @@ static int sh_eth_vlan_rx_add_vid(struct net_device *ndev,
 
 	mdp->vlan_num_ids++;
 
-	/*
-	 * The controller has one VLAN tag HW filter. So, if the filter is
+	/* The controller has one VLAN tag HW filter. So, if the filter is
 	 * already enabled, the driver disables it and lets all VLAN-tagged frames through.
 	 */
 	if (mdp->vlan_num_ids > 1) {
@@ -2449,6 +2539,11 @@ static int sh_eth_vlan_rx_kill_vid(struct net_device *ndev,
 /* SuperH's TSU register init function */
 static void sh_eth_tsu_init(struct sh_eth_private *mdp)
 {
+	if (sh_eth_is_rz_fast_ether(mdp)) {
+		sh_eth_tsu_write(mdp, 0, TSU_TEN); /* Disable all CAM entry */
+		return;
+	}
+
 	sh_eth_tsu_write(mdp, 0, TSU_FWEN0);	/* Disable forward(0->1) */
 	sh_eth_tsu_write(mdp, 0, TSU_FWEN1);	/* Disable forward(1->0) */
 	sh_eth_tsu_write(mdp, 0, TSU_FCM);	/* forward fifo 3k-3k */
@@ -2528,7 +2623,7 @@ static int sh_mdio_init(struct net_device *ndev, int id,
 	mdp->mii_bus->name = "sh_mii";
 	mdp->mii_bus->parent = &ndev->dev;
 	snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
-		mdp->pdev->name, id);
+		 mdp->pdev->name, id);
 
 	/* PHY IRQ */
 	mdp->mii_bus->irq = devm_kzalloc(&ndev->dev,
@@ -2541,6 +2636,8 @@ static int sh_mdio_init(struct net_device *ndev, int id,
 
 	for (i = 0; i < PHY_MAX_ADDR; i++)
 		mdp->mii_bus->irq[i] = PHY_POLL;
+	if (pd->phy_irq > 0)
+		mdp->mii_bus->irq[pd->phy] = pd->phy_irq;
 
 	/* register mdio bus */
 	ret = mdiobus_register(mdp->mii_bus);
@@ -2566,6 +2663,9 @@ static const u16 *sh_eth_get_register_offset(int register_type)
 	case SH_ETH_REG_GIGABIT:
 		reg_offset = sh_eth_offset_gigabit;
 		break;
+	case SH_ETH_REG_FAST_RZ:
+		reg_offset = sh_eth_offset_fast_rz;
+		break;
 	case SH_ETH_REG_FAST_RCAR:
 		reg_offset = sh_eth_offset_fast_rcar;
 		break;
@@ -2739,7 +2839,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
 
 	/* print device information */
 	pr_info("Base address at 0x%x, %pM, IRQ %d.\n",
-	       (u32)ndev->base_addr, ndev->dev_addr, ndev->irq);
+		(u32)ndev->base_addr, ndev->dev_addr, ndev->irq);
 
 	platform_set_drvdata(pdev, ndev);
 
@@ -2777,8 +2877,7 @@ static int sh_eth_drv_remove(struct platform_device *pdev)
 #ifdef CONFIG_PM
 static int sh_eth_runtime_nop(struct device *dev)
 {
-	/*
-	 * Runtime PM callback shared between ->runtime_suspend()
+	/* Runtime PM callback shared between ->runtime_suspend()
 	 * and ->runtime_resume(). Simply returns success.
 	 *
 	 * This driver re-initializes all registers after
@@ -2805,9 +2904,11 @@ static struct platform_device_id sh_eth_id_table[] = {
 	{ "sh7757-ether", (kernel_ulong_t)&sh7757_data },
 	{ "sh7757-gether", (kernel_ulong_t)&sh7757_data_giga },
 	{ "sh7763-gether", (kernel_ulong_t)&sh7763_data },
+	{ "r7s72100-ether", (kernel_ulong_t)&r7s72100_data },
 	{ "r8a7740-gether", (kernel_ulong_t)&r8a7740_data },
 	{ "r8a777x-ether", (kernel_ulong_t)&r8a777x_data },
-	{ "r8a7790-ether", (kernel_ulong_t)&r8a7790_data },
+	{ "r8a7790-ether", (kernel_ulong_t)&r8a779x_data },
+	{ "r8a7791-ether", (kernel_ulong_t)&r8a779x_data },
 	{ }
 };
 MODULE_DEVICE_TABLE(platform, sh_eth_id_table);
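For context on the new pd->phy_irq handling a few hunks up: a board or SoC setup file can now hand the driver a dedicated PHY interrupt instead of leaving the MDIO bus on PHY_POLL. A minimal sketch, assuming only the sh_eth_plat_data fields referenced in that hunk; the PHY address and IRQ number are invented:

/* Hypothetical platform data: .phy and .phy_irq are the fields the
 * sh_mdio_init() hunk above consumes; the values are examples only.
 */
static struct sh_eth_plat_data board_sh_eth_pdata = {
	.phy		= 0x01,		/* PHY address on the MDIO bus */
	.phy_irq	= 57,		/* board-specific PHY interrupt */
	.edmac_endian	= EDMAC_LITTLE_ENDIAN,
};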
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index f32c1692d310..6075915b88ec 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -1,5 +1,4 @@
-/*
- *  SuperH Ethernet device driver
+/*  SuperH Ethernet device driver
  *
  *  Copyright (C) 2006-2012 Nobuhiro Iwamatsu
  *  Copyright (C) 2008-2012 Renesas Solutions Corp.
@@ -12,9 +11,6 @@
  *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  *  more details.
- *  You should have received a copy of the GNU General Public License along with
- *  this program; if not, write to the Free Software Foundation, Inc.,
- *  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
  *
  *  The full GNU General Public License is included in this distribution in
  *  the file called "COPYING".
@@ -159,6 +155,7 @@ enum {
 
 enum {
 	SH_ETH_REG_GIGABIT,
+	SH_ETH_REG_FAST_RZ,
 	SH_ETH_REG_FAST_RCAR,
 	SH_ETH_REG_FAST_SH4,
 	SH_ETH_REG_FAST_SH3_SH2
@@ -171,10 +168,9 @@ enum {
 #define SH2_SH3_SKB_RX_ALIGN	2
 #endif
 
-/*
- * Register's bits
+/* Register's bits
  */
-/* EDSR : sh7734, sh7757, sh7763, and r8a7740 only */
+/* EDSR : sh7734, sh7757, sh7763, r8a7740, and r7s72100 only */
 enum EDSR_BIT {
 	EDSR_ENT = 0x01, EDSR_ENR = 0x02,
 };
@@ -199,7 +195,7 @@ enum DMAC_T_BIT {
 	EDTRR_TRNS_ETHER = 0x01,
 };
 
-/* EDRRR*/
+/* EDRRR */
 enum EDRRR_R_BIT {
 	EDRRR_R = 0x01,
 };
@@ -422,8 +418,7 @@ enum TSU_FWSLC_BIT {
 #define TSU_VTAG_ENABLE		0x80000000
 #define TSU_VTAG_VID_MASK	0x00000fff
 
-/*
- * The sh ether Tx buffer descriptors.
+/* The sh ether Tx buffer descriptors.
  * This structure should be 20 bytes.
  */
 struct sh_eth_txdesc {
@@ -437,10 +432,9 @@ struct sh_eth_txdesc {
 #endif
 	u32 addr;		/* TD2 */
 	u32 pad1;		/* padding data */
-} __attribute__((aligned(2), packed));
+} __aligned(2) __packed;
 
-/*
- * The sh ether Rx buffer descriptors.
+/* The sh ether Rx buffer descriptors.
  * This structure should be 20 bytes.
  */
 struct sh_eth_rxdesc {
@@ -454,7 +448,7 @@ struct sh_eth_rxdesc {
 #endif
 	u32 addr;		/* RD2 */
 	u32 pad0;		/* padding data */
-} __attribute__((aligned(2), packed));
+} __aligned(2) __packed;
 
 /* This structure is used by each CPU dependency handling. */
 struct sh_eth_cpu_data {
@@ -480,16 +474,16 @@ struct sh_eth_cpu_data {
 	unsigned long eesr_err_check;
 
 	/* hardware features */
-	unsigned long irq_flags;	/* IRQ configuration flags */
-	unsigned no_psr:1;		/* EtherC DO NOT have PSR */
-	unsigned apr:1;			/* EtherC have APR */
-	unsigned mpr:1;			/* EtherC have MPR */
-	unsigned tpauser:1;		/* EtherC have TPAUSER */
-	unsigned bculr:1;		/* EtherC have BCULR */
-	unsigned tsu:1;			/* EtherC have TSU */
-	unsigned hw_swap:1;		/* E-DMAC have DE bit in EDMR */
-	unsigned rpadir:1;		/* E-DMAC have RPADIR */
-	unsigned no_trimd:1;		/* E-DMAC DO NOT have TRIMD */
+	unsigned long irq_flags; /* IRQ configuration flags */
+	unsigned no_psr:1;	/* EtherC DO NOT have PSR */
+	unsigned apr:1;		/* EtherC have APR */
+	unsigned mpr:1;		/* EtherC have MPR */
+	unsigned tpauser:1;	/* EtherC have TPAUSER */
+	unsigned bculr:1;	/* EtherC have BCULR */
+	unsigned tsu:1;		/* EtherC have TSU */
+	unsigned hw_swap:1;	/* E-DMAC have DE bit in EDMR */
+	unsigned rpadir:1;	/* E-DMAC have RPADIR */
+	unsigned no_trimd:1;	/* E-DMAC DO NOT have TRIMD */
 	unsigned no_ade:1;	/* E-DMAC DO NOT have ADE bit in EESR */
 	unsigned hw_crc:1;	/* E-DMAC have CSMR */
 	unsigned select_mii:1;	/* EtherC have RMII_MII (MII select register) */
@@ -511,14 +505,14 @@ struct sh_eth_private {
 	struct sh_eth_txdesc *tx_ring;
 	struct sk_buff **rx_skbuff;
 	struct sk_buff **tx_skbuff;
-	spinlock_t lock;
-	u32 cur_rx, dirty_rx;	/* Producer/consumer ring indices */
+	spinlock_t lock;		/* Register access lock */
+	u32 cur_rx, dirty_rx;		/* Producer/consumer ring indices */
 	u32 cur_tx, dirty_tx;
-	u32 rx_buf_sz;		/* Based on MTU+slack. */
+	u32 rx_buf_sz;			/* Based on MTU+slack. */
 	int edmac_endian;
 	struct napi_struct napi;
 	/* MII transceiver section. */
-	u32 phy_id;					/* PHY ID */
+	u32 phy_id;			/* PHY ID */
 	struct mii_bus *mii_bus;	/* MDIO bus control */
 	struct phy_device *phydev;	/* PHY device control */
 	int link;
@@ -526,8 +520,8 @@ struct sh_eth_private {
 	int msg_enable;
 	int speed;
 	int duplex;
-	int port;		/* for TSU */
-	int vlan_num_ids;	/* for VLAN tag filter */
+	int port;			/* for TSU */
+	int vlan_num_ids;		/* for VLAN tag filter */
 
 	unsigned no_ether_link:1;
 	unsigned ether_link_active_low:1;
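On the __aligned(2) __packed change to the descriptor structs above: the kernel shorthand macros expand to the same GCC attributes the old spelling listed explicitly, so the descriptor layout is unchanged and the edit is purely notational. A minimal illustration (the struct names are made up):

/* __packed and __aligned(x) are kernel wrappers for the GCC attributes:
 *   __packed      ->  __attribute__((packed))
 *   __aligned(x)  ->  __attribute__((aligned(x)))
 * so the two declarations below describe the same layout.
 */
struct demo_desc_old {
	u32 status;
	u32 addr;
} __attribute__((aligned(2), packed));

struct demo_desc_new {
	u32 status;
	u32 addr;
} __aligned(2) __packed;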
diff --git a/drivers/net/ethernet/s6gmac.c b/drivers/net/ethernet/s6gmac.c
index a99739c5142c..1f4449ad8900 100644
--- a/drivers/net/ethernet/s6gmac.c
+++ b/drivers/net/ethernet/s6gmac.c
@@ -14,7 +14,6 @@
 #include <linux/interrupt.h>
 #include <linux/types.h>
 #include <linux/delay.h>
-#include <linux/init.h>
 #include <linux/spinlock.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
diff --git a/drivers/net/ethernet/seeq/sgiseeq.c b/drivers/net/ethernet/seeq/sgiseeq.c
index c76571886011..69e4fd21adb4 100644
--- a/drivers/net/ethernet/seeq/sgiseeq.c
+++ b/drivers/net/ethernet/seeq/sgiseeq.c
@@ -11,7 +11,6 @@
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/errno.h>
-#include <linux/init.h>
 #include <linux/types.h>
 #include <linux/interrupt.h>
 #include <linux/string.h>
@@ -356,7 +355,7 @@ static inline void sgiseeq_rx(struct net_device *dev, struct sgiseeq_private *sp
 		if (pkt_status & SEEQ_RSTAT_FIG) {
 			/* Packet is OK. */
 			/* We don't want to receive our own packets */
-			if (memcmp(rd->skb->data + 6, dev->dev_addr, ETH_ALEN)) {
+			if (!ether_addr_equal(rd->skb->data + 6, dev->dev_addr)) {
 				if (len > rx_copybreak) {
 					skb = rd->skb;
 					newskb = netdev_alloc_skb(dev, PKT_BUF_SZ);
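Both the sh_eth and sgiseeq hunks above swap an open-coded memcmp() for ether_addr_equal() from <linux/etherdevice.h>. The helper returns true on a match while memcmp() returns 0 on a match, which is why the sgiseeq test gains a leading '!':

/* Equivalences relied on by the two conversions above:
 *   memcmp(a, b, ETH_ALEN) == 0   <=>   ether_addr_equal(a, b)
 *   memcmp(a, b, ETH_ALEN) != 0   <=>   !ether_addr_equal(a, b)
 */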
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 676c3c057bfb..174a92f5fe51 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -14,6 +14,7 @@
 #include "mcdi_pcol.h"
 #include "nic.h"
 #include "workarounds.h"
+#include "selftest.h"
 #include <linux/in.h>
 #include <linux/jhash.h>
 #include <linux/wait.h>
@@ -52,31 +53,31 @@ struct efx_ef10_filter_table {
 
 	struct {
 		unsigned long spec;	/* pointer to spec plus flag bits */
-/* BUSY flag indicates that an update is in progress.  STACK_OLD is
- * used to mark and sweep stack-owned MAC filters.
+/* BUSY flag indicates that an update is in progress.  AUTO_OLD is
+ * used to mark and sweep MAC filters for the device address lists.
  */
 #define EFX_EF10_FILTER_FLAG_BUSY	1UL
-#define EFX_EF10_FILTER_FLAG_STACK_OLD	2UL
+#define EFX_EF10_FILTER_FLAG_AUTO_OLD	2UL
 #define EFX_EF10_FILTER_FLAGS		3UL
 		u64 handle;		/* firmware handle */
 	} *entry;
 	wait_queue_head_t waitq;
 /* Shadow of net_device address lists, guarded by mac_lock */
-#define EFX_EF10_FILTER_STACK_UC_MAX	32
-#define EFX_EF10_FILTER_STACK_MC_MAX	256
+#define EFX_EF10_FILTER_DEV_UC_MAX	32
+#define EFX_EF10_FILTER_DEV_MC_MAX	256
 	struct {
 		u8 addr[ETH_ALEN];
 		u16 id;
-	} stack_uc_list[EFX_EF10_FILTER_STACK_UC_MAX],
-	  stack_mc_list[EFX_EF10_FILTER_STACK_MC_MAX];
-	int stack_uc_count;		/* negative for PROMISC */
-	int stack_mc_count;		/* negative for PROMISC/ALLMULTI */
+	} dev_uc_list[EFX_EF10_FILTER_DEV_UC_MAX],
+	  dev_mc_list[EFX_EF10_FILTER_DEV_MC_MAX];
+	int dev_uc_count;		/* negative for PROMISC */
+	int dev_mc_count;		/* negative for PROMISC/ALLMULTI */
 };
 
 /* An arbitrary search limit for the software hash table */
 #define EFX_EF10_FILTER_SEARCH_LIMIT 200
 
-static void efx_ef10_rx_push_indir_table(struct efx_nic *efx);
+static void efx_ef10_rx_push_rss_config(struct efx_nic *efx);
 static void efx_ef10_rx_free_indir_table(struct efx_nic *efx);
 static void efx_ef10_filter_table_remove(struct efx_nic *efx);
 
@@ -263,6 +264,8 @@ static int efx_ef10_probe(struct efx_nic *efx)
 	if (rc)
 		goto fail3;
 
+	efx_ptp_probe(efx, NULL);
+
 	return 0;
 
 fail3:
@@ -277,11 +280,17 @@ fail1:
 
 static int efx_ef10_free_vis(struct efx_nic *efx)
 {
-	int rc = efx_mcdi_rpc(efx, MC_CMD_FREE_VIS, NULL, 0, NULL, 0, NULL);
+	MCDI_DECLARE_BUF_OUT_OR_ERR(outbuf, 0);
+	size_t outlen;
+	int rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FREE_VIS, NULL, 0,
+				    outbuf, sizeof(outbuf), &outlen);
 
 	/* -EALREADY means nothing to free, so ignore */
 	if (rc == -EALREADY)
 		rc = 0;
+	if (rc)
+		efx_mcdi_display_error(efx, MC_CMD_FREE_VIS, 0, outbuf, outlen,
+				       rc);
 	return rc;
 }
 
@@ -465,9 +474,10 @@ static void efx_ef10_remove(struct efx_nic *efx)
 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
 	int rc;
 
+	efx_ptp_remove(efx);
+
 	efx_mcdi_mon_remove(efx);
 
-	/* This needs to be after efx_ptp_remove_channel() with no filters */
 	efx_ef10_rx_free_indir_table(efx);
 
 	if (nic_data->wc_membase)
@@ -669,10 +679,21 @@ static int efx_ef10_init_nic(struct efx_nic *efx)
 		nic_data->must_restore_piobufs = false;
 	}
 
-	efx_ef10_rx_push_indir_table(efx);
+	efx_ef10_rx_push_rss_config(efx);
 	return 0;
 }
 
+static void efx_ef10_reset_mc_allocations(struct efx_nic *efx)
+{
+	struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+	/* All our allocations have been reset */
+	nic_data->must_realloc_vis = true;
+	nic_data->must_restore_filters = true;
+	nic_data->must_restore_piobufs = true;
+	nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
+}
+
 static int efx_ef10_map_reset_flags(u32 *flags)
 {
 	enum {
@@ -703,6 +724,19 @@ static int efx_ef10_map_reset_flags(u32 *flags)
 	return -EINVAL;
 }
 
+static int efx_ef10_reset(struct efx_nic *efx, enum reset_type reset_type)
+{
+	int rc = efx_mcdi_reset(efx, reset_type);
+
+	/* If it was a port reset, trigger reallocation of MC resources.
+	 * Note that on an MC reset nothing needs to be done now because we'll
+	 * detect the MC reset later and handle it then.
+	 */
+	if (reset_type == RESET_TYPE_ALL && !rc)
+		efx_ef10_reset_mc_allocations(efx);
+	return rc;
+}
+
 #define EF10_DMA_STAT(ext_name, mcdi_name)			\
 	[EF10_STAT_ ## ext_name] =				\
 	{ #ext_name, 64, 8 * MC_CMD_MAC_ ## mcdi_name }
@@ -764,8 +798,8 @@ static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = {
 	EF10_DMA_STAT(rx_dp_q_disabled_packets, RXDP_Q_DISABLED_PKTS),
 	EF10_DMA_STAT(rx_dp_di_dropped_packets, RXDP_DI_DROPPED_PKTS),
 	EF10_DMA_STAT(rx_dp_streaming_packets, RXDP_STREAMING_PKTS),
-	EF10_DMA_STAT(rx_dp_emerg_fetch, RXDP_EMERGENCY_FETCH_CONDITIONS),
-	EF10_DMA_STAT(rx_dp_emerg_wait, RXDP_EMERGENCY_WAIT_CONDITIONS),
+	EF10_DMA_STAT(rx_dp_hlb_fetch, RXDP_EMERGENCY_FETCH_CONDITIONS),
+	EF10_DMA_STAT(rx_dp_hlb_wait, RXDP_EMERGENCY_WAIT_CONDITIONS),
 };
 
 #define HUNT_COMMON_STAT_MASK ((1ULL << EF10_STAT_tx_bytes) |		\
@@ -834,8 +868,8 @@ static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = {
 	(1ULL << EF10_STAT_rx_dp_q_disabled_packets) |			\
 	(1ULL << EF10_STAT_rx_dp_di_dropped_packets) |			\
 	(1ULL << EF10_STAT_rx_dp_streaming_packets) |			\
-	(1ULL << EF10_STAT_rx_dp_emerg_fetch) |				\
-	(1ULL << EF10_STAT_rx_dp_emerg_wait))
+	(1ULL << EF10_STAT_rx_dp_hlb_fetch) |				\
+	(1ULL << EF10_STAT_rx_dp_hlb_wait))
 
 static u64 efx_ef10_raw_stat_mask(struct efx_nic *efx)
 {
@@ -901,6 +935,7 @@ static int efx_ef10_try_update_nic_stats(struct efx_nic *efx)
 		return -EAGAIN;
 
 	/* Update derived statistics */
+	efx_nic_fix_nodesc_drop_stat(efx, &stats[EF10_STAT_rx_nodesc_drops]);
 	stats[EF10_STAT_rx_good_bytes] =
 		stats[EF10_STAT_rx_bytes] -
 		stats[EF10_STAT_rx_bytes_minus_good_bytes];
@@ -1067,10 +1102,7 @@ static int efx_ef10_mcdi_poll_reboot(struct efx_nic *efx)
 	nic_data->warm_boot_count = rc;
 
 	/* All our allocations have been reset */
-	nic_data->must_realloc_vis = true;
-	nic_data->must_restore_filters = true;
-	nic_data->must_restore_piobufs = true;
-	nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
+	efx_ef10_reset_mc_allocations(efx);
 
 	/* The datapath firmware might have been changed */
 	nic_data->must_check_datapath_caps = true;
@@ -1241,8 +1273,8 @@ static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue)
 	return;
 
 fail:
-	WARN_ON(true);
-	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+	netdev_WARN(efx->net_dev, "failed to initialise TXQ %d\n",
+		    tx_queue->queue);
 }
 
 static void efx_ef10_tx_fini(struct efx_tx_queue *tx_queue)
@@ -1256,7 +1288,7 @@ static void efx_ef10_tx_fini(struct efx_tx_queue *tx_queue)
 	MCDI_SET_DWORD(inbuf, FINI_TXQ_IN_INSTANCE,
 		       tx_queue->queue);
 
-	rc = efx_mcdi_rpc(efx, MC_CMD_FINI_TXQ, inbuf, sizeof(inbuf),
-			  outbuf, sizeof(outbuf), &outlen);
+	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_TXQ, inbuf, sizeof(inbuf),
+				outbuf, sizeof(outbuf), &outlen);
 
 	if (rc && rc != -EALREADY)
@@ -1265,7 +1297,8 @@ static void efx_ef10_tx_fini(struct efx_tx_queue *tx_queue)
 	return;
 
 fail:
-	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+	efx_mcdi_display_error(efx, MC_CMD_FINI_TXQ, MC_CMD_FINI_TXQ_IN_LEN,
+			       outbuf, outlen, rc);
 }
 
 static void efx_ef10_tx_remove(struct efx_tx_queue *tx_queue)
@@ -1408,12 +1441,12 @@ static void efx_ef10_rx_free_indir_table(struct efx_nic *efx)
 	nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
 }
 
-static void efx_ef10_rx_push_indir_table(struct efx_nic *efx)
+static void efx_ef10_rx_push_rss_config(struct efx_nic *efx)
 {
 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
 	int rc;
 
-	netif_dbg(efx, drv, efx->net_dev, "pushing RX indirection table\n");
+	netif_dbg(efx, drv, efx->net_dev, "pushing RSS config\n");
 
 	if (nic_data->rx_rss_context == EFX_EF10_RSS_CONTEXT_INVALID) {
 		rc = efx_ef10_alloc_rss_context(efx, &nic_data->rx_rss_context);
@@ -1461,8 +1494,9 @@ static void efx_ef10_rx_init(struct efx_rx_queue *rx_queue)
 	MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_LABEL, efx_rx_queue_index(rx_queue));
 	MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_INSTANCE,
 		       efx_rx_queue_index(rx_queue));
-	MCDI_POPULATE_DWORD_1(inbuf, INIT_RXQ_IN_FLAGS,
-			      INIT_RXQ_IN_FLAG_PREFIX, 1);
+	MCDI_POPULATE_DWORD_2(inbuf, INIT_RXQ_IN_FLAGS,
+			      INIT_RXQ_IN_FLAG_PREFIX, 1,
+			      INIT_RXQ_IN_FLAG_TIMESTAMP, 1);
 	MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_OWNER_ID, 0);
 	MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);
 
@@ -1481,13 +1515,8 @@ static void efx_ef10_rx_init(struct efx_rx_queue *rx_queue)
 	rc = efx_mcdi_rpc(efx, MC_CMD_INIT_RXQ, inbuf, inlen,
 			  outbuf, sizeof(outbuf), &outlen);
 	if (rc)
-		goto fail;
-
-	return;
-
-fail:
-	WARN_ON(true);
-	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+		netdev_WARN(efx->net_dev, "failed to initialise RXQ %d\n",
+			    efx_rx_queue_index(rx_queue));
 }
 
 static void efx_ef10_rx_fini(struct efx_rx_queue *rx_queue)
@@ -1501,7 +1530,7 @@ static void efx_ef10_rx_fini(struct efx_rx_queue *rx_queue)
 	MCDI_SET_DWORD(inbuf, FINI_RXQ_IN_INSTANCE,
 		       efx_rx_queue_index(rx_queue));
 
-	rc = efx_mcdi_rpc(efx, MC_CMD_FINI_RXQ, inbuf, sizeof(inbuf),
-			  outbuf, sizeof(outbuf), &outlen);
+	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_RXQ, inbuf, sizeof(inbuf),
+				outbuf, sizeof(outbuf), &outlen);
 
 	if (rc && rc != -EALREADY)
@@ -1510,7 +1539,8 @@ static void efx_ef10_rx_fini(struct efx_rx_queue *rx_queue)
 	return;
 
 fail:
-	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+	efx_mcdi_display_error(efx, MC_CMD_FINI_RXQ, MC_CMD_FINI_RXQ_IN_LEN,
+			       outbuf, outlen, rc);
 }
 
 static void efx_ef10_rx_remove(struct efx_rx_queue *rx_queue)
@@ -1647,15 +1677,7 @@ static int efx_ef10_ev_init(struct efx_channel *channel)
 
 	rc = efx_mcdi_rpc(efx, MC_CMD_INIT_EVQ, inbuf, inlen,
 			  outbuf, sizeof(outbuf), &outlen);
-	if (rc)
-		goto fail;
-
 	/* IRQ return is ignored */
-
-	return 0;
-
-fail:
-	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
 	return rc;
 }
 
@@ -1669,7 +1691,7 @@ static void efx_ef10_ev_fini(struct efx_channel *channel)
 
 	MCDI_SET_DWORD(inbuf, FINI_EVQ_IN_INSTANCE, channel->channel);
 
-	rc = efx_mcdi_rpc(efx, MC_CMD_FINI_EVQ, inbuf, sizeof(inbuf),
-			  outbuf, sizeof(outbuf), &outlen);
+	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_EVQ, inbuf, sizeof(inbuf),
+				outbuf, sizeof(outbuf), &outlen);
 
 	if (rc && rc != -EALREADY)
@@ -1678,7 +1700,8 @@ static void efx_ef10_ev_fini(struct efx_channel *channel)
 	return;
 
 fail:
-	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+	efx_mcdi_display_error(efx, MC_CMD_FINI_EVQ, MC_CMD_FINI_EVQ_IN_LEN,
+			       outbuf, outlen, rc);
 }
 
 static void efx_ef10_ev_remove(struct efx_channel *channel)
@@ -1717,8 +1740,6 @@ static void efx_ef10_handle_rx_abort(struct efx_rx_queue *rx_queue)
 {
 	unsigned int rx_desc_ptr;
 
-	WARN_ON(rx_queue->scatter_n == 0);
-
 	netif_dbg(rx_queue->efx, hw, rx_queue->efx->net_dev,
 		  "scattered RX aborted (dropping %u buffers)\n",
 		  rx_queue->scatter_n);
@@ -1754,7 +1775,10 @@ static int efx_ef10_handle_rx_event(struct efx_channel *channel,
 	rx_l4_class = EFX_QWORD_FIELD(*event, ESF_DZ_RX_L4_CLASS);
 	rx_cont = EFX_QWORD_FIELD(*event, ESF_DZ_RX_CONT);
 
-	WARN_ON(EFX_QWORD_FIELD(*event, ESF_DZ_RX_DROP_EVENT));
+	if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_DROP_EVENT))
+		netdev_WARN(efx->net_dev, "saw RX_DROP_EVENT: event="
+			    EFX_QWORD_FMT "\n",
+			    EFX_QWORD_VAL(*event));
 
 	rx_queue = efx_channel_get_rx_queue(channel);
 
@@ -1765,17 +1789,27 @@ static int efx_ef10_handle_rx_event(struct efx_channel *channel,
 		   ((1 << ESF_DZ_RX_DSC_PTR_LBITS_WIDTH) - 1));
 
 	if (n_descs != rx_queue->scatter_n + 1) {
+		struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
 		/* detect rx abort */
 		if (unlikely(n_descs == rx_queue->scatter_n)) {
-			WARN_ON(rx_bytes != 0);
+			if (rx_queue->scatter_n == 0 || rx_bytes != 0)
+				netdev_WARN(efx->net_dev,
+					    "invalid RX abort: scatter_n=%u event="
+					    EFX_QWORD_FMT "\n",
+					    rx_queue->scatter_n,
+					    EFX_QWORD_VAL(*event));
 			efx_ef10_handle_rx_abort(rx_queue);
 			return 0;
 		}
 
-		if (unlikely(rx_queue->scatter_n != 0)) {
-			/* Scattered packet completions cannot be
-			 * merged, so something has gone wrong.
-			 */
+		/* Check that RX completion merging is valid, i.e.
+		 * the current firmware supports it and this is a
+		 * non-scattered packet.
+		 */
+		if (!(nic_data->datapath_caps &
+		      (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN)) ||
+		    rx_queue->scatter_n != 0 || rx_cont) {
 			efx_ef10_handle_rx_bad_lbits(
 				rx_queue, next_ptr_lbits,
 				(rx_queue->removed_count +
@@ -1901,7 +1935,7 @@ static void efx_ef10_handle_driver_generated_event(struct efx_channel *channel,
 		 * events, so efx_process_channel() won't refill the
 		 * queue. Refill it here
 		 */
-		efx_fast_push_rx_descriptors(&channel->rx_queue);
+		efx_fast_push_rx_descriptors(&channel->rx_queue, true);
 		break;
 	default:
 		netif_err(efx, hw, efx->net_dev,
@@ -2232,7 +2266,9 @@ static void efx_ef10_filter_push_prep(struct efx_nic *efx,
 		       MC_CMD_FILTER_OP_IN_RX_DEST_HOST);
 	MCDI_SET_DWORD(inbuf, FILTER_OP_IN_TX_DEST,
 		       MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT);
-	MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_QUEUE, spec->dmaq_id);
+	MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_QUEUE,
+		       spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ?
+		       0 : spec->dmaq_id);
 	MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_MODE,
 		       (spec->flags & EFX_FILTER_FLAG_RX_RSS) ?
 		       MC_CMD_FILTER_OP_IN_RX_MODE_RSS :
@@ -2257,6 +2293,8 @@ static int efx_ef10_filter_push(struct efx_nic *efx,
 			  outbuf, sizeof(outbuf), NULL);
 	if (rc == 0)
 		*handle = MCDI_QWORD(outbuf, FILTER_OP_OUT_HANDLE);
+	if (rc == -ENOSPC)
+		rc = -EBUSY; /* to match efx_farch_filter_insert() */
 	return rc;
 }
 
@@ -2326,10 +2364,7 @@ static s32 efx_ef10_filter_insert(struct efx_nic *efx,
 				    EFX_EF10_FILTER_FLAG_BUSY)
 					break;
 				if (spec->priority < saved_spec->priority &&
-				    !(saved_spec->priority ==
-				      EFX_FILTER_PRI_REQUIRED &&
-				      saved_spec->flags &
-				      EFX_FILTER_FLAG_RX_STACK)) {
+				    spec->priority != EFX_FILTER_PRI_AUTO) {
 					rc = -EPERM;
 					goto out_unlock;
 				}
@@ -2383,11 +2418,13 @@ found:
 	 */
 	saved_spec = efx_ef10_filter_entry_spec(table, ins_index);
 	if (saved_spec) {
-		if (spec->flags & EFX_FILTER_FLAG_RX_STACK) {
+		if (spec->priority == EFX_FILTER_PRI_AUTO &&
+		    saved_spec->priority >= EFX_FILTER_PRI_AUTO) {
 			/* Just make sure it won't be removed */
-			saved_spec->flags |= EFX_FILTER_FLAG_RX_STACK;
+			if (saved_spec->priority > EFX_FILTER_PRI_AUTO)
+				saved_spec->flags |= EFX_FILTER_FLAG_RX_OVER_AUTO;
 			table->entry[ins_index].spec &=
-				~EFX_EF10_FILTER_FLAG_STACK_OLD;
+				~EFX_EF10_FILTER_FLAG_AUTO_OLD;
 			rc = ins_index;
 			goto out_unlock;
 		}
@@ -2427,8 +2464,11 @@ found:
 	if (rc == 0) {
 		if (replacing) {
 			/* Update the fields that may differ */
+			if (saved_spec->priority == EFX_FILTER_PRI_AUTO)
+				saved_spec->flags |=
+					EFX_FILTER_FLAG_RX_OVER_AUTO;
 			saved_spec->priority = spec->priority;
-			saved_spec->flags &= EFX_FILTER_FLAG_RX_STACK;
+			saved_spec->flags &= EFX_FILTER_FLAG_RX_OVER_AUTO;
 			saved_spec->flags |= spec->flags;
 			saved_spec->rss_context = spec->rss_context;
 			saved_spec->dmaq_id = spec->dmaq_id;
@@ -2497,13 +2537,13 @@ static void efx_ef10_filter_update_rx_scatter(struct efx_nic *efx)
 }
 
 /* Remove a filter.
- * If !stack_requested, remove by ID
- * If stack_requested, remove by index
+ * If !by_index, remove by ID
+ * If by_index, remove by index
  * Filter ID may come from userland and must be range-checked.
  */
 static int efx_ef10_filter_remove_internal(struct efx_nic *efx,
-					   enum efx_filter_priority priority,
-					   u32 filter_id, bool stack_requested)
+					   unsigned int priority_mask,
+					   u32 filter_id, bool by_index)
 {
 	unsigned int filter_idx = filter_id % HUNT_FILTER_TBL_ROWS;
 	struct efx_ef10_filter_table *table = efx->filter_state;
@@ -2527,26 +2567,41 @@ static int efx_ef10_filter_remove_internal(struct efx_nic *efx,
 		spin_unlock_bh(&efx->filter_lock);
 		schedule();
 	}
+
 	spec = efx_ef10_filter_entry_spec(table, filter_idx);
-	if (!spec || spec->priority > priority ||
-	    (!stack_requested &&
+	if (!spec ||
+	    (!by_index &&
 	     efx_ef10_filter_rx_match_pri(table, spec->match_flags) !=
 	     filter_id / HUNT_FILTER_TBL_ROWS)) {
 		rc = -ENOENT;
 		goto out_unlock;
 	}
+
+	if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO &&
+	    priority_mask == (1U << EFX_FILTER_PRI_AUTO)) {
+		/* Just remove flags */
+		spec->flags &= ~EFX_FILTER_FLAG_RX_OVER_AUTO;
+		table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_AUTO_OLD;
+		rc = 0;
+		goto out_unlock;
+	}
+
+	if (!(priority_mask & (1U << spec->priority))) {
+		rc = -ENOENT;
+		goto out_unlock;
+	}
+
 	table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY;
 	spin_unlock_bh(&efx->filter_lock);
 
-	if (spec->flags & EFX_FILTER_FLAG_RX_STACK && !stack_requested) {
-		/* Reset steering of a stack-owned filter */
+	if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO) {
+		/* Reset to an automatic filter */
 
 		struct efx_filter_spec new_spec = *spec;
 
-		new_spec.priority = EFX_FILTER_PRI_REQUIRED;
+		new_spec.priority = EFX_FILTER_PRI_AUTO;
 		new_spec.flags = (EFX_FILTER_FLAG_RX |
-				  EFX_FILTER_FLAG_RX_RSS |
-				  EFX_FILTER_FLAG_RX_STACK);
+				  EFX_FILTER_FLAG_RX_RSS);
 		new_spec.dmaq_id = 0;
 		new_spec.rss_context = EFX_FILTER_RSS_CONTEXT_DEFAULT;
 		rc = efx_ef10_filter_push(efx, &new_spec,
@@ -2574,6 +2629,7 @@ static int efx_ef10_filter_remove_internal(struct efx_nic *efx,
 			efx_ef10_filter_set_entry(table, filter_idx, NULL, 0);
 		}
 	}
+
 	table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_BUSY;
 	wake_up_all(&table->waitq);
 out_unlock:
@@ -2586,7 +2642,8 @@ static int efx_ef10_filter_remove_safe(struct efx_nic *efx,
 				       enum efx_filter_priority priority,
 				       u32 filter_id)
 {
-	return efx_ef10_filter_remove_internal(efx, priority, filter_id, false);
+	return efx_ef10_filter_remove_internal(efx, 1U << priority,
+					       filter_id, false);
 }
 
 static int efx_ef10_filter_get_safe(struct efx_nic *efx,
@@ -2612,10 +2669,24 @@ static int efx_ef10_filter_get_safe(struct efx_nic *efx,
 	return rc;
 }
 
-static void efx_ef10_filter_clear_rx(struct efx_nic *efx,
-				     enum efx_filter_priority priority)
+static int efx_ef10_filter_clear_rx(struct efx_nic *efx,
+				    enum efx_filter_priority priority)
 {
-	/* TODO */
+	unsigned int priority_mask;
+	unsigned int i;
+	int rc;
+
+	priority_mask = (((1U << (priority + 1)) - 1) &
+			 ~(1U << EFX_FILTER_PRI_AUTO));
+
+	for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) {
+		rc = efx_ef10_filter_remove_internal(efx, priority_mask,
+						     i, true);
+		if (rc && rc != -ENOENT)
+			return rc;
+	}
+
+	return 0;
 }
 
 static u32 efx_ef10_filter_count_rx_used(struct efx_nic *efx,
@@ -2716,8 +2787,6 @@ static s32 efx_ef10_filter_rfs_insert(struct efx_nic *efx,
 				rc = -EBUSY;
 				goto fail_unlock;
 			}
-			EFX_WARN_ON_PARANOID(saved_spec->flags &
-					     EFX_FILTER_FLAG_RX_STACK);
 			if (spec->priority < saved_spec->priority) {
 				rc = -EPERM;
 				goto fail_unlock;
@@ -3027,8 +3096,11 @@ static void efx_ef10_filter_table_remove(struct efx_nic *efx)
 			       table->entry[filter_idx].handle);
 		rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf),
 				  NULL, 0, NULL);
-
-		WARN_ON(rc != 0);
+		if (rc)
+			netdev_WARN(efx->net_dev,
+				    "filter_idx=%#x handle=%#llx\n",
+				    filter_idx,
+				    table->entry[filter_idx].handle);
 		kfree(spec);
 	}
 
@@ -3052,15 +3124,15 @@ static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
 
 	/* Mark old filters that may need to be removed */
 	spin_lock_bh(&efx->filter_lock);
-	n = table->stack_uc_count < 0 ? 1 : table->stack_uc_count;
+	n = table->dev_uc_count < 0 ? 1 : table->dev_uc_count;
 	for (i = 0; i < n; i++) {
-		filter_idx = table->stack_uc_list[i].id % HUNT_FILTER_TBL_ROWS;
-		table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_STACK_OLD;
+		filter_idx = table->dev_uc_list[i].id % HUNT_FILTER_TBL_ROWS;
+		table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_AUTO_OLD;
 	}
-	n = table->stack_mc_count < 0 ? 1 : table->stack_mc_count;
+	n = table->dev_mc_count < 0 ? 1 : table->dev_mc_count;
 	for (i = 0; i < n; i++) {
-		filter_idx = table->stack_mc_list[i].id % HUNT_FILTER_TBL_ROWS;
-		table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_STACK_OLD;
+		filter_idx = table->dev_mc_list[i].id % HUNT_FILTER_TBL_ROWS;
+		table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_AUTO_OLD;
 	}
 	spin_unlock_bh(&efx->filter_lock);
 
@@ -3069,28 +3141,28 @@ static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
 	 */
 	netif_addr_lock_bh(net_dev);
 	if (net_dev->flags & IFF_PROMISC ||
-	    netdev_uc_count(net_dev) >= EFX_EF10_FILTER_STACK_UC_MAX) {
-		table->stack_uc_count = -1;
+	    netdev_uc_count(net_dev) >= EFX_EF10_FILTER_DEV_UC_MAX) {
+		table->dev_uc_count = -1;
 	} else {
-		table->stack_uc_count = 1 + netdev_uc_count(net_dev);
-		memcpy(table->stack_uc_list[0].addr, net_dev->dev_addr,
+		table->dev_uc_count = 1 + netdev_uc_count(net_dev);
+		memcpy(table->dev_uc_list[0].addr, net_dev->dev_addr,
 		       ETH_ALEN);
 		i = 1;
 		netdev_for_each_uc_addr(uc, net_dev) {
-			memcpy(table->stack_uc_list[i].addr,
+			memcpy(table->dev_uc_list[i].addr,
 			       uc->addr, ETH_ALEN);
 			i++;
 		}
 	}
 	if (net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI) ||
-	    netdev_mc_count(net_dev) >= EFX_EF10_FILTER_STACK_MC_MAX) {
-		table->stack_mc_count = -1;
+	    netdev_mc_count(net_dev) >= EFX_EF10_FILTER_DEV_MC_MAX) {
+		table->dev_mc_count = -1;
 	} else {
-		table->stack_mc_count = 1 + netdev_mc_count(net_dev);
-		eth_broadcast_addr(table->stack_mc_list[0].addr);
+		table->dev_mc_count = 1 + netdev_mc_count(net_dev);
+		eth_broadcast_addr(table->dev_mc_list[0].addr);
 		i = 1;
 		netdev_for_each_mc_addr(mc, net_dev) {
-			memcpy(table->stack_mc_list[i].addr,
+			memcpy(table->dev_mc_list[i].addr,
 			       mc->addr, ETH_ALEN);
 			i++;
 		}
@@ -3098,90 +3170,86 @@ static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
 	netif_addr_unlock_bh(net_dev);
 
 	/* Insert/renew unicast filters */
-	if (table->stack_uc_count >= 0) {
-		for (i = 0; i < table->stack_uc_count; i++) {
-			efx_filter_init_rx(&spec, EFX_FILTER_PRI_REQUIRED,
-					   EFX_FILTER_FLAG_RX_RSS |
-					   EFX_FILTER_FLAG_RX_STACK,
+	if (table->dev_uc_count >= 0) {
+		for (i = 0; i < table->dev_uc_count; i++) {
+			efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
+					   EFX_FILTER_FLAG_RX_RSS,
 					   0);
 			efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC,
-						 table->stack_uc_list[i].addr);
+						 table->dev_uc_list[i].addr);
 			rc = efx_ef10_filter_insert(efx, &spec, true);
 			if (rc < 0) {
 				/* Fall back to unicast-promisc */
 				while (i--)
 					efx_ef10_filter_remove_safe(
-						efx, EFX_FILTER_PRI_REQUIRED,
-						table->stack_uc_list[i].id);
-				table->stack_uc_count = -1;
+						efx, EFX_FILTER_PRI_AUTO,
+						table->dev_uc_list[i].id);
+				table->dev_uc_count = -1;
 				break;
 			}
-			table->stack_uc_list[i].id = rc;
+			table->dev_uc_list[i].id = rc;
 		}
 	}
-	if (table->stack_uc_count < 0) {
-		efx_filter_init_rx(&spec, EFX_FILTER_PRI_REQUIRED,
-				   EFX_FILTER_FLAG_RX_RSS |
-				   EFX_FILTER_FLAG_RX_STACK,
+	if (table->dev_uc_count < 0) {
+		efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
+				   EFX_FILTER_FLAG_RX_RSS,
 				   0);
 		efx_filter_set_uc_def(&spec);
 		rc = efx_ef10_filter_insert(efx, &spec, true);
 		if (rc < 0) {
 			WARN_ON(1);
-			table->stack_uc_count = 0;
+			table->dev_uc_count = 0;
 		} else {
-			table->stack_uc_list[0].id = rc;
+			table->dev_uc_list[0].id = rc;
 		}
 	}
 
 	/* Insert/renew multicast filters */
-	if (table->stack_mc_count >= 0) {
-		for (i = 0; i < table->stack_mc_count; i++) {
-			efx_filter_init_rx(&spec, EFX_FILTER_PRI_REQUIRED,
-					   EFX_FILTER_FLAG_RX_RSS |
-					   EFX_FILTER_FLAG_RX_STACK,
+	if (table->dev_mc_count >= 0) {
+		for (i = 0; i < table->dev_mc_count; i++) {
+			efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
+					   EFX_FILTER_FLAG_RX_RSS,
 					   0);
 			efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC,
-						 table->stack_mc_list[i].addr);
+						 table->dev_mc_list[i].addr);
 			rc = efx_ef10_filter_insert(efx, &spec, true);
 			if (rc < 0) {
 				/* Fall back to multicast-promisc */
 				while (i--)
 					efx_ef10_filter_remove_safe(
-						efx, EFX_FILTER_PRI_REQUIRED,
-						table->stack_mc_list[i].id);
-				table->stack_mc_count = -1;
+						efx, EFX_FILTER_PRI_AUTO,
+						table->dev_mc_list[i].id);
+				table->dev_mc_count = -1;
 				break;
 			}
-			table->stack_mc_list[i].id = rc;
+			table->dev_mc_list[i].id = rc;
 		}
 	}
-	if (table->stack_mc_count < 0) {
-		efx_filter_init_rx(&spec, EFX_FILTER_PRI_REQUIRED,
-				   EFX_FILTER_FLAG_RX_RSS |
-				   EFX_FILTER_FLAG_RX_STACK,
+	if (table->dev_mc_count < 0) {
+		efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
+				   EFX_FILTER_FLAG_RX_RSS,
 				   0);
 		efx_filter_set_mc_def(&spec);
 		rc = efx_ef10_filter_insert(efx, &spec, true);
 		if (rc < 0) {
 			WARN_ON(1);
-			table->stack_mc_count = 0;
+			table->dev_mc_count = 0;
 		} else {
-			table->stack_mc_list[0].id = rc;
+			table->dev_mc_list[0].id = rc;
 		}
 	}
 
 	/* Remove filters that weren't renewed.  Since nothing else
-	 * changes the STACK_OLD flag or removes these filters, we
+	 * changes the AUTO_OLD flag or removes these filters, we
 	 * don't need to hold the filter_lock while scanning for
 	 * these filters.
 	 */
 	for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) {
 		if (ACCESS_ONCE(table->entry[i].spec) &
-		    EFX_EF10_FILTER_FLAG_STACK_OLD) {
-			if (efx_ef10_filter_remove_internal(efx,
-					EFX_FILTER_PRI_REQUIRED,
-					i, true) < 0)
+		    EFX_EF10_FILTER_FLAG_AUTO_OLD) {
+			if (efx_ef10_filter_remove_internal(
+				    efx, 1U << EFX_FILTER_PRI_AUTO,
+				    i, true) < 0)
 				remove_failed = true;
 		}
 	}
@@ -3195,6 +3263,87 @@ static int efx_ef10_mac_reconfigure(struct efx_nic *efx)
 	return efx_mcdi_set_mac(efx);
 }
 
+static int efx_ef10_start_bist(struct efx_nic *efx, u32 bist_type)
+{
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_START_BIST_IN_LEN);
+
+	MCDI_SET_DWORD(inbuf, START_BIST_IN_TYPE, bist_type);
+	return efx_mcdi_rpc(efx, MC_CMD_START_BIST, inbuf, sizeof(inbuf),
+			    NULL, 0, NULL);
+}
+
+/* MC BISTs follow a different poll mechanism to phy BISTs.
+ * The BIST is done in the poll handler on the MC, and the MCDI command
+ * will block until the BIST is done.
+ */
+static int efx_ef10_poll_bist(struct efx_nic *efx)
+{
+	int rc;
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_POLL_BIST_OUT_LEN);
+	size_t outlen;
+	u32 result;
+
+	rc = efx_mcdi_rpc(efx, MC_CMD_POLL_BIST, NULL, 0,
+			   outbuf, sizeof(outbuf), &outlen);
+	if (rc != 0)
+		return rc;
+
+	if (outlen < MC_CMD_POLL_BIST_OUT_LEN)
+		return -EIO;
+
+	result = MCDI_DWORD(outbuf, POLL_BIST_OUT_RESULT);
+	switch (result) {
+	case MC_CMD_POLL_BIST_PASSED:
+		netif_dbg(efx, hw, efx->net_dev, "BIST passed.\n");
+		return 0;
+	case MC_CMD_POLL_BIST_TIMEOUT:
+		netif_err(efx, hw, efx->net_dev, "BIST timed out\n");
+		return -EIO;
+	case MC_CMD_POLL_BIST_FAILED:
+		netif_err(efx, hw, efx->net_dev, "BIST failed.\n");
+		return -EIO;
+	default:
+		netif_err(efx, hw, efx->net_dev,
+			  "BIST returned unknown result %u\n", result);
+		return -EIO;
+	}
+}
+
+static int efx_ef10_run_bist(struct efx_nic *efx, u32 bist_type)
+{
+	int rc;
+
+	netif_dbg(efx, drv, efx->net_dev, "starting BIST type %u\n", bist_type);
+
+	rc = efx_ef10_start_bist(efx, bist_type);
+	if (rc != 0)
+		return rc;
+
+	return efx_ef10_poll_bist(efx);
+}
+
+static int
+efx_ef10_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
+{
+	int rc, rc2;
+
+	efx_reset_down(efx, RESET_TYPE_WORLD);
+
+	rc = efx_mcdi_rpc(efx, MC_CMD_ENABLE_OFFLINE_BIST,
+			  NULL, 0, NULL, 0, NULL);
+	if (rc != 0)
+		goto out;
+
+	tests->memory = efx_ef10_run_bist(efx, MC_CMD_MC_MEM_BIST) ? -1 : 1;
+	tests->registers = efx_ef10_run_bist(efx, MC_CMD_REG_BIST) ? -1 : 1;
+
+	rc = efx_mcdi_reset(efx, RESET_TYPE_WORLD);
+
+out:
+	rc2 = efx_reset_up(efx, RESET_TYPE_WORLD, rc == 0);
+	return rc ? rc : rc2;
+}
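The '? -1 : 1' assignments above rely on the sfc self-test reporting convention for the int fields in struct efx_self_tests; a hedged summary, based on how the driver reports results elsewhere rather than anything spelled out in this hunk:

/* Assumed convention for tests->memory / tests->registers above:
 *    1  -> BIST ran and passed
 *   -1  -> BIST ran and failed
 *    0  -> test not run (the fields start out zeroed)
 */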
+
 #ifdef CONFIG_SFC_MTD
 
 struct efx_ef10_nvram_type_info {
@@ -3213,6 +3362,7 @@ static const struct efx_ef10_nvram_type_info efx_ef10_nvram_types[] = {
 	{ NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT1, 0,   1, "sfc_exp_rom_cfg" },
 	{ NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT2, 0,   2, "sfc_exp_rom_cfg" },
 	{ NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3, 0,   3, "sfc_exp_rom_cfg" },
+	{ NVRAM_PARTITION_TYPE_LICENSE,		   0,    0, "sfc_license" },
 	{ NVRAM_PARTITION_TYPE_PHY_MIN,		   0xff, 0, "sfc_phy_fw" },
 };
 
@@ -3320,6 +3470,119 @@ static void efx_ef10_ptp_write_host_time(struct efx_nic *efx, u32 host_time)
 	_efx_writed(efx, cpu_to_le32(host_time), ER_DZ_MC_DB_LWRD);
 }
 
+static int efx_ef10_rx_enable_timestamping(struct efx_channel *channel,
+					   bool temp)
+{
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_LEN);
+	int rc;
+
+	if (channel->sync_events_state == SYNC_EVENTS_REQUESTED ||
+	    channel->sync_events_state == SYNC_EVENTS_VALID ||
+	    (temp && channel->sync_events_state == SYNC_EVENTS_DISABLED))
+		return 0;
+	channel->sync_events_state = SYNC_EVENTS_REQUESTED;
+
+	MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_TIME_EVENT_SUBSCRIBE);
+	MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
+	MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE,
+		       channel->channel);
+
+	rc = efx_mcdi_rpc(channel->efx, MC_CMD_PTP,
+			  inbuf, sizeof(inbuf), NULL, 0, NULL);
+
+	if (rc != 0)
+		channel->sync_events_state = temp ? SYNC_EVENTS_QUIESCENT :
+						    SYNC_EVENTS_DISABLED;
+
+	return rc;
+}
+
+static int efx_ef10_rx_disable_timestamping(struct efx_channel *channel,
+					    bool temp)
+{
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_LEN);
+	int rc;
+
+	if (channel->sync_events_state == SYNC_EVENTS_DISABLED ||
+	    (temp && channel->sync_events_state == SYNC_EVENTS_QUIESCENT))
+		return 0;
+	if (channel->sync_events_state == SYNC_EVENTS_QUIESCENT) {
+		channel->sync_events_state = SYNC_EVENTS_DISABLED;
+		return 0;
+	}
+	channel->sync_events_state = temp ? SYNC_EVENTS_QUIESCENT :
+					    SYNC_EVENTS_DISABLED;
+
+	MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_TIME_EVENT_UNSUBSCRIBE);
+	MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
+	MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_UNSUBSCRIBE_CONTROL,
+		       MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_SINGLE);
+	MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_UNSUBSCRIBE_QUEUE,
+		       channel->channel);
+
+	rc = efx_mcdi_rpc(channel->efx, MC_CMD_PTP,
+			  inbuf, sizeof(inbuf), NULL, 0, NULL);
+
+	return rc;
+}
+
+static int efx_ef10_ptp_set_ts_sync_events(struct efx_nic *efx, bool en,
+					   bool temp)
+{
+	int (*set)(struct efx_channel *channel, bool temp);
+	struct efx_channel *channel;
+
+	set = en ?
+	      efx_ef10_rx_enable_timestamping :
+	      efx_ef10_rx_disable_timestamping;
+
+	efx_for_each_channel(channel, efx) {
+		int rc = set(channel, temp);
+		if (en && rc != 0) {
+			efx_ef10_ptp_set_ts_sync_events(efx, false, temp);
+			return rc;
+		}
+	}
+
+	return 0;
+}
+
+static int efx_ef10_ptp_set_ts_config(struct efx_nic *efx,
+				      struct hwtstamp_config *init)
+{
+	int rc;
+
+	switch (init->rx_filter) {
+	case HWTSTAMP_FILTER_NONE:
+		efx_ef10_ptp_set_ts_sync_events(efx, false, false);
+		/* if TX timestamping is still requested then leave PTP on */
+		return efx_ptp_change_mode(efx,
+					   init->tx_type != HWTSTAMP_TX_OFF, 0);
+	case HWTSTAMP_FILTER_ALL:
+	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+	case HWTSTAMP_FILTER_PTP_V2_EVENT:
+	case HWTSTAMP_FILTER_PTP_V2_SYNC:
+	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+		init->rx_filter = HWTSTAMP_FILTER_ALL;
+		rc = efx_ptp_change_mode(efx, true, 0);
+		if (!rc)
+			rc = efx_ef10_ptp_set_ts_sync_events(efx, true, false);
+		if (rc)
+			efx_ptp_change_mode(efx, false, 0);
+		return rc;
+	default:
+		return -ERANGE;
+	}
+}
+
 const struct efx_nic_type efx_hunt_a0_nic_type = {
 	.mem_map_size = efx_ef10_mem_map_size,
 	.probe = efx_ef10_probe,
@@ -3329,13 +3592,14 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
 	.fini = efx_port_dummy_op_void,
 	.map_reset_reason = efx_mcdi_map_reset_reason,
 	.map_reset_flags = efx_ef10_map_reset_flags,
-	.reset = efx_mcdi_reset,
+	.reset = efx_ef10_reset,
 	.probe_port = efx_mcdi_port_probe,
 	.remove_port = efx_mcdi_port_remove,
 	.fini_dmaq = efx_ef10_fini_dmaq,
 	.describe_stats = efx_ef10_describe_stats,
 	.update_stats = efx_ef10_update_stats,
 	.start_stats = efx_mcdi_mac_start_stats,
+	.pull_stats = efx_mcdi_mac_pull_stats,
 	.stop_stats = efx_mcdi_mac_stop_stats,
 	.set_id_led = efx_mcdi_set_id_led,
 	.push_irq_moderation = efx_ef10_push_irq_moderation,
@@ -3345,7 +3609,7 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
 	.get_wol = efx_ef10_get_wol,
 	.set_wol = efx_ef10_set_wol,
 	.resume_wol = efx_port_dummy_op_void,
-	/* TODO: test_chip */
+	.test_chip = efx_ef10_test_chip,
 	.test_nvram = efx_mcdi_nvram_test_all,
 	.mcdi_request = efx_ef10_mcdi_request,
 	.mcdi_poll_response = efx_ef10_mcdi_poll_response,
@@ -3360,7 +3624,7 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
 	.tx_init = efx_ef10_tx_init,
 	.tx_remove = efx_ef10_tx_remove,
 	.tx_write = efx_ef10_tx_write,
-	.rx_push_indir_table = efx_ef10_rx_push_indir_table,
+	.rx_push_rss_config = efx_ef10_rx_push_rss_config,
 	.rx_probe = efx_ef10_rx_probe,
 	.rx_init = efx_ef10_rx_init,
 	.rx_remove = efx_ef10_rx_remove,
@@ -3397,11 +3661,14 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
 	.mtd_sync = efx_mcdi_mtd_sync,
 #endif
 	.ptp_write_host_time = efx_ef10_ptp_write_host_time,
+	.ptp_set_ts_sync_events = efx_ef10_ptp_set_ts_sync_events,
+	.ptp_set_ts_config = efx_ef10_ptp_set_ts_config,
 
 	.revision = EFX_REV_HUNT_A0,
 	.max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH),
 	.rx_prefix_size = ES_DZ_RX_PREFIX_SIZE,
 	.rx_hash_offset = ES_DZ_RX_PREFIX_HASH_OFST,
+	.rx_ts_offset = ES_DZ_RX_PREFIX_TSTAMP_OFST,
 	.can_rx_scatter = true,
 	.always_rx_scatter = true,
 	.max_interrupt_mode = EFX_INT_MODE_MSIX,
@@ -3410,4 +3677,6 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
 			     NETIF_F_RXHASH | NETIF_F_NTUPLE),
 	.mcdi_max_ver = 2,
 	.max_rx_ip_filters = HUNT_FILTER_TBL_ROWS,
+	.hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE |
+			    1 << HWTSTAMP_FILTER_ALL,
 };
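For orientation: the new ptp_set_ts_config hook and the hwtstamp_filters mask above are reached from user space through the standard SIOCSHWTSTAMP ioctl. A hypothetical user-space sketch, not from the patch, using only standard UAPI from <linux/net_tstamp.h> and <linux/sockios.h>:

/* Hypothetical user-space code: request RX timestamps on all packets;
 * the request reaches efx_ef10_ptp_set_ts_config() via SIOCSHWTSTAMP.
 */
#include <linux/net_tstamp.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <string.h>
#include <sys/ioctl.h>

static int enable_rx_timestamps(int sock, const char *ifname)
{
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_OFF,
		.rx_filter = HWTSTAMP_FILTER_ALL,
	};
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&cfg;

	return ioctl(sock, SIOCSHWTSTAMP, &ifr);	/* 0 on success */
}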
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index fd844b53e385..83d464347021 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -83,6 +83,7 @@ const char *const efx_reset_type_names[] = {
 	[RESET_TYPE_DMA_ERROR]          = "DMA_ERROR",
 	[RESET_TYPE_TX_SKIP]            = "TX_SKIP",
 	[RESET_TYPE_MC_FAILURE]         = "MC_FAILURE",
+	[RESET_TYPE_MC_BIST]		= "MC_BIST",
 };
 
 /* Reset workqueue. If any NIC has a hardware failure then a reset will be
@@ -91,6 +92,12 @@ const char *const efx_reset_type_names[] = {
  */
 static struct workqueue_struct *reset_workqueue;
 
+/* How often and how many times to poll for a reset while waiting for a
+ * BIST that another function started to complete.
+ */
+#define BIST_WAIT_DELAY_MS	100
+#define BIST_WAIT_DELAY_COUNT	100
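One plausible shape for the bounded wait these two constants parameterise; the helper that actually uses them is not in this excerpt, and bist_has_ended() below is a placeholder, not a real function:

/* Illustrative only: poll up to BIST_WAIT_DELAY_COUNT times, sleeping
 * BIST_WAIT_DELAY_MS between attempts (roughly a 10 second budget).
 * Needs <linux/delay.h> for msleep(); bist_has_ended() is hypothetical.
 */
static int demo_wait_for_bist_end(struct efx_nic *efx)
{
	int i;

	for (i = 0; i < BIST_WAIT_DELAY_COUNT; i++) {
		if (bist_has_ended(efx))	/* placeholder check */
			return 0;
		msleep(BIST_WAIT_DELAY_MS);
	}
	return -ETIMEDOUT;
}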
+
 /**************************************************************************
  *
  * Configurable values
@@ -246,7 +253,7 @@ static int efx_process_channel(struct efx_channel *channel, int budget)
 			efx_channel_get_rx_queue(channel);
 
 		efx_rx_flush_packet(channel);
-		efx_fast_push_rx_descriptors(rx_queue);
+		efx_fast_push_rx_descriptors(rx_queue, true);
 	}
 
 	return spent;
@@ -639,7 +646,9 @@ static void efx_start_datapath(struct efx_nic *efx)
 		efx_for_each_channel_rx_queue(rx_queue, channel) {
 			efx_init_rx_queue(rx_queue);
 			atomic_inc(&efx->active_queues);
-			efx_nic_generate_fill_event(rx_queue);
+			efx_stop_eventq(channel);
+			efx_fast_push_rx_descriptors(rx_queue, false);
+			efx_start_eventq(channel);
 		}
 
 		WARN_ON(channel->rx_pkt_n_frags);
@@ -1051,18 +1060,23 @@ static void efx_start_port(struct efx_nic *efx)
 	mutex_lock(&efx->mac_lock);
 	efx->port_enabled = true;
 
-	/* efx_mac_work() might have been scheduled after efx_stop_port(),
-	 * and then cancelled by efx_flush_all() */
+	/* Ensure MAC ingress/egress is enabled */
 	efx->type->reconfigure_mac(efx);
 
 	mutex_unlock(&efx->mac_lock);
 }
 
-/* Prevent efx_mac_work() and efx_monitor() from working */
+/* Cancel work for MAC reconfiguration, periodic hardware monitoring
+ * and the async self-test, wait for them to finish and prevent them
+ * being scheduled again.  This doesn't cover online resets, which
+ * should only be cancelled when removing the device.
+ */
 static void efx_stop_port(struct efx_nic *efx)
 {
 	netif_dbg(efx, ifdown, efx->net_dev, "stop port\n");
 
+	EFX_ASSERT_RESET_SERIALISED(efx);
+
 	mutex_lock(&efx->mac_lock);
 	efx->port_enabled = false;
 	mutex_unlock(&efx->mac_lock);
@@ -1070,6 +1084,10 @@ static void efx_stop_port(struct efx_nic *efx)
 	/* Serialise against efx_set_multicast_list() */
 	netif_addr_lock_bh(efx->net_dev);
 	netif_addr_unlock_bh(efx->net_dev);
+
+	cancel_delayed_work_sync(&efx->monitor_work);
+	efx_selftest_async_cancel(efx);
+	cancel_work_sync(&efx->mac_work);
 }
 
 static void efx_fini_port(struct efx_nic *efx)
@@ -1099,6 +1117,77 @@ static void efx_remove_port(struct efx_nic *efx)
  *
  **************************************************************************/
 
+static LIST_HEAD(efx_primary_list);
+static LIST_HEAD(efx_unassociated_list);
+
+static bool efx_same_controller(struct efx_nic *left, struct efx_nic *right)
+{
+	return left->type == right->type &&
+		left->vpd_sn && right->vpd_sn &&
+		!strcmp(left->vpd_sn, right->vpd_sn);
+}
+
+static void efx_associate(struct efx_nic *efx)
+{
+	struct efx_nic *other, *next;
+
+	if (efx->primary == efx) {
+		/* Adding primary function; look for secondaries */
+
+		netif_dbg(efx, probe, efx->net_dev, "adding to primary list\n");
+		list_add_tail(&efx->node, &efx_primary_list);
+
+		list_for_each_entry_safe(other, next, &efx_unassociated_list,
+					 node) {
+			if (efx_same_controller(efx, other)) {
+				list_del(&other->node);
+				netif_dbg(other, probe, other->net_dev,
+					  "moving to secondary list of %s %s\n",
+					  pci_name(efx->pci_dev),
+					  efx->net_dev->name);
+				list_add_tail(&other->node,
+					      &efx->secondary_list);
+				other->primary = efx;
+			}
+		}
+	} else {
+		/* Adding secondary function; look for primary */
+
+		list_for_each_entry(other, &efx_primary_list, node) {
+			if (efx_same_controller(efx, other)) {
+				netif_dbg(efx, probe, efx->net_dev,
+					  "adding to secondary list of %s %s\n",
+					  pci_name(other->pci_dev),
+					  other->net_dev->name);
+				list_add_tail(&efx->node,
+					      &other->secondary_list);
+				efx->primary = other;
+				return;
+			}
+		}
+
+		netif_dbg(efx, probe, efx->net_dev,
+			  "adding to unassociated list\n");
+		list_add_tail(&efx->node, &efx_unassociated_list);
+	}
+}
+
+static void efx_dissociate(struct efx_nic *efx)
+{
+	struct efx_nic *other, *next;
+
+	list_del(&efx->node);
+	efx->primary = NULL;
+
+	list_for_each_entry_safe(other, next, &efx->secondary_list, node) {
+		list_del(&other->node);
+		netif_dbg(other, probe, other->net_dev,
+			  "moving to unassociated list\n");
+		list_add_tail(&other->node, &efx_unassociated_list);
+		other->primary = NULL;
+	}
+}
+
 /* This configures the PCI device to enable I/O and DMA. */
 static int efx_init_io(struct efx_nic *efx)
 {
@@ -1675,18 +1764,10 @@ static void efx_start_all(struct efx_nic *efx)
 	}
 
 	efx->type->start_stats(efx);
-}
-
-/* Flush all delayed work. Should only be called when no more delayed work
- * will be scheduled. This doesn't flush pending online resets (efx_reset),
- * since we're holding the rtnl_lock at this point. */
-static void efx_flush_all(struct efx_nic *efx)
-{
-	/* Make sure the hardware monitor and event self-test are stopped */
-	cancel_delayed_work_sync(&efx->monitor_work);
-	efx_selftest_async_cancel(efx);
-	/* Stop scheduled port reconfigurations */
-	cancel_work_sync(&efx->mac_work);
+	efx->type->pull_stats(efx);
+	spin_lock_bh(&efx->stats_lock);
+	efx->type->update_stats(efx, NULL, NULL);
+	spin_unlock_bh(&efx->stats_lock);
 }
 
 /* Quiesce the hardware and software data path, and regular activity
@@ -1702,12 +1783,16 @@ static void efx_stop_all(struct efx_nic *efx)
 	if (!efx->port_enabled)
 		return;
 
+	/* update stats before we go down so we can accurately count
+	 * rx_nodesc_drops
+	 */
+	efx->type->pull_stats(efx);
+	spin_lock_bh(&efx->stats_lock);
+	efx->type->update_stats(efx, NULL, NULL);
+	spin_unlock_bh(&efx->stats_lock);
 	efx->type->stop_stats(efx);
 	efx_stop_port(efx);
 
-	/* Flush efx_mac_work(), refill_workqueue, monitor_work */
-	efx_flush_all(efx);
-
 	/* Stop the kernel transmit interface.  This is only valid if
 	 * the device is stopped or detached; otherwise the watchdog
 	 * may fire immediately.
@@ -1851,7 +1936,9 @@ static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
 	struct mii_ioctl_data *data = if_mii(ifr);
 
 	if (cmd == SIOCSHWTSTAMP)
-		return efx_ptp_ioctl(efx, ifr, cmd);
+		return efx_ptp_set_ts_config(efx, ifr);
+	if (cmd == SIOCGHWTSTAMP)
+		return efx_ptp_get_ts_config(efx, ifr);
 
 	/* Convert phy_id from older PRTAD/DEVAD format */
 	if ((cmd == SIOCGMIIREG || cmd == SIOCSMIIREG) &&
@@ -2064,7 +2151,7 @@ static int efx_set_features(struct net_device *net_dev, netdev_features_t data)
 
 	/* If disabling RX n-tuple filtering, clear existing filters */
 	if (net_dev->features & ~data & NETIF_F_NTUPLE)
-		efx_filter_clear_rx(efx, EFX_FILTER_PRI_MANUAL);
+		return efx->type->filter_clear_rx(efx, EFX_FILTER_PRI_MANUAL);
 
 	return 0;
 }
@@ -2198,6 +2285,8 @@ static int efx_register_netdev(struct efx_nic *efx)
 			efx_init_tx_queue_core_txq(tx_queue);
 	}
 
+	efx_associate(efx);
+
 	rtnl_unlock();
 
 	rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type);
@@ -2211,6 +2300,7 @@ static int efx_register_netdev(struct efx_nic *efx)
 
 fail_registered:
 	rtnl_lock();
+	efx_dissociate(efx);
 	unregister_netdevice(net_dev);
 fail_locked:
 	efx->state = STATE_UNINIT;
@@ -2387,6 +2477,24 @@ int efx_try_recovery(struct efx_nic *efx)
 	return 0;
 }
 
+static void efx_wait_for_bist_end(struct efx_nic *efx)
+{
+	int i;
+
+	for (i = 0; i < BIST_WAIT_DELAY_COUNT; ++i) {
+		if (efx_mcdi_poll_reboot(efx))
+			goto out;
+		msleep(BIST_WAIT_DELAY_MS);
+	}
+
+	netif_err(efx, drv, efx->net_dev, "Warning: No MC reboot after BIST mode\n");
+out:
+	/* Either way unset the BIST flag. If we found no reboot we probably
+	 * won't recover, but we should try.
+	 */
+	efx->mc_bist_for_other_fn = false;
+}
+
 /* The worker thread exists so that code that cannot sleep can
  * schedule a reset for later.
  */
@@ -2399,6 +2507,9 @@ static void efx_reset_work(struct work_struct *data)
 	pending = ACCESS_ONCE(efx->reset_pending);
 	method = fls(pending) - 1;
 
+	if (method == RESET_TYPE_MC_BIST)
+		efx_wait_for_bist_end(efx);
+
 	if ((method == RESET_TYPE_RECOVER_OR_DISABLE ||
 	     method == RESET_TYPE_RECOVER_OR_ALL) &&
 	    efx_try_recovery(efx))
@@ -2437,6 +2548,7 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
 	case RESET_TYPE_WORLD:
 	case RESET_TYPE_DISABLE:
 	case RESET_TYPE_RECOVER_OR_DISABLE:
+	case RESET_TYPE_MC_BIST:
 		method = type;
 		netif_dbg(efx, drv, efx->net_dev, "scheduling %s reset\n",
 			  RESET_TYPE(method));
@@ -2530,6 +2642,8 @@ static int efx_init_struct(struct efx_nic *efx,
 	int i;
 
 	/* Initialise common structures */
+	INIT_LIST_HEAD(&efx->node);
+	INIT_LIST_HEAD(&efx->secondary_list);
 	spin_lock_init(&efx->biu_lock);
 #ifdef CONFIG_SFC_MTD
 	INIT_LIST_HEAD(&efx->mtd_list);
@@ -2548,6 +2662,8 @@ static int efx_init_struct(struct efx_nic *efx,
 		NET_IP_ALIGN ? (efx->rx_prefix_size + NET_IP_ALIGN) % 4 : 0;
 	efx->rx_packet_hash_offset =
 		efx->type->rx_hash_offset - efx->type->rx_prefix_size;
+	efx->rx_packet_ts_offset =
+		efx->type->rx_ts_offset - efx->type->rx_prefix_size;
 	spin_lock_init(&efx->stats_lock);
 	mutex_init(&efx->mac_lock);
 	efx->phy_op = &efx_dummy_phy_operations;
@@ -2588,6 +2704,8 @@ static void efx_fini_struct(struct efx_nic *efx)
 	for (i = 0; i < EFX_MAX_CHANNELS; i++)
 		kfree(efx->channel[i]);
 
+	kfree(efx->vpd_sn);
+
 	if (efx->workqueue) {
 		destroy_workqueue(efx->workqueue);
 		efx->workqueue = NULL;
@@ -2632,6 +2750,7 @@ static void efx_pci_remove(struct pci_dev *pci_dev)
 
 	/* Mark the NIC as fini, then stop the interface */
 	rtnl_lock();
+	efx_dissociate(efx);
 	dev_close(efx->net_dev);
 	efx_disable_interrupts(efx);
 	rtnl_unlock();
@@ -2647,7 +2766,6 @@ static void efx_pci_remove(struct pci_dev *pci_dev)
 	netif_dbg(efx, drv, efx->net_dev, "shutdown successful\n");
 
 	efx_fini_struct(efx);
-	pci_set_drvdata(pci_dev, NULL);
 	free_netdev(efx->net_dev);
 
 	pci_disable_pcie_error_reporting(pci_dev);
@@ -2659,12 +2777,12 @@ static void efx_pci_remove(struct pci_dev *pci_dev)
  * always appear within the first 512 bytes.
  */
 #define SFC_VPD_LEN 512
-static void efx_print_product_vpd(struct efx_nic *efx)
+static void efx_probe_vpd_strings(struct efx_nic *efx)
 {
 	struct pci_dev *dev = efx->pci_dev;
 	char vpd_data[SFC_VPD_LEN];
 	ssize_t vpd_size;
-	int i, j;
+	int ro_start, ro_size, i, j;
 
 	/* Get the vpd data from the device */
 	vpd_size = pci_read_vpd(dev, 0, sizeof(vpd_data), vpd_data);
@@ -2674,14 +2792,15 @@ static void efx_print_product_vpd(struct efx_nic *efx)
 	}
 
 	/* Get the Read only section */
-	i = pci_vpd_find_tag(vpd_data, 0, vpd_size, PCI_VPD_LRDT_RO_DATA);
-	if (i < 0) {
+	ro_start = pci_vpd_find_tag(vpd_data, 0, vpd_size, PCI_VPD_LRDT_RO_DATA);
+	if (ro_start < 0) {
 		netif_err(efx, drv, efx->net_dev, "VPD Read-only not found\n");
 		return;
 	}
 
-	j = pci_vpd_lrdt_size(&vpd_data[i]);
-	i += PCI_VPD_LRDT_TAG_SIZE;
+	ro_size = pci_vpd_lrdt_size(&vpd_data[ro_start]);
+	j = ro_size;
+	i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
 	if (i + j > vpd_size)
 		j = vpd_size - i;
 
@@ -2701,6 +2820,27 @@ static void efx_print_product_vpd(struct efx_nic *efx)
 
 	netif_info(efx, drv, efx->net_dev,
 		   "Part Number : %.*s\n", j, &vpd_data[i]);
+
+	i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
+	j = ro_size;
+	i = pci_vpd_find_info_keyword(vpd_data, i, j, "SN");
+	if (i < 0) {
+		netif_err(efx, drv, efx->net_dev, "Serial number not found\n");
+		return;
+	}
+
+	j = pci_vpd_info_field_size(&vpd_data[i]);
+	i += PCI_VPD_INFO_FLD_HDR_SIZE;
+	if (i + j > vpd_size) {
+		netif_err(efx, drv, efx->net_dev, "Incomplete serial number\n");
+		return;
+	}
+
+	efx->vpd_sn = kmalloc(j + 1, GFP_KERNEL);
+	if (!efx->vpd_sn)
+		return;
+
+	snprintf(efx->vpd_sn, j + 1, "%s", &vpd_data[i]);
 }
 
 
@@ -2797,7 +2937,7 @@ static int efx_pci_probe(struct pci_dev *pci_dev,
 	netif_info(efx, probe, efx->net_dev,
 		   "Solarflare NIC detected\n");
 
-	efx_print_product_vpd(efx);
+	efx_probe_vpd_strings(efx);
 
 	/* Set up basic I/O (BAR mappings etc) */
 	rc = efx_init_io(efx);
@@ -2841,7 +2981,6 @@ static int efx_pci_probe(struct pci_dev *pci_dev,
  fail2:
 	efx_fini_struct(efx);
  fail1:
-	pci_set_drvdata(pci_dev, NULL);
 	WARN_ON(rc > 0);
 	netif_dbg(efx, drv, efx->net_dev, "initialisation failed. rc=%d\n", rc);
 	free_netdev(net_dev);
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index b8235ee5d7d7..dbd7b78fe01c 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -37,7 +37,7 @@ int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
 void efx_remove_rx_queue(struct efx_rx_queue *rx_queue);
 void efx_init_rx_queue(struct efx_rx_queue *rx_queue);
 void efx_fini_rx_queue(struct efx_rx_queue *rx_queue);
-void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue);
+void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic);
 void efx_rx_slow_fill(unsigned long context);
 void __efx_rx_packet(struct efx_channel *channel);
 void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
@@ -66,6 +66,9 @@ void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
 #define EFX_RXQ_MIN_ENT		128U
 #define EFX_TXQ_MIN_ENT(efx)	(2 * efx_tx_max_skb_descs(efx))
 
+#define EFX_TXQ_MAX_ENT(efx)	(EFX_WORKAROUND_35388(efx) ? \
+				 EFX_MAX_DMAQ_SIZE / 2 : EFX_MAX_DMAQ_SIZE)
+
 /* Filters */
 
 /**
@@ -134,17 +137,6 @@ efx_filter_get_filter_safe(struct efx_nic *efx,
 	return efx->type->filter_get_safe(efx, priority, filter_id, spec);
 }
 
-/**
- * efx_farch_filter_clear_rx - remove RX filters by priority
- * @efx: NIC from which to remove the filters
- * @priority: Maximum priority to remove
- */
-static inline void efx_filter_clear_rx(struct efx_nic *efx,
-				       enum efx_filter_priority priority)
-{
-	return efx->type->filter_clear_rx(efx, priority);
-}
-
 static inline u32 efx_filter_count_rx_used(struct efx_nic *efx,
 					   enum efx_filter_priority priority)
 {
diff --git a/drivers/net/ethernet/sfc/enum.h b/drivers/net/ethernet/sfc/enum.h
index 7fdfee019092..75ef7ef6450b 100644
--- a/drivers/net/ethernet/sfc/enum.h
+++ b/drivers/net/ethernet/sfc/enum.h
@@ -165,6 +165,7 @@ enum reset_type {
 	RESET_TYPE_DMA_ERROR,
 	RESET_TYPE_TX_SKIP,
 	RESET_TYPE_MC_FAILURE,
+	RESET_TYPE_MC_BIST,
 	RESET_TYPE_MAX,
 };
 
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 1f529fa2edb1..229428915aa8 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -318,6 +318,8 @@ static int efx_ethtool_fill_self_tests(struct efx_nic *efx,
 			      "eventq.int", NULL);
 	}
 
+	efx_fill_test(n++, strings, data, &tests->memory,
+		      "core", 0, "memory", NULL);
 	efx_fill_test(n++, strings, data, &tests->registers,
 		      "core", 0, "registers", NULL);
 
@@ -357,7 +359,8 @@ static int efx_ethtool_get_sset_count(struct net_device *net_dev,
 	switch (string_set) {
 	case ETH_SS_STATS:
 		return efx->type->describe_stats(efx, NULL) +
-			EFX_ETHTOOL_SW_STAT_COUNT;
+			EFX_ETHTOOL_SW_STAT_COUNT +
+			efx_ptp_describe_stats(efx, NULL);
 	case ETH_SS_TEST:
 		return efx_ethtool_fill_self_tests(efx, NULL, NULL, NULL);
 	default:
@@ -378,6 +381,8 @@ static void efx_ethtool_get_strings(struct net_device *net_dev,
 		for (i = 0; i < EFX_ETHTOOL_SW_STAT_COUNT; i++)
 			strlcpy(strings + i * ETH_GSTRING_LEN,
 				efx_sw_stat_desc[i].name, ETH_GSTRING_LEN);
+		strings += EFX_ETHTOOL_SW_STAT_COUNT * ETH_GSTRING_LEN;
+		efx_ptp_describe_stats(efx, strings);
 		break;
 	case ETH_SS_TEST:
 		efx_ethtool_fill_self_tests(efx, NULL, strings, NULL);
@@ -427,8 +432,11 @@ static void efx_ethtool_get_stats(struct net_device *net_dev,
 			break;
 		}
 	}
+	data += EFX_ETHTOOL_SW_STAT_COUNT;
 
 	spin_unlock_bh(&efx->stats_lock);
+
+	efx_ptp_update_stats(efx, data);
 }
 
 static void efx_ethtool_self_test(struct net_device *net_dev,
@@ -583,7 +591,7 @@ static void efx_ethtool_get_ringparam(struct net_device *net_dev,
 	struct efx_nic *efx = netdev_priv(net_dev);
 
 	ring->rx_max_pending = EFX_MAX_DMAQ_SIZE;
-	ring->tx_max_pending = EFX_MAX_DMAQ_SIZE;
+	ring->tx_max_pending = EFX_TXQ_MAX_ENT(efx);
 	ring->rx_pending = efx->rxq_entries;
 	ring->tx_pending = efx->txq_entries;
 }
@@ -596,7 +604,7 @@ static int efx_ethtool_set_ringparam(struct net_device *net_dev,
 
 	if (ring->rx_mini_pending || ring->rx_jumbo_pending ||
 	    ring->rx_pending > EFX_MAX_DMAQ_SIZE ||
-	    ring->tx_pending > EFX_MAX_DMAQ_SIZE)
+	    ring->tx_pending > EFX_TXQ_MAX_ENT(efx))
 		return -EINVAL;
 
 	if (ring->rx_pending < EFX_RXQ_MIN_ENT) {
@@ -1032,7 +1040,7 @@ static int efx_ethtool_set_rxfh_indir(struct net_device *net_dev,
 	struct efx_nic *efx = netdev_priv(net_dev);
 
 	memcpy(efx->rx_indir_table, indir, sizeof(efx->rx_indir_table));
-	efx_nic_push_rx_indir_table(efx);
+	efx->type->rx_push_rss_config(efx);
 	return 0;
 }
 
diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c
index ff5d322b9b49..18d6f761f4d0 100644
--- a/drivers/net/ethernet/sfc/falcon.c
+++ b/drivers/net/ethernet/sfc/falcon.c
@@ -469,6 +469,24 @@ static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
 }
 /**************************************************************************
  *
+ * RSS
+ *
+ **************************************************************************
+ */
+
+static void falcon_b0_rx_push_rss_config(struct efx_nic *efx)
+{
+	efx_oword_t temp;
+
+	/* Set hash key for IPv4 */
+	memcpy(&temp, efx->rx_hash_key, sizeof(temp));
+	efx_writeo(efx, &temp, FR_BZ_RX_RSS_TKEY);
+
+	efx_farch_rx_push_indir_table(efx);
+}
+
+/**************************************************************************
+ *
  * EEPROM/flash
  *
  **************************************************************************
@@ -2247,6 +2265,8 @@ static int falcon_probe_nic(struct efx_nic *efx)
 	struct falcon_board *board;
 	int rc;
 
+	efx->primary = efx; /* only one usable function per controller */
+
 	/* Allocate storage for hardware specific data */
 	nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
 	if (!nic_data)
@@ -2482,9 +2502,7 @@ static int falcon_init_nic(struct efx_nic *efx)
 	falcon_init_rx_cfg(efx);
 
 	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
-		/* Set hash key for IPv4 */
-		memcpy(&temp, efx->rx_hash_key, sizeof(temp));
-		efx_writeo(efx, &temp, FR_BZ_RX_RSS_TKEY);
+		falcon_b0_rx_push_rss_config(efx);
 
 		/* Set destination of both TX and RX Flush events */
 		EFX_POPULATE_OWORD_1(temp, FRF_BZ_FLS_EVQ_ID, 0);
@@ -2593,6 +2611,14 @@ void falcon_start_nic_stats(struct efx_nic *efx)
 	spin_unlock_bh(&efx->stats_lock);
 }
 
+/* We don't actually pull stats on falcon. Wait 10ms so that
+ * they arrive when we call this just after start_stats.
+ */
+static void falcon_pull_nic_stats(struct efx_nic *efx)
+{
+	msleep(10);
+}
+
 void falcon_stop_nic_stats(struct efx_nic *efx)
 {
 	struct falcon_nic_data *nic_data = efx->nic_data;
@@ -2672,6 +2698,7 @@ const struct efx_nic_type falcon_a1_nic_type = {
 	.describe_stats = falcon_describe_nic_stats,
 	.update_stats = falcon_update_nic_stats,
 	.start_stats = falcon_start_nic_stats,
+	.pull_stats = falcon_pull_nic_stats,
 	.stop_stats = falcon_stop_nic_stats,
 	.set_id_led = falcon_set_id_led,
 	.push_irq_moderation = falcon_push_irq_moderation,
@@ -2692,7 +2719,7 @@ const struct efx_nic_type falcon_a1_nic_type = {
 	.tx_init = efx_farch_tx_init,
 	.tx_remove = efx_farch_tx_remove,
 	.tx_write = efx_farch_tx_write,
-	.rx_push_indir_table = efx_farch_rx_push_indir_table,
+	.rx_push_rss_config = efx_port_dummy_op_void,
 	.rx_probe = efx_farch_rx_probe,
 	.rx_init = efx_farch_rx_init,
 	.rx_remove = efx_farch_rx_remove,
@@ -2765,6 +2792,7 @@ const struct efx_nic_type falcon_b0_nic_type = {
 	.describe_stats = falcon_describe_nic_stats,
 	.update_stats = falcon_update_nic_stats,
 	.start_stats = falcon_start_nic_stats,
+	.pull_stats = falcon_pull_nic_stats,
 	.stop_stats = falcon_stop_nic_stats,
 	.set_id_led = falcon_set_id_led,
 	.push_irq_moderation = falcon_push_irq_moderation,
@@ -2786,7 +2814,7 @@ const struct efx_nic_type falcon_b0_nic_type = {
 	.tx_init = efx_farch_tx_init,
 	.tx_remove = efx_farch_tx_remove,
 	.tx_write = efx_farch_tx_write,
-	.rx_push_indir_table = efx_farch_rx_push_indir_table,
+	.rx_push_rss_config = falcon_b0_rx_push_rss_config,
 	.rx_probe = efx_farch_rx_probe,
 	.rx_init = efx_farch_rx_init,
 	.rx_remove = efx_farch_rx_remove,
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
index c0907d884d75..f72489a105ca 100644
--- a/drivers/net/ethernet/sfc/farch.c
+++ b/drivers/net/ethernet/sfc/farch.c
@@ -1147,7 +1147,7 @@ static void efx_farch_handle_generated_event(struct efx_channel *channel,
 		/* The queue must be empty, so we won't receive any rx
 		 * events, so efx_process_channel() won't refill the
 		 * queue. Refill it here */
-		efx_fast_push_rx_descriptors(rx_queue);
+		efx_fast_push_rx_descriptors(rx_queue, true);
 	} else if (rx_queue && magic == EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue)) {
 		efx_farch_handle_drain_event(channel);
 	} else if (code == _EFX_CHANNEL_MAGIC_TX_DRAIN) {
@@ -1618,8 +1618,7 @@ void efx_farch_rx_push_indir_table(struct efx_nic *efx)
 	size_t i = 0;
 	efx_dword_t dword;
 
-	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
-		return;
+	BUG_ON(efx_nic_rev(efx) < EFX_REV_FALCON_B0);
 
 	BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) !=
 		     FR_BZ_RX_INDIRECTION_TBL_ROWS);
@@ -1745,8 +1744,6 @@ void efx_farch_init_common(struct efx_nic *efx)
 	EFX_INVERT_OWORD(temp);
 	efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER);
 
-	efx_farch_rx_push_indir_table(efx);
-
 	/* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
 	 * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q.
 	 */
@@ -2187,14 +2184,14 @@ efx_farch_filter_to_gen_spec(struct efx_filter_spec *gen_spec,
 }
 
 static void
-efx_farch_filter_init_rx_for_stack(struct efx_nic *efx,
-				   struct efx_farch_filter_spec *spec)
+efx_farch_filter_init_rx_auto(struct efx_nic *efx,
+			      struct efx_farch_filter_spec *spec)
 {
 	/* If there's only one channel then disable RSS for non VF
 	 * traffic, thereby allowing VFs to use RSS when the PF can't.
 	 */
-	spec->priority = EFX_FILTER_PRI_REQUIRED;
-	spec->flags = (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_RX_STACK |
+	spec->priority = EFX_FILTER_PRI_AUTO;
+	spec->flags = (EFX_FILTER_FLAG_RX |
 		       (efx->n_rx_channels > 1 ? EFX_FILTER_FLAG_RX_RSS : 0) |
 		       (efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0));
 	spec->dmaq_id = 0;
@@ -2459,20 +2456,13 @@ s32 efx_farch_filter_insert(struct efx_nic *efx,
 			rc = -EEXIST;
 			goto out;
 		}
-		if (spec.priority < saved_spec->priority &&
-		    !(saved_spec->priority == EFX_FILTER_PRI_REQUIRED &&
-		      saved_spec->flags & EFX_FILTER_FLAG_RX_STACK)) {
+		if (spec.priority < saved_spec->priority) {
 			rc = -EPERM;
 			goto out;
 		}
-		if (spec.flags & EFX_FILTER_FLAG_RX_STACK) {
-			/* Just make sure it won't be removed */
-			saved_spec->flags |= EFX_FILTER_FLAG_RX_STACK;
-			rc = 0;
-			goto out;
-		}
-		/* Retain the RX_STACK flag */
-		spec.flags |= saved_spec->flags & EFX_FILTER_FLAG_RX_STACK;
+		if (saved_spec->priority == EFX_FILTER_PRI_AUTO ||
+		    saved_spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO)
+			spec.flags |= EFX_FILTER_FLAG_RX_OVER_AUTO;
 	}
 
 	/* Insert the filter */
@@ -2553,11 +2543,11 @@ static int efx_farch_filter_remove(struct efx_nic *efx,
 	struct efx_farch_filter_spec *spec = &table->spec[filter_idx];
 
 	if (!test_bit(filter_idx, table->used_bitmap) ||
-	    spec->priority > priority)
+	    spec->priority != priority)
 		return -ENOENT;
 
-	if (spec->flags & EFX_FILTER_FLAG_RX_STACK) {
-		efx_farch_filter_init_rx_for_stack(efx, spec);
+	if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO) {
+		efx_farch_filter_init_rx_auto(efx, spec);
 		efx_farch_filter_push_rx_config(efx);
 	} else {
 		efx_farch_filter_table_clear_entry(efx, table, filter_idx);
@@ -2640,12 +2630,15 @@ efx_farch_filter_table_clear(struct efx_nic *efx,
 	unsigned int filter_idx;
 
 	spin_lock_bh(&efx->filter_lock);
-	for (filter_idx = 0; filter_idx < table->size; ++filter_idx)
-		efx_farch_filter_remove(efx, table, filter_idx, priority);
+	for (filter_idx = 0; filter_idx < table->size; ++filter_idx) {
+		if (table->spec[filter_idx].priority != EFX_FILTER_PRI_AUTO)
+			efx_farch_filter_remove(efx, table,
+						filter_idx, priority);
+	}
 	spin_unlock_bh(&efx->filter_lock);
 }
 
-void efx_farch_filter_clear_rx(struct efx_nic *efx,
+int efx_farch_filter_clear_rx(struct efx_nic *efx,
 			       enum efx_filter_priority priority)
 {
 	efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_IP,
@@ -2654,6 +2647,7 @@ void efx_farch_filter_clear_rx(struct efx_nic *efx,
 				     priority);
 	efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_DEF,
 				     priority);
+	return 0;
 }
 
 u32 efx_farch_filter_count_rx_used(struct efx_nic *efx,
@@ -2822,7 +2816,7 @@ int efx_farch_filter_table_probe(struct efx_nic *efx)
 		for (i = 0; i < EFX_FARCH_FILTER_SIZE_RX_DEF; i++) {
 			spec = &table->spec[i];
 			spec->type = EFX_FARCH_FILTER_UC_DEF + i;
-			efx_farch_filter_init_rx_for_stack(efx, spec);
+			efx_farch_filter_init_rx_auto(efx, spec);
 			__set_bit(i, table->used_bitmap);
 		}
 	}
diff --git a/drivers/net/ethernet/sfc/filter.h b/drivers/net/ethernet/sfc/filter.h
index 63c77a557178..3ef298d3c47e 100644
--- a/drivers/net/ethernet/sfc/filter.h
+++ b/drivers/net/ethernet/sfc/filter.h
@@ -59,12 +59,16 @@ enum efx_filter_match_flags {
 /**
  * enum efx_filter_priority - priority of a hardware filter specification
  * @EFX_FILTER_PRI_HINT: Performance hint
+ * @EFX_FILTER_PRI_AUTO: Automatic filter based on device address list
+ *	or hardware requirements.  This may only be used by the filter
+ *	implementation for each NIC type.
  * @EFX_FILTER_PRI_MANUAL: Manually configured filter
  * @EFX_FILTER_PRI_REQUIRED: Required for correct behaviour (user-level
  *	networking and SR-IOV)
  */
 enum efx_filter_priority {
 	EFX_FILTER_PRI_HINT = 0,
+	EFX_FILTER_PRI_AUTO,
 	EFX_FILTER_PRI_MANUAL,
 	EFX_FILTER_PRI_REQUIRED,
 };
@@ -78,19 +82,18 @@ enum efx_filter_priority {
  *	according to the indirection table.
  * @EFX_FILTER_FLAG_RX_SCATTER: Enable DMA scatter on the receiving
  *	queue.
- * @EFX_FILTER_FLAG_RX_STACK: Indicates a filter inserted for the
- *	network stack.  The filter must have a priority of
- *	%EFX_FILTER_PRI_REQUIRED.  It can be steered by a replacement
- *	request with priority %EFX_FILTER_PRI_MANUAL, and a removal
- *	request with priority %EFX_FILTER_PRI_MANUAL will reset the
- *	steering (but not remove the filter).
+ * @EFX_FILTER_FLAG_RX_OVER_AUTO: Indicates a filter that is
+ *	overriding an automatic filter (priority
+ *	%EFX_FILTER_PRI_AUTO).  This may only be set by the filter
+ *	implementation for each type.  A removal request will restore
+ *	the automatic filter in its place.
  * @EFX_FILTER_FLAG_RX: Filter is for RX
  * @EFX_FILTER_FLAG_TX: Filter is for TX
  */
 enum efx_filter_flags {
 	EFX_FILTER_FLAG_RX_RSS = 0x01,
 	EFX_FILTER_FLAG_RX_SCATTER = 0x02,
-	EFX_FILTER_FLAG_RX_STACK = 0x04,
+	EFX_FILTER_FLAG_RX_OVER_AUTO = 0x04,
 	EFX_FILTER_FLAG_RX = 0x08,
 	EFX_FILTER_FLAG_TX = 0x10,
 };
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index 4b0bd8a1514d..eb59abb57e85 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -42,6 +42,7 @@ struct efx_mcdi_async_param {
 	unsigned int cmd;
 	size_t inlen;
 	size_t outlen;
+	bool quiet;
 	efx_mcdi_async_completer *complete;
 	unsigned long cookie;
 	/* followed by request/response buffer */
@@ -101,6 +102,10 @@ int efx_mcdi_init(struct efx_nic *efx)
 		netif_err(efx, probe, efx->net_dev,
 			  "Host already registered with MCPU\n");
 
+	if (efx->mcdi->fn_flags &
+	    (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY))
+		efx->primary = efx;
+
 	return 0;
 }
 
@@ -191,6 +196,8 @@ static int efx_mcdi_errno(unsigned int mcdi_err)
 	TRANSLATE_ERROR(EALREADY);
 	TRANSLATE_ERROR(ENOSPC);
 #undef TRANSLATE_ERROR
+	case MC_CMD_ERR_ENOTSUP:
+		return -EOPNOTSUPP;
 	case MC_CMD_ERR_ALLOC_FAIL:
 		return -ENOBUFS;
 	case MC_CMD_ERR_MAC_EXIST:
@@ -402,8 +409,9 @@ static bool efx_mcdi_complete_async(struct efx_mcdi_iface *mcdi, bool timeout)
 {
 	struct efx_nic *efx = mcdi->efx;
 	struct efx_mcdi_async_param *async;
-	size_t hdr_len, data_len;
+	size_t hdr_len, data_len, err_len;
 	efx_dword_t *outbuf;
+	MCDI_DECLARE_BUF_OUT_OR_ERR(errbuf, 0);
 	int rc;
 
 	if (cmpxchg(&mcdi->state,
@@ -444,6 +452,13 @@ static bool efx_mcdi_complete_async(struct efx_mcdi_iface *mcdi, bool timeout)
 	outbuf = (efx_dword_t *)(async + 1);
 	efx->type->mcdi_read_response(efx, outbuf, hdr_len,
 				      min(async->outlen, data_len));
+	if (!timeout && rc && !async->quiet) {
+		err_len = min(sizeof(errbuf), data_len);
+		efx->type->mcdi_read_response(efx, errbuf, hdr_len,
+					      sizeof(errbuf));
+		efx_mcdi_display_error(efx, async->cmd, async->inlen, errbuf,
+				       err_len, rc);
+	}
 	async->complete(efx, async->cookie, rc, outbuf, data_len);
 	kfree(async);
 
@@ -519,18 +534,129 @@ efx_mcdi_check_supported(struct efx_nic *efx, unsigned int cmd, size_t inlen)
 	return 0;
 }
 
+static int _efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
+				efx_dword_t *outbuf, size_t outlen,
+				size_t *outlen_actual, bool quiet)
+{
+	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+	MCDI_DECLARE_BUF_OUT_OR_ERR(errbuf, 0);
+	int rc;
+
+	if (mcdi->mode == MCDI_MODE_POLL)
+		rc = efx_mcdi_poll(efx);
+	else
+		rc = efx_mcdi_await_completion(efx);
+
+	if (rc != 0) {
+		netif_err(efx, hw, efx->net_dev,
+			  "MC command 0x%x inlen %d mode %d timed out\n",
+			  cmd, (int)inlen, mcdi->mode);
+
+		if (mcdi->mode == MCDI_MODE_EVENTS && efx_mcdi_poll_once(efx)) {
+			netif_err(efx, hw, efx->net_dev,
+				  "MCDI request was completed without an event\n");
+			rc = 0;
+		}
+
+		/* Close the race with efx_mcdi_ev_cpl() executing just too late
+		 * and completing a request we've just cancelled, by ensuring
+		 * that the seqno check therein fails.
+		 */
+		spin_lock_bh(&mcdi->iface_lock);
+		++mcdi->seqno;
+		++mcdi->credits;
+		spin_unlock_bh(&mcdi->iface_lock);
+	}
+
+	if (rc != 0) {
+		if (outlen_actual)
+			*outlen_actual = 0;
+	} else {
+		size_t hdr_len, data_len, err_len;
+
+		/* At the very least we need a memory barrier here to ensure
+		 * we pick up changes from efx_mcdi_ev_cpl(). Protect against
+		 * a spurious efx_mcdi_ev_cpl() running concurrently by
+		 * acquiring the iface_lock. */
+		spin_lock_bh(&mcdi->iface_lock);
+		rc = mcdi->resprc;
+		hdr_len = mcdi->resp_hdr_len;
+		data_len = mcdi->resp_data_len;
+		err_len = min(sizeof(errbuf), data_len);
+		spin_unlock_bh(&mcdi->iface_lock);
+
+		BUG_ON(rc > 0);
+
+		efx->type->mcdi_read_response(efx, outbuf, hdr_len,
+					      min(outlen, data_len));
+		if (outlen_actual)
+			*outlen_actual = data_len;
+
+		efx->type->mcdi_read_response(efx, errbuf, hdr_len, err_len);
+
+		if (cmd == MC_CMD_REBOOT && rc == -EIO) {
+			/* Don't reset if MC_CMD_REBOOT returns EIO */
+		} else if (rc == -EIO || rc == -EINTR) {
+			netif_err(efx, hw, efx->net_dev, "MC fatal error %d\n",
+				  -rc);
+			efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
+		} else if (rc && !quiet) {
+			efx_mcdi_display_error(efx, cmd, inlen, errbuf, err_len,
+					       rc);
+		}
+
+		if (rc == -EIO || rc == -EINTR) {
+			msleep(MCDI_STATUS_SLEEP_MS);
+			efx_mcdi_poll_reboot(efx);
+			mcdi->new_epoch = true;
+		}
+	}
+
+	efx_mcdi_release(mcdi);
+	return rc;
+}
+
+static int _efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd,
+			 const efx_dword_t *inbuf, size_t inlen,
+			 efx_dword_t *outbuf, size_t outlen,
+			 size_t *outlen_actual, bool quiet)
+{
+	int rc;
+
+	rc = efx_mcdi_rpc_start(efx, cmd, inbuf, inlen);
+	if (rc) {
+		if (outlen_actual)
+			*outlen_actual = 0;
+		return rc;
+	}
+	return _efx_mcdi_rpc_finish(efx, cmd, inlen, outbuf, outlen,
+				    outlen_actual, quiet);
+}
+
 int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd,
 		 const efx_dword_t *inbuf, size_t inlen,
 		 efx_dword_t *outbuf, size_t outlen,
 		 size_t *outlen_actual)
 {
-	int rc;
+	return _efx_mcdi_rpc(efx, cmd, inbuf, inlen, outbuf, outlen,
+			     outlen_actual, false);
+}
 
-	rc = efx_mcdi_rpc_start(efx, cmd, inbuf, inlen);
-	if (rc)
-		return rc;
-	return efx_mcdi_rpc_finish(efx, cmd, inlen,
-				   outbuf, outlen, outlen_actual);
+/* Normally, on receiving an error code in the MCDI response,
+ * efx_mcdi_rpc will log an error message containing (among other
+ * things) the raw error code, by means of efx_mcdi_display_error.
+ * This _quiet version suppresses that; if the caller wishes to log
+ * the error conditionally on the return code, it should call this
+ * function and is then responsible for calling efx_mcdi_display_error
+ * as needed.
+ */
+int efx_mcdi_rpc_quiet(struct efx_nic *efx, unsigned cmd,
+		       const efx_dword_t *inbuf, size_t inlen,
+		       efx_dword_t *outbuf, size_t outlen,
+		       size_t *outlen_actual)
+{
+	return _efx_mcdi_rpc(efx, cmd, inbuf, inlen, outbuf, outlen,
+			     outlen_actual, true);
 }
 
 int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd,
@@ -543,35 +669,19 @@ int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd,
 	if (rc)
 		return rc;
 
+	if (efx->mc_bist_for_other_fn)
+		return -ENETDOWN;
+
 	efx_mcdi_acquire_sync(mcdi);
 	efx_mcdi_send_request(efx, cmd, inbuf, inlen);
 	return 0;
 }
 
-/**
- * efx_mcdi_rpc_async - Schedule an MCDI command to run asynchronously
- * @efx: NIC through which to issue the command
- * @cmd: Command type number
- * @inbuf: Command parameters
- * @inlen: Length of command parameters, in bytes
- * @outlen: Length to allocate for response buffer, in bytes
- * @complete: Function to be called on completion or cancellation.
- * @cookie: Arbitrary value to be passed to @complete.
- *
- * This function does not sleep and therefore may be called in atomic
- * context.  It will fail if event queues are disabled or if MCDI
- * event completions have been disabled due to an error.
- *
- * If it succeeds, the @complete function will be called exactly once
- * in atomic context, when one of the following occurs:
- * (a) the completion event is received (in NAPI context)
- * (b) event queues are disabled (in the process that disables them)
- * (c) the request times-out (in timer context)
- */
-int
-efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
-		   const efx_dword_t *inbuf, size_t inlen, size_t outlen,
-		   efx_mcdi_async_completer *complete, unsigned long cookie)
+static int _efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
+			       const efx_dword_t *inbuf, size_t inlen,
+			       size_t outlen,
+			       efx_mcdi_async_completer *complete,
+			       unsigned long cookie, bool quiet)
 {
 	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
 	struct efx_mcdi_async_param *async;
@@ -581,6 +691,9 @@ efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
 	if (rc)
 		return rc;
 
+	if (efx->mc_bist_for_other_fn)
+		return -ENETDOWN;
+
 	async = kmalloc(sizeof(*async) + ALIGN(max(inlen, outlen), 4),
 			GFP_ATOMIC);
 	if (!async)
@@ -589,6 +702,7 @@ efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
 	async->cmd = cmd;
 	async->inlen = inlen;
 	async->outlen = outlen;
+	async->quiet = quiet;
 	async->complete = complete;
 	async->cookie = cookie;
 	memcpy(async + 1, inbuf, inlen);
@@ -617,79 +731,73 @@ efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
 	return rc;
 }
 
+/**
+ * efx_mcdi_rpc_async - Schedule an MCDI command to run asynchronously
+ * @efx: NIC through which to issue the command
+ * @cmd: Command type number
+ * @inbuf: Command parameters
+ * @inlen: Length of command parameters, in bytes
+ * @outlen: Length to allocate for response buffer, in bytes
+ * @complete: Function to be called on completion or cancellation.
+ * @cookie: Arbitrary value to be passed to @complete.
+ *
+ * This function does not sleep and therefore may be called in atomic
+ * context.  It will fail if event queues are disabled or if MCDI
+ * event completions have been disabled due to an error.
+ *
+ * If it succeeds, the @complete function will be called exactly once
+ * in atomic context, when one of the following occurs:
+ * (a) the completion event is received (in NAPI context)
+ * (b) event queues are disabled (in the process that disables them)
+ * (c) the request times-out (in timer context)
+ */
+int
+efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
+		   const efx_dword_t *inbuf, size_t inlen, size_t outlen,
+		   efx_mcdi_async_completer *complete, unsigned long cookie)
+{
+	return _efx_mcdi_rpc_async(efx, cmd, inbuf, inlen, outlen, complete,
+				   cookie, false);
+}
+
+int efx_mcdi_rpc_async_quiet(struct efx_nic *efx, unsigned int cmd,
+			     const efx_dword_t *inbuf, size_t inlen,
+			     size_t outlen, efx_mcdi_async_completer *complete,
+			     unsigned long cookie)
+{
+	return _efx_mcdi_rpc_async(efx, cmd, inbuf, inlen, outlen, complete,
+				   cookie, true);
+}
+
 int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
 			efx_dword_t *outbuf, size_t outlen,
 			size_t *outlen_actual)
 {
-	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
-	int rc;
-
-	if (mcdi->mode == MCDI_MODE_POLL)
-		rc = efx_mcdi_poll(efx);
-	else
-		rc = efx_mcdi_await_completion(efx);
-
-	if (rc != 0) {
-		netif_err(efx, hw, efx->net_dev,
-			  "MC command 0x%x inlen %d mode %d timed out\n",
-			  cmd, (int)inlen, mcdi->mode);
-
-		if (mcdi->mode == MCDI_MODE_EVENTS && efx_mcdi_poll_once(efx)) {
-			netif_err(efx, hw, efx->net_dev,
-				  "MCDI request was completed without an event\n");
-			rc = 0;
-		}
-
-		/* Close the race with efx_mcdi_ev_cpl() executing just too late
-		 * and completing a request we've just cancelled, by ensuring
-		 * that the seqno check therein fails.
-		 */
-		spin_lock_bh(&mcdi->iface_lock);
-		++mcdi->seqno;
-		++mcdi->credits;
-		spin_unlock_bh(&mcdi->iface_lock);
-	}
-
-	if (rc == 0) {
-		size_t hdr_len, data_len;
-
-		/* At the very least we need a memory barrier here to ensure
-		 * we pick up changes from efx_mcdi_ev_cpl(). Protect against
-		 * a spurious efx_mcdi_ev_cpl() running concurrently by
-		 * acquiring the iface_lock. */
-		spin_lock_bh(&mcdi->iface_lock);
-		rc = mcdi->resprc;
-		hdr_len = mcdi->resp_hdr_len;
-		data_len = mcdi->resp_data_len;
-		spin_unlock_bh(&mcdi->iface_lock);
-
-		BUG_ON(rc > 0);
+	return _efx_mcdi_rpc_finish(efx, cmd, inlen, outbuf, outlen,
+				    outlen_actual, false);
+}
 
-		if (rc == 0) {
-			efx->type->mcdi_read_response(efx, outbuf, hdr_len,
-						      min(outlen, data_len));
-			if (outlen_actual != NULL)
-				*outlen_actual = data_len;
-		} else if (cmd == MC_CMD_REBOOT && rc == -EIO)
-			; /* Don't reset if MC_CMD_REBOOT returns EIO */
-		else if (rc == -EIO || rc == -EINTR) {
-			netif_err(efx, hw, efx->net_dev, "MC fatal error %d\n",
-				  -rc);
-			efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
-		} else
-			netif_dbg(efx, hw, efx->net_dev,
-				  "MC command 0x%x inlen %d failed rc=%d\n",
-				  cmd, (int)inlen, -rc);
+int efx_mcdi_rpc_finish_quiet(struct efx_nic *efx, unsigned cmd, size_t inlen,
+			      efx_dword_t *outbuf, size_t outlen,
+			      size_t *outlen_actual)
+{
+	return _efx_mcdi_rpc_finish(efx, cmd, inlen, outbuf, outlen,
+				    outlen_actual, true);
+}
 
-		if (rc == -EIO || rc == -EINTR) {
-			msleep(MCDI_STATUS_SLEEP_MS);
-			efx_mcdi_poll_reboot(efx);
-			mcdi->new_epoch = true;
-		}
-	}
+void efx_mcdi_display_error(struct efx_nic *efx, unsigned cmd,
+			    size_t inlen, efx_dword_t *outbuf,
+			    size_t outlen, int rc)
+{
+	int code = 0, err_arg = 0;
 
-	efx_mcdi_release(mcdi);
-	return rc;
+	if (outlen >= MC_CMD_ERR_CODE_OFST + 4)
+		code = MCDI_DWORD(outbuf, ERR_CODE);
+	if (outlen >= MC_CMD_ERR_ARG_OFST + 4)
+		err_arg = MCDI_DWORD(outbuf, ERR_ARG);
+	netif_err(efx, hw, efx->net_dev,
+		  "MC command 0x%x inlen %d failed rc=%d (raw=%d) arg=%d\n",
+		  cmd, (int)inlen, rc, code, err_arg);
 }
 
 /* Switch to polled MCDI completions.  This can be called in various
@@ -834,6 +942,30 @@ static void efx_mcdi_ev_death(struct efx_nic *efx, int rc)
 	spin_unlock(&mcdi->iface_lock);
 }
 
+/* The MC is going down into BIST mode. Set the BIST flag to block
+ * new MCDI, cancel any outstanding MCDI and schedule a BIST-type reset
+ * (which doesn't actually execute a reset, it waits for the controlling
+ * function to reset it).
+ */
+static void efx_mcdi_ev_bist(struct efx_nic *efx)
+{
+	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+
+	spin_lock(&mcdi->iface_lock);
+	efx->mc_bist_for_other_fn = true;
+	if (efx_mcdi_complete_sync(mcdi)) {
+		if (mcdi->mode == MCDI_MODE_EVENTS) {
+			mcdi->resprc = -EIO;
+			mcdi->resp_hdr_len = 0;
+			mcdi->resp_data_len = 0;
+			++mcdi->credits;
+		}
+	}
+	mcdi->new_epoch = true;
+	efx_schedule_reset(efx, RESET_TYPE_MC_BIST);
+	spin_unlock(&mcdi->iface_lock);
+}
+
 /* Called from  falcon_process_eventq for MCDI events */
 void efx_mcdi_process_event(struct efx_channel *channel,
 			    efx_qword_t *event)
@@ -867,14 +999,18 @@ void efx_mcdi_process_event(struct efx_channel *channel,
 		efx_mcdi_sensor_event(efx, event);
 		break;
 	case MCDI_EVENT_CODE_SCHEDERR:
-		netif_info(efx, hw, efx->net_dev,
-			   "MC Scheduler error address=0x%x\n", data);
+		netif_dbg(efx, hw, efx->net_dev,
+			  "MC Scheduler alert (0x%x)\n", data);
 		break;
 	case MCDI_EVENT_CODE_REBOOT:
 	case MCDI_EVENT_CODE_MC_REBOOT:
 		netif_info(efx, hw, efx->net_dev, "MC Reboot\n");
 		efx_mcdi_ev_death(efx, -EIO);
 		break;
+	case MCDI_EVENT_CODE_MC_BIST:
+		netif_info(efx, hw, efx->net_dev, "MC entered BIST mode\n");
+		efx_mcdi_ev_bist(efx);
+		break;
 	case MCDI_EVENT_CODE_MAC_STATS_DMA:
 		/* MAC stats are gathered lazily.  We can ignore this. */
 		break;
@@ -886,6 +1022,9 @@ void efx_mcdi_process_event(struct efx_channel *channel,
 	case MCDI_EVENT_CODE_PTP_PPS:
 		efx_ptp_event(efx, event);
 		break;
+	case MCDI_EVENT_CODE_PTP_TIME:
+		efx_time_sync_event(channel, event);
+		break;
 	case MCDI_EVENT_CODE_TX_FLUSH:
 	case MCDI_EVENT_CODE_RX_FLUSH:
 		/* Two flush events will be sent: one to the same event
@@ -1000,13 +1139,27 @@ static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
 		goto fail;
 	}
 
+	if (driver_operating) {
+		if (outlen >= MC_CMD_DRV_ATTACH_EXT_OUT_LEN) {
+			efx->mcdi->fn_flags =
+				MCDI_DWORD(outbuf,
+					   DRV_ATTACH_EXT_OUT_FUNC_FLAGS);
+		} else {
+			/* Synthesise flags for Siena */
+			efx->mcdi->fn_flags =
+				1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL |
+				1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED |
+				(efx_port_num(efx) == 0) <<
+				MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY;
+		}
+	}
+
 	/* We currently assume we have control of the external link
 	 * and are completely trusted by firmware.  Abort probing
 	 * if that's not true for this function.
 	 */
 	if (driver_operating &&
-	    outlen >= MC_CMD_DRV_ATTACH_EXT_OUT_LEN &&
-	    (MCDI_DWORD(outbuf, DRV_ATTACH_EXT_OUT_FUNC_FLAGS) &
+	    (efx->mcdi->fn_flags &
 	     (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL |
 	      1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED)) !=
 	    (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL |
@@ -1097,13 +1250,6 @@ int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, u32 dest_evq)
 
 	rc = efx_mcdi_rpc(efx, MC_CMD_LOG_CTRL, inbuf, sizeof(inbuf),
 			  NULL, 0, NULL);
-	if (rc)
-		goto fail;
-
-	return 0;
-
-fail:
-	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
 	return rc;
 }
 
@@ -1220,7 +1366,7 @@ fail1:
 static int efx_mcdi_read_assertion(struct efx_nic *efx)
 {
 	MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_ASSERTS_IN_LEN);
-	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_ASSERTS_OUT_LEN);
+	MCDI_DECLARE_BUF_OUT_OR_ERR(outbuf, MC_CMD_GET_ASSERTS_OUT_LEN);
 	unsigned int flags, index;
 	const char *reason;
 	size_t outlen;
@@ -1235,13 +1381,17 @@ static int efx_mcdi_read_assertion(struct efx_nic *efx)
 	retry = 2;
 	do {
 		MCDI_SET_DWORD(inbuf, GET_ASSERTS_IN_CLEAR, 1);
-		rc = efx_mcdi_rpc(efx, MC_CMD_GET_ASSERTS,
-				  inbuf, MC_CMD_GET_ASSERTS_IN_LEN,
-				  outbuf, sizeof(outbuf), &outlen);
+		rc = efx_mcdi_rpc_quiet(efx, MC_CMD_GET_ASSERTS,
+					inbuf, MC_CMD_GET_ASSERTS_IN_LEN,
+					outbuf, sizeof(outbuf), &outlen);
 	} while ((rc == -EINTR || rc == -EIO) && retry-- > 0);
 
-	if (rc)
+	if (rc) {
+		efx_mcdi_display_error(efx, MC_CMD_GET_ASSERTS,
+				       MC_CMD_GET_ASSERTS_IN_LEN, outbuf,
+				       outlen, rc);
 		return rc;
+	}
 	if (outlen < MC_CMD_GET_ASSERTS_OUT_LEN)
 		return -EIO;
 
@@ -1319,17 +1469,18 @@ void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
 
 	rc = efx_mcdi_rpc(efx, MC_CMD_SET_ID_LED, inbuf, sizeof(inbuf),
 			  NULL, 0, NULL);
-	if (rc)
-		netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
-			  __func__, rc);
 }
 
-static int efx_mcdi_reset_port(struct efx_nic *efx)
+static int efx_mcdi_reset_func(struct efx_nic *efx)
 {
-	int rc = efx_mcdi_rpc(efx, MC_CMD_ENTITY_RESET, NULL, 0, NULL, 0, NULL);
-	if (rc)
-		netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
-			  __func__, rc);
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_ENTITY_RESET_IN_LEN);
+	int rc;
+
+	BUILD_BUG_ON(MC_CMD_ENTITY_RESET_OUT_LEN != 0);
+	MCDI_POPULATE_DWORD_1(inbuf, ENTITY_RESET_IN_FLAG,
+			      ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET, 1);
+	rc = efx_mcdi_rpc(efx, MC_CMD_ENTITY_RESET, inbuf, sizeof(inbuf),
+			  NULL, 0, NULL);
 	return rc;
 }
 
@@ -1347,7 +1498,6 @@ static int efx_mcdi_reset_mc(struct efx_nic *efx)
 		return 0;
 	if (rc == 0)
 		rc = -EIO;
-	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
 	return rc;
 }
 
@@ -1368,7 +1518,7 @@ int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method)
 	if (method == RESET_TYPE_WORLD)
 		return efx_mcdi_reset_mc(efx);
 	else
-		return efx_mcdi_reset_port(efx);
+		return efx_mcdi_reset_func(efx);
 }
 
 static int efx_mcdi_wol_filter_set(struct efx_nic *efx, u32 type,
@@ -1449,13 +1599,6 @@ int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id)
 
 	rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_REMOVE, inbuf, sizeof(inbuf),
 			  NULL, 0, NULL);
-	if (rc)
-		goto fail;
-
-	return 0;
-
-fail:
-	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
 	return rc;
 }
 
@@ -1496,13 +1639,6 @@ int efx_mcdi_wol_filter_reset(struct efx_nic *efx)
 	int rc;
 
 	rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_RESET, NULL, 0, NULL, 0, NULL);
-	if (rc)
-		goto fail;
-
-	return 0;
-
-fail:
-	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
 	return rc;
 }
 
@@ -1532,13 +1668,6 @@ static int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type)
 
 	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_START, inbuf, sizeof(inbuf),
 			  NULL, 0, NULL);
-	if (rc)
-		goto fail;
-
-	return 0;
-
-fail:
-	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
 	return rc;
 }
 
@@ -1558,14 +1687,10 @@ static int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type,
 	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_READ, inbuf, sizeof(inbuf),
 			  outbuf, sizeof(outbuf), &outlen);
 	if (rc)
-		goto fail;
+		return rc;
 
 	memcpy(buffer, MCDI_PTR(outbuf, NVRAM_READ_OUT_READ_BUFFER), length);
 	return 0;
-
-fail:
-	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
-	return rc;
 }
 
 static int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
@@ -1585,13 +1710,6 @@ static int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
 	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf,
 			  ALIGN(MC_CMD_NVRAM_WRITE_IN_LEN(length), 4),
 			  NULL, 0, NULL);
-	if (rc)
-		goto fail;
-
-	return 0;
-
-fail:
-	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
 	return rc;
 }
 
@@ -1609,13 +1727,6 @@ static int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type,
 
 	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_ERASE, inbuf, sizeof(inbuf),
 			  NULL, 0, NULL);
-	if (rc)
-		goto fail;
-
-	return 0;
-
-fail:
-	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
 	return rc;
 }
 
@@ -1630,13 +1741,6 @@ static int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type)
 
 	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_FINISH, inbuf, sizeof(inbuf),
 			  NULL, 0, NULL);
-	if (rc)
-		goto fail;
-
-	return 0;
-
-fail:
-	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
 	return rc;
 }
 
diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h
index 15816cacb548..52931aebf3c3 100644
--- a/drivers/net/ethernet/sfc/mcdi.h
+++ b/drivers/net/ethernet/sfc/mcdi.h
@@ -94,12 +94,14 @@ struct efx_mcdi_mtd_partition {
  * struct efx_mcdi_data - extra state for NICs that implement MCDI
  * @iface: Interface/protocol state
  * @hwmon: Hardware monitor state
+ * @fn_flags: Flags for this function, as returned by %MC_CMD_DRV_ATTACH.
  */
 struct efx_mcdi_data {
 	struct efx_mcdi_iface iface;
 #ifdef CONFIG_SFC_MCDI_MON
 	struct efx_mcdi_mon hwmon;
 #endif
+	u32 fn_flags;
 };
 
 #ifdef CONFIG_SFC_MCDI_MON
@@ -116,12 +118,19 @@ void efx_mcdi_fini(struct efx_nic *efx);
 int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd, const efx_dword_t *inbuf,
 		 size_t inlen, efx_dword_t *outbuf, size_t outlen,
 		 size_t *outlen_actual);
+int efx_mcdi_rpc_quiet(struct efx_nic *efx, unsigned cmd,
+		       const efx_dword_t *inbuf, size_t inlen,
+		       efx_dword_t *outbuf, size_t outlen,
+		       size_t *outlen_actual);
 
 int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd,
 		       const efx_dword_t *inbuf, size_t inlen);
 int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
 			efx_dword_t *outbuf, size_t outlen,
 			size_t *outlen_actual);
+int efx_mcdi_rpc_finish_quiet(struct efx_nic *efx, unsigned cmd,
+			      size_t inlen, efx_dword_t *outbuf,
+			      size_t outlen, size_t *outlen_actual);
 
 typedef void efx_mcdi_async_completer(struct efx_nic *efx,
 				      unsigned long cookie, int rc,
@@ -131,6 +140,15 @@ int efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
 		       const efx_dword_t *inbuf, size_t inlen, size_t outlen,
 		       efx_mcdi_async_completer *complete,
 		       unsigned long cookie);
+int efx_mcdi_rpc_async_quiet(struct efx_nic *efx, unsigned int cmd,
+			     const efx_dword_t *inbuf, size_t inlen,
+			     size_t outlen,
+			     efx_mcdi_async_completer *complete,
+			     unsigned long cookie);
+
+void efx_mcdi_display_error(struct efx_nic *efx, unsigned cmd,
+			    size_t inlen, efx_dword_t *outbuf,
+			    size_t outlen, int rc);
 
 int efx_mcdi_poll_reboot(struct efx_nic *efx);
 void efx_mcdi_mode_poll(struct efx_nic *efx);
@@ -147,6 +165,8 @@ void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev);
  */
 #define MCDI_DECLARE_BUF(_name, _len)					\
 	efx_dword_t _name[DIV_ROUND_UP(_len, 4)]
+#define MCDI_DECLARE_BUF_OUT_OR_ERR(_name, _len)			\
+	MCDI_DECLARE_BUF(_name, max_t(size_t, _len, 8))
 #define _MCDI_PTR(_buf, _offset)					\
 	((u8 *)(_buf) + (_offset))
 #define MCDI_PTR(_buf, _field)						\
@@ -301,6 +321,7 @@ int efx_mcdi_set_mac(struct efx_nic *efx);
 #define EFX_MC_STATS_GENERATION_INVALID ((__force __le64)(-1))
 void efx_mcdi_mac_start_stats(struct efx_nic *efx);
 void efx_mcdi_mac_stop_stats(struct efx_nic *efx);
+void efx_mcdi_mac_pull_stats(struct efx_nic *efx);
 bool efx_mcdi_mac_check_fault(struct efx_nic *efx);
 enum reset_type efx_mcdi_map_reset_reason(enum reset_type reason);
 int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method);
diff --git a/drivers/net/ethernet/sfc/mcdi_mon.c b/drivers/net/ethernet/sfc/mcdi_mon.c
index d72ad4fc3617..bc27d5b580f5 100644
--- a/drivers/net/ethernet/sfc/mcdi_mon.c
+++ b/drivers/net/ethernet/sfc/mcdi_mon.c
@@ -24,6 +24,15 @@ enum efx_hwmon_type {
 	EFX_HWMON_IN,		/* voltage */
 	EFX_HWMON_CURR,		/* current */
 	EFX_HWMON_POWER,	/* power */
+	EFX_HWMON_TYPES_COUNT
+};
+
+static const char *const efx_hwmon_unit[EFX_HWMON_TYPES_COUNT] = {
+	[EFX_HWMON_TEMP]  = " degC",
+	[EFX_HWMON_COOL]  = " rpm", /* though nonsense for a heatsink */
+	[EFX_HWMON_IN]    = " mV",
+	[EFX_HWMON_CURR]  = " mA",
+	[EFX_HWMON_POWER] = " W",
 };
 
 static const struct {
@@ -33,13 +42,13 @@ static const struct {
 } efx_mcdi_sensor_type[] = {
 #define SENSOR(name, label, hwmon_type, port)				\
 	[MC_CMD_SENSOR_##name] = { label, EFX_HWMON_ ## hwmon_type, port }
-	SENSOR(CONTROLLER_TEMP,		"Controller ext. temp.",    TEMP,  -1),
+	SENSOR(CONTROLLER_TEMP,		"Controller board temp.",   TEMP,  -1),
 	SENSOR(PHY_COMMON_TEMP,		"PHY temp.",		    TEMP,  -1),
-	SENSOR(CONTROLLER_COOLING,	"Controller cooling",	    COOL,  -1),
+	SENSOR(CONTROLLER_COOLING,	"Controller heat sink",	    COOL,  -1),
 	SENSOR(PHY0_TEMP,		"PHY temp.",		    TEMP,  0),
-	SENSOR(PHY0_COOLING,		"PHY cooling",		    COOL,  0),
+	SENSOR(PHY0_COOLING,		"PHY heat sink",	    COOL,  0),
 	SENSOR(PHY1_TEMP,		"PHY temp.",		    TEMP,  1),
-	SENSOR(PHY1_COOLING,		"PHY cooling",		    COOL,  1),
+	SENSOR(PHY1_COOLING,		"PHY heat sink",	    COOL,  1),
 	SENSOR(IN_1V0,			"1.0V supply",		    IN,    -1),
 	SENSOR(IN_1V2,			"1.2V supply",		    IN,    -1),
 	SENSOR(IN_1V8,			"1.8V supply",		    IN,    -1),
@@ -47,36 +56,42 @@ static const struct {
 	SENSOR(IN_3V3,			"3.3V supply",		    IN,    -1),
 	SENSOR(IN_12V0,			"12.0V supply",		    IN,    -1),
 	SENSOR(IN_1V2A,			"1.2V analogue supply",	    IN,    -1),
-	SENSOR(IN_VREF,			"ref. voltage",		    IN,    -1),
-	SENSOR(OUT_VAOE,		"AOE power supply",	    IN,    -1),
-	SENSOR(AOE_TEMP,		"AOE temp.",		    TEMP,  -1),
-	SENSOR(PSU_AOE_TEMP,		"AOE PSU temp.",	    TEMP,  -1),
-	SENSOR(PSU_TEMP,		"Controller PSU temp.",	    TEMP,  -1),
-	SENSOR(FAN_0,			NULL,			    COOL,  -1),
-	SENSOR(FAN_1,			NULL,			    COOL,  -1),
-	SENSOR(FAN_2,			NULL,			    COOL,  -1),
-	SENSOR(FAN_3,			NULL,			    COOL,  -1),
-	SENSOR(FAN_4,			NULL,			    COOL,  -1),
+	SENSOR(IN_VREF,			"Ref. voltage",		    IN,    -1),
+	SENSOR(OUT_VAOE,		"AOE FPGA supply",	    IN,    -1),
+	SENSOR(AOE_TEMP,		"AOE FPGA temp.",	    TEMP,  -1),
+	SENSOR(PSU_AOE_TEMP,		"AOE regulator temp.",	    TEMP,  -1),
+	SENSOR(PSU_TEMP,		"Controller regulator temp.",
+								    TEMP,  -1),
+	SENSOR(FAN_0,			"Fan 0",		    COOL,  -1),
+	SENSOR(FAN_1,			"Fan 1",		    COOL,  -1),
+	SENSOR(FAN_2,			"Fan 2",		    COOL,  -1),
+	SENSOR(FAN_3,			"Fan 3",		    COOL,  -1),
+	SENSOR(FAN_4,			"Fan 4",		    COOL,  -1),
 	SENSOR(IN_VAOE,			"AOE input supply",	    IN,    -1),
 	SENSOR(OUT_IAOE,		"AOE output current",	    CURR,  -1),
 	SENSOR(IN_IAOE,			"AOE input current",	    CURR,  -1),
 	SENSOR(NIC_POWER,		"Board power use",	    POWER, -1),
 	SENSOR(IN_0V9,			"0.9V supply",		    IN,    -1),
-	SENSOR(IN_I0V9,			"0.9V input current",	    CURR,  -1),
-	SENSOR(IN_I1V2,			"1.2V input current",	    CURR,  -1),
-	SENSOR(IN_0V9_ADC,		"0.9V supply (at ADC)",	    IN,    -1),
-	SENSOR(CONTROLLER_2_TEMP,	"Controller ext. temp. 2",  TEMP,  -1),
-	SENSOR(VREG_INTERNAL_TEMP,	"Voltage regulator temp.",  TEMP,  -1),
+	SENSOR(IN_I0V9,			"0.9V supply current",	    CURR,  -1),
+	SENSOR(IN_I1V2,			"1.2V supply current",	    CURR,  -1),
+	SENSOR(IN_0V9_ADC,		"0.9V supply (ext. ADC)",   IN,    -1),
+	SENSOR(CONTROLLER_2_TEMP,	"Controller board temp. 2", TEMP,  -1),
+	SENSOR(VREG_INTERNAL_TEMP,	"Regulator die temp.",	    TEMP,  -1),
 	SENSOR(VREG_0V9_TEMP,		"0.9V regulator temp.",     TEMP,  -1),
 	SENSOR(VREG_1V2_TEMP,		"1.2V regulator temp.",     TEMP,  -1),
-	SENSOR(CONTROLLER_VPTAT,       "Controller int. temp. raw", IN,    -1),
-	SENSOR(CONTROLLER_INTERNAL_TEMP, "Controller int. temp.",   TEMP,  -1),
+	SENSOR(CONTROLLER_VPTAT,
+			      "Controller PTAT voltage (int. ADC)", IN,    -1),
+	SENSOR(CONTROLLER_INTERNAL_TEMP,
+				 "Controller die temp. (int. ADC)", TEMP,  -1),
 	SENSOR(CONTROLLER_VPTAT_EXTADC,
-			      "Controller int. temp. raw (at ADC)", IN,    -1),
+			      "Controller PTAT voltage (ext. ADC)", IN,    -1),
 	SENSOR(CONTROLLER_INTERNAL_TEMP_EXTADC,
-				 "Controller int. temp. (via ADC)", TEMP,  -1),
+				 "Controller die temp. (ext. ADC)", TEMP,  -1),
 	SENSOR(AMBIENT_TEMP,		"Ambient temp.",	    TEMP,  -1),
 	SENSOR(AIRFLOW,			"Air flow raw",		    IN,    -1),
+	SENSOR(VDD08D_VSS08D_CSR,	"0.9V die (int. ADC)",	    IN,    -1),
+	SENSOR(VDD08D_VSS08D_CSR_EXTADC, "0.9V die (ext. ADC)",	    IN,    -1),
+	SENSOR(HOTPOINT_TEMP,  "Controller board temp. (hotpoint)", TEMP,  -1),
 #undef SENSOR
 };
 
@@ -91,7 +106,8 @@ static const char *const sensor_status_names[] = {
 void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev)
 {
 	unsigned int type, state, value;
-	const char *name = NULL, *state_txt;
+	enum efx_hwmon_type hwmon_type = EFX_HWMON_UNKNOWN;
+	const char *name = NULL, *state_txt, *unit;
 
 	type = EFX_QWORD_FIELD(*ev, MCDI_EVENT_SENSOREVT_MONITOR);
 	state = EFX_QWORD_FIELD(*ev, MCDI_EVENT_SENSOREVT_STATE);
@@ -99,16 +115,22 @@ void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev)
 
 	/* Deal gracefully with the board having more sensors than we
 	 * know about, but do not expect new sensor states. */
-	if (type < ARRAY_SIZE(efx_mcdi_sensor_type))
+	if (type < ARRAY_SIZE(efx_mcdi_sensor_type)) {
 		name = efx_mcdi_sensor_type[type].label;
+		hwmon_type = efx_mcdi_sensor_type[type].hwmon_type;
+	}
 	if (!name)
 		name = "No sensor name available";
 	EFX_BUG_ON_PARANOID(state >= ARRAY_SIZE(sensor_status_names));
 	state_txt = sensor_status_names[state];
+	EFX_BUG_ON_PARANOID(hwmon_type >= EFX_HWMON_TYPES_COUNT);
+	unit = efx_hwmon_unit[hwmon_type];
+	if (!unit)
+		unit = "";
 
 	netif_err(efx, hw, efx->net_dev,
-		  "Sensor %d (%s) reports condition '%s' for raw value %d\n",
-		  type, name, state_txt, value);
+		  "Sensor %d (%s) reports condition '%s' for value %d%s\n",
+		  type, name, state_txt, value, unit);
 }
 
 #ifdef CONFIG_SFC_MCDI_MON
diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h
index e0a63ddb7a6c..a707fb5ef14c 100644
--- a/drivers/net/ethernet/sfc/mcdi_pcol.h
+++ b/drivers/net/ethernet/sfc/mcdi_pcol.h
@@ -224,6 +224,8 @@
 #define MC_CMD_ERR_MAC_EXIST 0x1009
 /* Slave core not present */
 #define MC_CMD_ERR_SLAVE_NOT_PRESENT 0x100a
+/* The datapath is disabled. */
+#define MC_CMD_ERR_DATAPATH_DISABLED 0x100b
 
 #define MC_CMD_ERR_CODE_OFST 0
 
@@ -390,6 +392,8 @@
  * AOE_ERR_DATA)
  */
 #define          MCDI_EVENT_AOE_BYTEBLASTER 0x9
+/* enum: DDR ECC status update */
+#define          MCDI_EVENT_AOE_DDR_ECC_STATUS 0xa
 #define        MCDI_EVENT_AOE_ERR_DATA_LBN 8
 #define        MCDI_EVENT_AOE_ERR_DATA_WIDTH 8
 #define        MCDI_EVENT_RX_ERR_RXQ_LBN 0
@@ -462,6 +466,10 @@
 #define          MCDI_EVENT_CODE_ECC_CORR_ERR 0x17
 /* enum: the MC has detected an uncorrectable error */
 #define          MCDI_EVENT_CODE_ECC_FATAL_ERR 0x18
+/* enum: The MC has entered offline BIST mode */
+#define          MCDI_EVENT_CODE_MC_BIST 0x19
+/* enum: PTP tick event providing current NIC time */
+#define          MCDI_EVENT_CODE_PTP_TIME 0x1a
 /* enum: Artificial event generated by host and posted via MC for test
  * purposes.
  */
@@ -481,15 +489,32 @@
 #define       MCDI_EVENT_TX_ERR_DATA_OFST 0
 #define       MCDI_EVENT_TX_ERR_DATA_LBN 0
 #define       MCDI_EVENT_TX_ERR_DATA_WIDTH 32
-/* Seconds field of timestamp */
+/* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the seconds field of
+ * timestamp
+ */
 #define       MCDI_EVENT_PTP_SECONDS_OFST 0
 #define       MCDI_EVENT_PTP_SECONDS_LBN 0
 #define       MCDI_EVENT_PTP_SECONDS_WIDTH 32
-/* Nanoseconds field of timestamp */
+/* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the major field of
+ * timestamp
+ */
+#define       MCDI_EVENT_PTP_MAJOR_OFST 0
+#define       MCDI_EVENT_PTP_MAJOR_LBN 0
+#define       MCDI_EVENT_PTP_MAJOR_WIDTH 32
+/* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the nanoseconds field
+ * of timestamp
+ */
 #define       MCDI_EVENT_PTP_NANOSECONDS_OFST 0
 #define       MCDI_EVENT_PTP_NANOSECONDS_LBN 0
 #define       MCDI_EVENT_PTP_NANOSECONDS_WIDTH 32
-/* Lowest four bytes of sourceUUID from PTP packet */
+/* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the minor field of
+ * timestamp
+ */
+#define       MCDI_EVENT_PTP_MINOR_OFST 0
+#define       MCDI_EVENT_PTP_MINOR_LBN 0
+#define       MCDI_EVENT_PTP_MINOR_WIDTH 32
+/* For CODE_PTP_RX events, the lowest four bytes of sourceUUID from PTP packet
+ */
 #define       MCDI_EVENT_PTP_UUID_OFST 0
 #define       MCDI_EVENT_PTP_UUID_LBN 0
 #define       MCDI_EVENT_PTP_UUID_WIDTH 32
@@ -505,6 +530,13 @@
 #define       MCDI_EVENT_ECC_FATAL_ERR_DATA_OFST 0
 #define       MCDI_EVENT_ECC_FATAL_ERR_DATA_LBN 0
 #define       MCDI_EVENT_ECC_FATAL_ERR_DATA_WIDTH 32
+/* For CODE_PTP_TIME events, the major value of the PTP clock */
+#define       MCDI_EVENT_PTP_TIME_MAJOR_OFST 0
+#define       MCDI_EVENT_PTP_TIME_MAJOR_LBN 0
+#define       MCDI_EVENT_PTP_TIME_MAJOR_WIDTH 32
+/* For CODE_PTP_TIME events, bits 19-26 of the minor value of the PTP clock */
+#define       MCDI_EVENT_PTP_TIME_MINOR_26_19_LBN 36
+#define       MCDI_EVENT_PTP_TIME_MINOR_26_19_WIDTH 8
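
Since a CODE_PTP_TIME event carries only bits 19-26 of the minor value, a host has to fold those bits into a full minor reading it already holds. A minimal sketch, assuming such a reading is available (the helper name is illustrative and wrap handling is not shown; this is not taken from the driver):

/* Fold the 8 bits carried by the event into bits 19-26 of a previously
 * captured minor value; wrap-around correction is omitted for brevity.
 */
static u32 ptp_time_expand_minor(u32 last_full_minor, u32 event_bits_26_19)
{
	return (last_full_minor & ~(0xffu << 19)) | (event_bits_26_19 << 19);
}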
 
 /* FCDI_EVENT structuredef */
 #define    FCDI_EVENT_LEN 8
@@ -545,8 +577,10 @@
 #define          FCDI_EVENT_CODE_TIMED_READ 0x5
 /* enum: One or more PPS IN events */
 #define          FCDI_EVENT_CODE_PPS_IN 0x6
-/* enum: One or more PPS OUT events */
-#define          FCDI_EVENT_CODE_PPS_OUT 0x7
+/* enum: Tick event from PTP clock */
+#define          FCDI_EVENT_CODE_PTP_TICK 0x7
+/* enum: ECC error counters */
+#define          FCDI_EVENT_CODE_DDR_ECC_STATUS 0x8
 #define       FCDI_EVENT_ASSERT_INSTR_ADDRESS_OFST 0
 #define       FCDI_EVENT_ASSERT_INSTR_ADDRESS_LBN 0
 #define       FCDI_EVENT_ASSERT_INSTR_ADDRESS_WIDTH 32
@@ -560,14 +594,21 @@
 #define       FCDI_EVENT_LINK_STATE_DATA_OFST 0
 #define       FCDI_EVENT_LINK_STATE_DATA_LBN 0
 #define       FCDI_EVENT_LINK_STATE_DATA_WIDTH 32
-#define       FCDI_EVENT_PPS_COUNT_OFST 0
-#define       FCDI_EVENT_PPS_COUNT_LBN 0
-#define       FCDI_EVENT_PPS_COUNT_WIDTH 32
-
-/* FCDI_EXTENDED_EVENT structuredef */
-#define    FCDI_EXTENDED_EVENT_LENMIN 16
-#define    FCDI_EXTENDED_EVENT_LENMAX 248
-#define    FCDI_EXTENDED_EVENT_LEN(num) (8+8*(num))
+#define       FCDI_EVENT_DDR_ECC_STATUS_BANK_ID_LBN 36
+#define       FCDI_EVENT_DDR_ECC_STATUS_BANK_ID_WIDTH 8
+#define       FCDI_EVENT_DDR_ECC_STATUS_STATUS_OFST 0
+#define       FCDI_EVENT_DDR_ECC_STATUS_STATUS_LBN 0
+#define       FCDI_EVENT_DDR_ECC_STATUS_STATUS_WIDTH 32
+
+/* FCDI_EXTENDED_EVENT_PPS structuredef: Extended FCDI event to send PPS events
+ * to the MC. Note that this structure is overlaid on a normal FCDI event such
+ * that bits 32-63 containing event code, level, source etc. remain the same.
+ * In this case the data field of the header is defined to be the number of
+ * timestamps.
+ */
+#define    FCDI_EXTENDED_EVENT_PPS_LENMIN 16
+#define    FCDI_EXTENDED_EVENT_PPS_LENMAX 248
+#define    FCDI_EXTENDED_EVENT_PPS_LEN(num) (8+8*(num))
 /* Number of timestamps following */
 #define       FCDI_EXTENDED_EVENT_PPS_COUNT_OFST 0
 #define       FCDI_EXTENDED_EVENT_PPS_COUNT_LBN 0
@@ -581,14 +622,14 @@
 #define       FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_LBN 96
 #define       FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_WIDTH 32
 /* Timestamp records comprising the event */
-#define       FCDI_EXTENDED_EVENT_PPS_TIME_OFST 8
-#define       FCDI_EXTENDED_EVENT_PPS_TIME_LEN 8
-#define       FCDI_EXTENDED_EVENT_PPS_TIME_LO_OFST 8
-#define       FCDI_EXTENDED_EVENT_PPS_TIME_HI_OFST 12
-#define       FCDI_EXTENDED_EVENT_PPS_TIME_MINNUM 1
-#define       FCDI_EXTENDED_EVENT_PPS_TIME_MAXNUM 30
-#define       FCDI_EXTENDED_EVENT_PPS_TIME_LBN 64
-#define       FCDI_EXTENDED_EVENT_PPS_TIME_WIDTH 64
+#define       FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_OFST 8
+#define       FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LEN 8
+#define       FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LO_OFST 8
+#define       FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_HI_OFST 12
+#define       FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_MINNUM 1
+#define       FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_MAXNUM 30
+#define       FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LBN 64
+#define       FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_WIDTH 64
 
 
 /***********************************/
@@ -642,6 +683,10 @@
 #define    MC_CMD_COPYCODE_IN_LEN 16
 /* Source address */
 #define       MC_CMD_COPYCODE_IN_SRC_ADDR_OFST 0
+/* enum: The main image should be entered via a copy of a single word from and
+ * to this address when none of the other magic behaviours are required.
+ */
+#define          MC_CMD_COPYCODE_HUNT_NO_MAGIC_ADDR 0x10000
 /* enum: Entering the main image via a copy of a single word from and to this
  * address indicates that it should not attempt to start the datapath CPUs.
  * This is useful for certain soft rebooting scenarios. (Huntington only)
@@ -872,8 +917,28 @@
 #define          MC_CMD_PTP_OP_RST_CLK 0x14
 /* enum: Enable the forwarding of PPS events to the host */
 #define          MC_CMD_PTP_OP_PPS_ENABLE 0x15
+/* enum: Get the time format used by this NIC for PTP operations */
+#define          MC_CMD_PTP_OP_GET_TIME_FORMAT 0x16
+/* enum: Get the clock attributes. NOTE- extended version of
+ * MC_CMD_PTP_OP_GET_TIME_FORMAT
+ */
+#define          MC_CMD_PTP_OP_GET_ATTRIBUTES 0x16
+/* enum: Get corrections that should be applied to the various different
+ * timestamps
+ */
+#define          MC_CMD_PTP_OP_GET_TIMESTAMP_CORRECTIONS 0x17
+/* enum: Subscribe to receive periodic time events indicating the current NIC
+ * time
+ */
+#define          MC_CMD_PTP_OP_TIME_EVENT_SUBSCRIBE 0x18
+/* enum: Unsubscribe to stop receiving time events */
+#define          MC_CMD_PTP_OP_TIME_EVENT_UNSUBSCRIBE 0x19
+/* enum: PPS-based manufacturing tests. Requires PPS output to be looped to PPS
+ * input on the same NIC.
+ */
+#define          MC_CMD_PTP_OP_MANFTEST_PPS 0x1a
 /* enum: Above this for future use. */
-#define          MC_CMD_PTP_OP_MAX 0x16
+#define          MC_CMD_PTP_OP_MAX 0x1b
 
 /* MC_CMD_PTP_IN_ENABLE msgrequest */
 #define    MC_CMD_PTP_IN_ENABLE_LEN 16
@@ -938,8 +1003,12 @@
 #define          MC_CMD_PTP_IN_ADJUST_BITS 0x28
 /* Time adjustment in seconds */
 #define       MC_CMD_PTP_IN_ADJUST_SECONDS_OFST 16
+/* Time adjustment major value */
+#define       MC_CMD_PTP_IN_ADJUST_MAJOR_OFST 16
 /* Time adjustment in nanoseconds */
 #define       MC_CMD_PTP_IN_ADJUST_NANOSECONDS_OFST 20
+/* Time adjustment minor value */
+#define       MC_CMD_PTP_IN_ADJUST_MINOR_OFST 20
 
 /* MC_CMD_PTP_IN_SYNCHRONIZE msgrequest */
 #define    MC_CMD_PTP_IN_SYNCHRONIZE_LEN 20
@@ -1005,8 +1074,12 @@
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
 /* Time adjustment in seconds */
 #define       MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_SECONDS_OFST 8
+/* Time adjustment major value */
+#define       MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_MAJOR_OFST 8
 /* Time adjustment in nanoseconds */
 #define       MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_NANOSECONDS_OFST 12
+/* Time adjustment minor value */
+#define       MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_MINOR_OFST 12
 
 /* MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST msgrequest */
 #define    MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_LEN 16
@@ -1078,9 +1151,51 @@
 #define          MC_CMD_PTP_ENABLE_PPS 0x0
 /* enum: Disable */
 #define          MC_CMD_PTP_DISABLE_PPS 0x1
-/* Queueid to send events back */
+/* Queue id to send events back */
 #define       MC_CMD_PTP_IN_PPS_ENABLE_QUEUE_ID_OFST 8
 
+/* MC_CMD_PTP_IN_GET_TIME_FORMAT msgrequest */
+#define    MC_CMD_PTP_IN_GET_TIME_FORMAT_LEN 8
+/*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+
+/* MC_CMD_PTP_IN_GET_ATTRIBUTES msgrequest */
+#define    MC_CMD_PTP_IN_GET_ATTRIBUTES_LEN 8
+/*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+
+/* MC_CMD_PTP_IN_GET_TIMESTAMP_CORRECTIONS msgrequest */
+#define    MC_CMD_PTP_IN_GET_TIMESTAMP_CORRECTIONS_LEN 8
+/*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+
+/* MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE msgrequest */
+#define    MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_LEN 12
+/*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* Event queue to send PTP time events to */
+#define       MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_OFST 8
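
As a usage illustration only, a subscribe request could be built with the driver's MCDI helpers roughly as below; the surrounding function, the queue_id variable and error handling are assumed here, not part of this patch:

	MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_LEN);
	int rc;

	MCDI_SET_DWORD(inbuf, PTP_IN_CMD, MC_CMD_PTP_OP_TIME_EVENT_SUBSCRIBE);
	MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
	MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE, queue_id);
	rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf), NULL, 0, NULL);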
+
+/* MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE msgrequest */
+#define    MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_LEN 16
+/*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* Unsubscribe options */
+#define       MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_CONTROL_OFST 8
+/* enum: Unsubscribe a single queue */
+#define          MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_SINGLE 0x0
+/* enum: Unsubscribe all queues */
+#define          MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_ALL 0x1
+/* Event queue ID */
+#define       MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_QUEUE_OFST 12
+
+/* MC_CMD_PTP_IN_MANFTEST_PPS msgrequest */
+#define    MC_CMD_PTP_IN_MANFTEST_PPS_LEN 12
+/*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* 1 to enable PPS test mode, 0 to disable and return result. */
+#define       MC_CMD_PTP_IN_MANFTEST_PPS_TEST_ENABLE_OFST 8
+
 /* MC_CMD_PTP_OUT msgresponse */
 #define    MC_CMD_PTP_OUT_LEN 0
 
@@ -1088,15 +1203,29 @@
 #define    MC_CMD_PTP_OUT_TRANSMIT_LEN 8
 /* Value of seconds timestamp */
 #define       MC_CMD_PTP_OUT_TRANSMIT_SECONDS_OFST 0
+/* Timestamp major value */
+#define       MC_CMD_PTP_OUT_TRANSMIT_MAJOR_OFST 0
 /* Value of nanoseconds timestamp */
 #define       MC_CMD_PTP_OUT_TRANSMIT_NANOSECONDS_OFST 4
+/* Timestamp minor value */
+#define       MC_CMD_PTP_OUT_TRANSMIT_MINOR_OFST 4
+
+/* MC_CMD_PTP_OUT_TIME_EVENT_SUBSCRIBE msgresponse */
+#define    MC_CMD_PTP_OUT_TIME_EVENT_SUBSCRIBE_LEN 0
+
+/* MC_CMD_PTP_OUT_TIME_EVENT_UNSUBSCRIBE msgresponse */
+#define    MC_CMD_PTP_OUT_TIME_EVENT_UNSUBSCRIBE_LEN 0
 
 /* MC_CMD_PTP_OUT_READ_NIC_TIME msgresponse */
 #define    MC_CMD_PTP_OUT_READ_NIC_TIME_LEN 8
 /* Value of seconds timestamp */
 #define       MC_CMD_PTP_OUT_READ_NIC_TIME_SECONDS_OFST 0
+/* Timestamp major value */
+#define       MC_CMD_PTP_OUT_READ_NIC_TIME_MAJOR_OFST 0
 /* Value of nanoseconds timestamp */
 #define       MC_CMD_PTP_OUT_READ_NIC_TIME_NANOSECONDS_OFST 4
+/* Timestamp minor value */
+#define       MC_CMD_PTP_OUT_READ_NIC_TIME_MINOR_OFST 4
 
 /* MC_CMD_PTP_OUT_STATUS msgresponse */
 #define    MC_CMD_PTP_OUT_STATUS_LEN 64
@@ -1116,21 +1245,21 @@
 #define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFLOW_OFST 24
 /* Number of PPS bad periods */
 #define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_BAD_OFST 28
-/* Minimum period of PPS pulse */
+/* Minimum period of PPS pulse in nanoseconds */
 #define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MIN_OFST 32
-/* Maximum period of PPS pulse */
+/* Maximum period of PPS pulse in nanoseconds */
 #define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MAX_OFST 36
-/* Last period of PPS pulse */
+/* Last period of PPS pulse in nanoseconds */
 #define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_LAST_OFST 40
-/* Mean period of PPS pulse */
+/* Mean period of PPS pulse in nanoseconds */
 #define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MEAN_OFST 44
-/* Minimum offset of PPS pulse (signed) */
+/* Minimum offset of PPS pulse in nanoseconds (signed) */
 #define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MIN_OFST 48
-/* Maximum offset of PPS pulse (signed) */
+/* Maximum offset of PPS pulse in nanoseconds (signed) */
 #define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MAX_OFST 52
-/* Last offset of PPS pulse (signed) */
+/* Last offset of PPS pulse in nanoseconds (signed) */
 #define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_LAST_OFST 56
-/* Mean offset of PPS pulse (signed) */
+/* Mean offset of PPS pulse in nanoseconds (signed) */
 #define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MEAN_OFST 60
 
 /* MC_CMD_PTP_OUT_SYNCHRONIZE msgresponse */
@@ -1146,8 +1275,12 @@
 #define       MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTSTART_OFST 0
 /* Value of seconds timestamp */
 #define       MC_CMD_PTP_OUT_SYNCHRONIZE_SECONDS_OFST 4
+/* Timestamp major value */
+#define       MC_CMD_PTP_OUT_SYNCHRONIZE_MAJOR_OFST 4
 /* Value of nanoseconds timestamp */
 #define       MC_CMD_PTP_OUT_SYNCHRONIZE_NANOSECONDS_OFST 8
+/* Timestamp minor value */
+#define       MC_CMD_PTP_OUT_SYNCHRONIZE_MINOR_OFST 8
 /* Host time immediately after NIC's hardware clock read */
 #define       MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTEND_OFST 12
 /* Number of nanoseconds waited after reading NIC's hardware clock */
@@ -1177,6 +1310,16 @@
 #define          MC_CMD_PTP_MANF_PACKET_ENOUGH 0x8
 /* enum: Timestamp trigger GPIO not working */
 #define          MC_CMD_PTP_MANF_GPIO_TRIGGER 0x9
+/* enum: Insufficient PPS events to perform checks */
+#define          MC_CMD_PTP_MANF_PPS_ENOUGH 0xa
+/* enum: PPS time event period not sufficiently close to 1s. */
+#define          MC_CMD_PTP_MANF_PPS_PERIOD 0xb
+/* enum: PPS time event ns reading not sufficiently close to zero. */
+#define          MC_CMD_PTP_MANF_PPS_NS 0xc
+/* enum: PTP peripheral registers incorrect */
+#define          MC_CMD_PTP_MANF_REGISTERS 0xd
+/* enum: Failed to read time from PTP peripheral */
+#define          MC_CMD_PTP_MANF_CLOCK_READ 0xe
 /* Presence of external oscillator */
 #define       MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_EXTOSC_OFST 4
 
@@ -1198,6 +1341,62 @@
 #define       MC_CMD_PTP_OUT_FPGAREAD_BUFFER_MINNUM 1
 #define       MC_CMD_PTP_OUT_FPGAREAD_BUFFER_MAXNUM 252
 
+/* MC_CMD_PTP_OUT_GET_TIME_FORMAT msgresponse */
+#define    MC_CMD_PTP_OUT_GET_TIME_FORMAT_LEN 4
+/* Time format required/used by this NIC. Applies to all PTP MCDI
+ * operations that pass times between the host and firmware. If this operation
+ * is not supported (older firmware) a format of seconds and nanoseconds should
+ * be assumed.
+ */
+#define       MC_CMD_PTP_OUT_GET_TIME_FORMAT_FORMAT_OFST 0
+/* enum: Times are in seconds and nanoseconds */
+#define          MC_CMD_PTP_OUT_GET_TIME_FORMAT_SECONDS_NANOSECONDS 0x0
+/* enum: Major register has units of 16 seconds per tick, minor 8 ns per tick */
+#define          MC_CMD_PTP_OUT_GET_TIME_FORMAT_16SECONDS_8NANOSECONDS 0x1
+/* enum: Major register has units of seconds, minor 2^-27s per tick */
+#define          MC_CMD_PTP_OUT_GET_TIME_FORMAT_SECONDS_27FRACTION 0x2
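
To make the last format concrete: with a 2^-27 s minor unit, a minor value converts to nanoseconds with one multiply and shift. An illustrative helper, not part of the protocol header:

/* Convert a minor value in units of 2^-27 s to nanoseconds. */
static inline u32 ptp_s27_minor_to_ns(u32 minor)
{
	return (u32)(((u64)minor * 1000000000ULL) >> 27);
}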
+
+/* MC_CMD_PTP_OUT_GET_ATTRIBUTES msgresponse */
+#define    MC_CMD_PTP_OUT_GET_ATTRIBUTES_LEN 8
+/* Time format required/used by this NIC. Applies to all PTP MCDI
+ * operations that pass times between the host and firmware. If this operation
+ * is not supported (older firmware) a format of seconds and nanoseconds should
+ * be assumed.
+ */
+#define       MC_CMD_PTP_OUT_GET_ATTRIBUTES_TIME_FORMAT_OFST 0
+/* enum: Times are in seconds and nanoseconds */
+#define          MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_NANOSECONDS 0x0
+/* enum: Major register has units of 16 seconds per tick, minor 8 ns per tick */
+#define          MC_CMD_PTP_OUT_GET_ATTRIBUTES_16SECONDS_8NANOSECONDS 0x1
+/* enum: Major register has units of seconds, minor 2^-27s per tick */
+#define          MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_27FRACTION 0x2
+/* Minimum acceptable value for a corrected synchronization timeset. When
+ * comparing host and NIC clock times, the MC returns a set of samples that
+ * contain the host start and end time, the MC time when the host start was
+ * detected and the time the MC waited between reading the time and detecting
+ * the host end. The corrected sync window is the difference between the host
+ * end and start times minus the time that the MC waited for host end.
+ */
+#define       MC_CMD_PTP_OUT_GET_ATTRIBUTES_SYNC_WINDOW_MIN_OFST 4
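
The corrected sync window described above is simple arithmetic per sample; a sketch with illustrative parameter names, assuming all values are expressed in host nanoseconds:

/* Host window minus the time the MC spent waiting for the host end marker. */
static inline u32 ptp_corrected_sync_window(u32 host_start_ns, u32 host_end_ns,
					    u32 mc_wait_ns)
{
	return (host_end_ns - host_start_ns) - mc_wait_ns;
}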
+
+/* MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS msgresponse */
+#define    MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_LEN 16
+/* Uncorrected error on transmit timestamps in NIC clock format */
+#define       MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_TRANSMIT_OFST 0
+/* Uncorrected error on receive timestamps in NIC clock format */
+#define       MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_RECEIVE_OFST 4
+/* Uncorrected error on PPS output in NIC clock format */
+#define       MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_OUT_OFST 8
+/* Uncorrected error on PPS input in NIC clock format */
+#define       MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_IN_OFST 12
+
+/* MC_CMD_PTP_OUT_MANFTEST_PPS msgresponse */
+#define    MC_CMD_PTP_OUT_MANFTEST_PPS_LEN 4
+/* Results of testing */
+#define       MC_CMD_PTP_OUT_MANFTEST_PPS_TEST_RESULT_OFST 0
+/*            Enum values, see field(s): */
+/*               MC_CMD_PTP_OUT_MANFTEST_BASIC/TEST_RESULT */
+
 
 /***********************************/
 /* MC_CMD_CSR_READ32
@@ -1923,6 +2122,8 @@
 #define          MC_CMD_MEDIA_SFP_PLUS 0x5
 /* enum: 10GBaseT. */
 #define          MC_CMD_MEDIA_BASE_T 0x6
+/* enum: QSFP+. */
+#define          MC_CMD_MEDIA_QSFP_PLUS 0x7
 #define       MC_CMD_GET_PHY_CFG_OUT_MMD_MASK_OFST 48
 /* enum: Native clause 22 */
 #define          MC_CMD_MMD_CLAUSE22 0x0
@@ -2223,6 +2424,8 @@
 #define          MC_CMD_LOOPBACK_SD_FEP_WS  0x21
 /* enum: KR Serdes Serial Wireside. */
 #define          MC_CMD_LOOPBACK_SD_FES_WS  0x22
+/* enum: Near side of AOE Siena side port */
+#define          MC_CMD_LOOPBACK_AOE_INT_NEAR  0x23
 /* Supported loopbacks. */
 #define       MC_CMD_GET_LOOPBACK_MODES_OUT_1G_OFST 8
 #define       MC_CMD_GET_LOOPBACK_MODES_OUT_1G_LEN 8
@@ -2286,6 +2489,10 @@
 #define        MC_CMD_GET_LINK_OUT_BPX_LINK_WIDTH 1
 #define        MC_CMD_GET_LINK_OUT_PHY_LINK_LBN 3
 #define        MC_CMD_GET_LINK_OUT_PHY_LINK_WIDTH 1
+#define        MC_CMD_GET_LINK_OUT_LINK_FAULT_RX_LBN 6
+#define        MC_CMD_GET_LINK_OUT_LINK_FAULT_RX_WIDTH 1
+#define        MC_CMD_GET_LINK_OUT_LINK_FAULT_TX_LBN 7
+#define        MC_CMD_GET_LINK_OUT_LINK_FAULT_TX_WIDTH 1
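
These LBN/WIDTH pairs describe single-bit flags within the FLAGS dword; a host could test the new fault bits roughly as follows, where outbuf is assumed to be a GET_LINK response buffer:

	u32 flags = MCDI_DWORD(outbuf, GET_LINK_OUT_FLAGS);
	bool link_fault_rx = flags & (1 << MC_CMD_GET_LINK_OUT_LINK_FAULT_RX_LBN);
	bool link_fault_tx = flags & (1 << MC_CMD_GET_LINK_OUT_LINK_FAULT_TX_LBN);
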
 /* This returns the negotiated flow control value. */
 #define       MC_CMD_GET_LINK_OUT_FCNTL_OFST 20
 /* enum: Flow control is off. */
@@ -3175,7 +3382,7 @@
 #define       MC_CMD_SENSOR_INFO_EXT_IN_PAGE_OFST 0
 
 /* MC_CMD_SENSOR_INFO_OUT msgresponse */
-#define    MC_CMD_SENSOR_INFO_OUT_LENMIN 12
+#define    MC_CMD_SENSOR_INFO_OUT_LENMIN 4
 #define    MC_CMD_SENSOR_INFO_OUT_LENMAX 252
 #define    MC_CMD_SENSOR_INFO_OUT_LEN(num) (4+8*(num))
 #define       MC_CMD_SENSOR_INFO_OUT_MASK_OFST 0
@@ -3269,16 +3476,18 @@
 #define          MC_CMD_SENSOR_VDD08D_VSS08D_CSR  0x2b
 /* enum: voltage between VDD08D and VSS08D at CSR (external ADC): mV */
 #define          MC_CMD_SENSOR_VDD08D_VSS08D_CSR_EXTADC  0x2c
+/* enum: Hotpoint temperature: degC */
+#define          MC_CMD_SENSOR_HOTPOINT_TEMP  0x2d
 /* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF */
 #define       MC_CMD_SENSOR_ENTRY_OFST 4
 #define       MC_CMD_SENSOR_ENTRY_LEN 8
 #define       MC_CMD_SENSOR_ENTRY_LO_OFST 4
 #define       MC_CMD_SENSOR_ENTRY_HI_OFST 8
-#define       MC_CMD_SENSOR_ENTRY_MINNUM 1
+#define       MC_CMD_SENSOR_ENTRY_MINNUM 0
 #define       MC_CMD_SENSOR_ENTRY_MAXNUM 31
 
 /* MC_CMD_SENSOR_INFO_EXT_OUT msgresponse */
-#define    MC_CMD_SENSOR_INFO_EXT_OUT_LENMIN 12
+#define    MC_CMD_SENSOR_INFO_EXT_OUT_LENMIN 4
 #define    MC_CMD_SENSOR_INFO_EXT_OUT_LENMAX 252
 #define    MC_CMD_SENSOR_INFO_EXT_OUT_LEN(num) (4+8*(num))
 #define       MC_CMD_SENSOR_INFO_EXT_OUT_MASK_OFST 0
@@ -3291,7 +3500,7 @@
 /*            MC_CMD_SENSOR_ENTRY_LEN 8 */
 /*            MC_CMD_SENSOR_ENTRY_LO_OFST 4 */
 /*            MC_CMD_SENSOR_ENTRY_HI_OFST 8 */
-/*            MC_CMD_SENSOR_ENTRY_MINNUM 1 */
+/*            MC_CMD_SENSOR_ENTRY_MINNUM 0 */
 /*            MC_CMD_SENSOR_ENTRY_MAXNUM 31 */
 
 /* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF structuredef */
@@ -3864,6 +4073,18 @@
 #define       NVRAM_PARTITION_TYPE_ID_LBN 0
 #define       NVRAM_PARTITION_TYPE_ID_WIDTH 16
 
+/* LICENSED_APP_ID structuredef */
+#define    LICENSED_APP_ID_LEN 4
+#define       LICENSED_APP_ID_ID_OFST 0
+/* enum: OpenOnload */
+#define          LICENSED_APP_ID_ONLOAD            0x1
+/* enum: PTP timestamping */
+#define          LICENSED_APP_ID_PTP               0x2
+/* enum: SolarCapture Pro */
+#define          LICENSED_APP_ID_SOLARCAPTURE_PRO  0x4
+#define       LICENSED_APP_ID_ID_LBN 0
+#define       LICENSED_APP_ID_ID_WIDTH 32
+
 
 /***********************************/
 /* MC_CMD_READ_REGS
@@ -4021,6 +4242,8 @@
 #define        MC_CMD_INIT_RXQ_IN_FLAG_CHAIN_WIDTH 1
 #define        MC_CMD_INIT_RXQ_IN_FLAG_PREFIX_LBN 8
 #define        MC_CMD_INIT_RXQ_IN_FLAG_PREFIX_WIDTH 1
+#define        MC_CMD_INIT_RXQ_IN_FLAG_DISABLE_SCATTER_LBN 9
+#define        MC_CMD_INIT_RXQ_IN_FLAG_DISABLE_SCATTER_WIDTH 1
 /* Owner ID to use if in buffer mode (zero if physical) */
 #define       MC_CMD_INIT_RXQ_IN_OWNER_ID_OFST 20
 /* The port ID associated with the v-adaptor which should contain this DMAQ. */
@@ -4179,6 +4402,9 @@
 #define        MC_CMD_PROXY_CMD_IN_TARGET_VF_WIDTH 16
 #define          MC_CMD_PROXY_CMD_IN_VF_NULL  0xffff /* enum */
 
+/* MC_CMD_PROXY_CMD_OUT msgresponse */
+#define    MC_CMD_PROXY_CMD_OUT_LEN 0
+
 
 /***********************************/
 /* MC_CMD_ALLOC_BUFTBL_CHUNK
@@ -4213,7 +4439,7 @@
 
 /* MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN msgrequest */
 #define    MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMIN 20
-#define    MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMAX 252
+#define    MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMAX 268
 #define    MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LEN(num) (12+8*(num))
 #define       MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_HANDLE_OFST 0
 /* ID */
@@ -4226,7 +4452,7 @@
 #define       MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LO_OFST 12
 #define       MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_HI_OFST 16
 #define       MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MINNUM 1
-#define       MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MAXNUM 30
+#define       MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MAXNUM 32
 
 /* MC_CMD_PROGRAM_BUFTBL_ENTRIES_OUT msgresponse */
 #define    MC_CMD_PROGRAM_BUFTBL_ENTRIES_OUT_LEN 0
@@ -6800,6 +7026,30 @@
 
 
 /***********************************/
+/* MC_CMD_CAP_BLK_READ
+ * Read multiple 64bit words from capture block memory
+ */
+#define MC_CMD_CAP_BLK_READ 0xe7
+
+/* MC_CMD_CAP_BLK_READ_IN msgrequest */
+#define    MC_CMD_CAP_BLK_READ_IN_LEN 12
+#define       MC_CMD_CAP_BLK_READ_IN_CAP_REG_OFST 0
+#define       MC_CMD_CAP_BLK_READ_IN_ADDR_OFST 4
+#define       MC_CMD_CAP_BLK_READ_IN_COUNT_OFST 8
+
+/* MC_CMD_CAP_BLK_READ_OUT msgresponse */
+#define    MC_CMD_CAP_BLK_READ_OUT_LENMIN 8
+#define    MC_CMD_CAP_BLK_READ_OUT_LENMAX 248
+#define    MC_CMD_CAP_BLK_READ_OUT_LEN(num) (0+8*(num))
+#define       MC_CMD_CAP_BLK_READ_OUT_BUFFER_OFST 0
+#define       MC_CMD_CAP_BLK_READ_OUT_BUFFER_LEN 8
+#define       MC_CMD_CAP_BLK_READ_OUT_BUFFER_LO_OFST 0
+#define       MC_CMD_CAP_BLK_READ_OUT_BUFFER_HI_OFST 4
+#define       MC_CMD_CAP_BLK_READ_OUT_BUFFER_MINNUM 1
+#define       MC_CMD_CAP_BLK_READ_OUT_BUFFER_MAXNUM 31
+
+
+/***********************************/
 /* MC_CMD_DUMP_DO
  * Take a dump of the DUT state
  */
@@ -6826,6 +7076,10 @@
 #define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 20
 #define          MC_CMD_DUMP_DO_IN_HOST_MEMORY_MLI_MAX_DEPTH  0x2 /* enum */
 #define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_UART_PORT_OFST 12
+/* enum: The uart port this command was received over (if using a uart
+ * transport)
+ */
+#define          MC_CMD_DUMP_DO_IN_UART_PORT_SRC  0xff
 #define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_SIZE_OFST 24
 #define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_OFST 28
 #define          MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM  0x0 /* enum */
@@ -6942,39 +7196,68 @@
 
 
 /***********************************/
-/* MC_CMD_START_KR_EYE_PLOT
- * Start KR Serdes Eye diagram plot on a given lane. Lane must have valid
- * signal.
- */
-#define MC_CMD_START_KR_EYE_PLOT 0xee
-
-/* MC_CMD_START_KR_EYE_PLOT_IN msgrequest */
-#define    MC_CMD_START_KR_EYE_PLOT_IN_LEN 4
-#define       MC_CMD_START_KR_EYE_PLOT_IN_LANE_OFST 0
-
-/* MC_CMD_START_KR_EYE_PLOT_OUT msgresponse */
-#define    MC_CMD_START_KR_EYE_PLOT_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_POLL_KR_EYE_PLOT
- * Poll KR Serdes Eye diagram plot. Returns one row of BER data. The caller
- * should call this command repeatedly after starting eye plot, until no more
- * data is returned.
- */
-#define MC_CMD_POLL_KR_EYE_PLOT 0xef
-
-/* MC_CMD_POLL_KR_EYE_PLOT_IN msgrequest */
-#define    MC_CMD_POLL_KR_EYE_PLOT_IN_LEN 0
-
-/* MC_CMD_POLL_KR_EYE_PLOT_OUT msgresponse */
-#define    MC_CMD_POLL_KR_EYE_PLOT_OUT_LENMIN 0
-#define    MC_CMD_POLL_KR_EYE_PLOT_OUT_LENMAX 252
-#define    MC_CMD_POLL_KR_EYE_PLOT_OUT_LEN(num) (0+2*(num))
-#define       MC_CMD_POLL_KR_EYE_PLOT_OUT_SAMPLES_OFST 0
-#define       MC_CMD_POLL_KR_EYE_PLOT_OUT_SAMPLES_LEN 2
-#define       MC_CMD_POLL_KR_EYE_PLOT_OUT_SAMPLES_MINNUM 0
-#define       MC_CMD_POLL_KR_EYE_PLOT_OUT_SAMPLES_MAXNUM 126
+/* MC_CMD_UART_SEND_DATA
+ * Send checksummed[sic] block of data over the uart. Response is a placeholder
+ * should we wish to make this reliable; currently requests are fire-and-
+ * forget.
+ */
+#define MC_CMD_UART_SEND_DATA 0xee
+
+/* MC_CMD_UART_SEND_DATA_OUT msgrequest */
+#define    MC_CMD_UART_SEND_DATA_OUT_LENMIN 16
+#define    MC_CMD_UART_SEND_DATA_OUT_LENMAX 252
+#define    MC_CMD_UART_SEND_DATA_OUT_LEN(num) (16+1*(num))
+/* CRC32 over OFFSET, LENGTH, RESERVED, DATA */
+#define       MC_CMD_UART_SEND_DATA_OUT_CHECKSUM_OFST 0
+/* Offset at which to write the data */
+#define       MC_CMD_UART_SEND_DATA_OUT_OFFSET_OFST 4
+/* Length of data */
+#define       MC_CMD_UART_SEND_DATA_OUT_LENGTH_OFST 8
+/* Reserved for future use */
+#define       MC_CMD_UART_SEND_DATA_OUT_RESERVED_OFST 12
+#define       MC_CMD_UART_SEND_DATA_OUT_DATA_OFST 16
+#define       MC_CMD_UART_SEND_DATA_OUT_DATA_LEN 1
+#define       MC_CMD_UART_SEND_DATA_OUT_DATA_MINNUM 0
+#define       MC_CMD_UART_SEND_DATA_OUT_DATA_MAXNUM 236
+
+/* MC_CMD_UART_SEND_DATA_IN msgresponse */
+#define    MC_CMD_UART_SEND_DATA_IN_LEN 0
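
The CHECKSUM field is documented only as a CRC32 over the rest of the message; assuming the kernel's crc32() with a zero seed (an assumption — the exact CRC variant and seed are not specified here), a host-side sketch might be:

#include <linux/crc32.h>

/* Assumed variant: crc32_le, zero seed, over everything after the 4-byte
 * CHECKSUM field itself.
 */
static u32 uart_send_data_cksum(const u8 *req, size_t req_len)
{
	return crc32(0, req + 4, req_len - 4);
}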
+
+
+/***********************************/
+/* MC_CMD_UART_RECV_DATA
+ * Request checksummed[sic] block of data over the uart. Only a placeholder,
+ * subject to change and not currently implemented.
+ */
+#define MC_CMD_UART_RECV_DATA 0xef
+
+/* MC_CMD_UART_RECV_DATA_OUT msgrequest */
+#define    MC_CMD_UART_RECV_DATA_OUT_LEN 16
+/* CRC32 over OFFSET, LENGTH, RESERVED */
+#define       MC_CMD_UART_RECV_DATA_OUT_CHECKSUM_OFST 0
+/* Offset from which to read the data */
+#define       MC_CMD_UART_RECV_DATA_OUT_OFFSET_OFST 4
+/* Length of data */
+#define       MC_CMD_UART_RECV_DATA_OUT_LENGTH_OFST 8
+/* Reserved for future use */
+#define       MC_CMD_UART_RECV_DATA_OUT_RESERVED_OFST 12
+
+/* MC_CMD_UART_RECV_DATA_IN msgresponse */
+#define    MC_CMD_UART_RECV_DATA_IN_LENMIN 16
+#define    MC_CMD_UART_RECV_DATA_IN_LENMAX 252
+#define    MC_CMD_UART_RECV_DATA_IN_LEN(num) (16+1*(num))
+/* CRC32 over RESERVED1, RESERVED2, RESERVED3, DATA */
+#define       MC_CMD_UART_RECV_DATA_IN_CHECKSUM_OFST 0
+/* Reserved for future use */
+#define       MC_CMD_UART_RECV_DATA_IN_RESERVED1_OFST 4
+/* Reserved for future use */
+#define       MC_CMD_UART_RECV_DATA_IN_RESERVED2_OFST 8
+/* Reserved for future use */
+#define       MC_CMD_UART_RECV_DATA_IN_RESERVED3_OFST 12
+#define       MC_CMD_UART_RECV_DATA_IN_DATA_OFST 16
+#define       MC_CMD_UART_RECV_DATA_IN_DATA_LEN 1
+#define       MC_CMD_UART_RECV_DATA_IN_DATA_MINNUM 0
+#define       MC_CMD_UART_RECV_DATA_IN_DATA_MAXNUM 236
 
 
 /***********************************/
@@ -7026,6 +7309,15 @@
 #define          MC_CMD_KR_TUNE_IN_TXEQ_SET  0x3
 /* enum: Force KR Serdes reset / recalibration */
 #define          MC_CMD_KR_TUNE_IN_RECAL  0x4
+/* enum: Start KR Serdes Eye diagram plot on a given lane. Lane must have valid
+ * signal.
+ */
+#define          MC_CMD_KR_TUNE_IN_START_EYE_PLOT  0x5
+/* enum: Poll KR Serdes Eye diagram plot. Returns one row of BER data. The
+ * caller should call this command repeatedly after starting eye plot, until no
+ * more data is returned.
+ */
+#define          MC_CMD_KR_TUNE_IN_POLL_EYE_PLOT  0x6
 /* Align the arguments to 32 bits */
 #define       MC_CMD_KR_TUNE_IN_KR_TUNE_RSVD_OFST 1
 #define       MC_CMD_KR_TUNE_IN_KR_TUNE_RSVD_LEN 3
@@ -7123,6 +7415,91 @@
 /* MC_CMD_KR_TUNE_RXEQ_SET_OUT msgresponse */
 #define    MC_CMD_KR_TUNE_RXEQ_SET_OUT_LEN 0
 
+/* MC_CMD_KR_TUNE_TXEQ_GET_IN msgrequest */
+#define    MC_CMD_KR_TUNE_TXEQ_GET_IN_LEN 4
+/* Requested operation */
+#define       MC_CMD_KR_TUNE_TXEQ_GET_IN_KR_TUNE_OP_OFST 0
+#define       MC_CMD_KR_TUNE_TXEQ_GET_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define       MC_CMD_KR_TUNE_TXEQ_GET_IN_KR_TUNE_RSVD_OFST 1
+#define       MC_CMD_KR_TUNE_TXEQ_GET_IN_KR_TUNE_RSVD_LEN 3
+
+/* MC_CMD_KR_TUNE_TXEQ_GET_OUT msgresponse */
+#define    MC_CMD_KR_TUNE_TXEQ_GET_OUT_LENMIN 4
+#define    MC_CMD_KR_TUNE_TXEQ_GET_OUT_LENMAX 252
+#define    MC_CMD_KR_TUNE_TXEQ_GET_OUT_LEN(num) (0+4*(num))
+/* TXEQ Parameter */
+#define       MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_OFST 0
+#define       MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LEN 4
+#define       MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_MINNUM 1
+#define       MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_MAXNUM 63
+#define        MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_ID_LBN 0
+#define        MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_ID_WIDTH 8
+/* enum: TX Amplitude */
+#define          MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_LEV  0x0
+/* enum: De-Emphasis Tap1 Magnitude (0-7) */
+#define          MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_MODE  0x1
+/* enum: De-Emphasis Tap1 Fine */
+#define          MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_DTLEV  0x2
+/* enum: De-Emphasis Tap2 Magnitude (0-6) */
+#define          MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_D2  0x3
+/* enum: De-Emphasis Tap2 Fine */
+#define          MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_D2TLEV  0x4
+/* enum: Pre-Emphasis Magnitude */
+#define          MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_E  0x5
+/* enum: Pre-Emphasis Fine */
+#define          MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_ETLEV  0x6
+/* enum: TX Slew Rate Coarse control */
+#define          MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_PREDRV_DLY  0x7
+/* enum: TX Slew Rate Fine control */
+#define          MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_SR_SET  0x8
+#define        MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LANE_LBN 8
+#define        MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LANE_WIDTH 3
+#define          MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_0  0x0 /* enum */
+#define          MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_1  0x1 /* enum */
+#define          MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_2  0x2 /* enum */
+#define          MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_3  0x3 /* enum */
+#define          MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_ALL  0x4 /* enum */
+#define        MC_CMD_KR_TUNE_TXEQ_GET_OUT_RESERVED_LBN 11
+#define        MC_CMD_KR_TUNE_TXEQ_GET_OUT_RESERVED_WIDTH 5
+#define        MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_INITIAL_LBN 16
+#define        MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_INITIAL_WIDTH 8
+#define        MC_CMD_KR_TUNE_TXEQ_GET_OUT_RESERVED2_LBN 24
+#define        MC_CMD_KR_TUNE_TXEQ_GET_OUT_RESERVED2_WIDTH 8
+
+/* MC_CMD_KR_TUNE_TXEQ_SET_IN msgrequest */
+#define    MC_CMD_KR_TUNE_TXEQ_SET_IN_LENMIN 8
+#define    MC_CMD_KR_TUNE_TXEQ_SET_IN_LENMAX 252
+#define    MC_CMD_KR_TUNE_TXEQ_SET_IN_LEN(num) (4+4*(num))
+/* Requested operation */
+#define       MC_CMD_KR_TUNE_TXEQ_SET_IN_KR_TUNE_OP_OFST 0
+#define       MC_CMD_KR_TUNE_TXEQ_SET_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define       MC_CMD_KR_TUNE_TXEQ_SET_IN_KR_TUNE_RSVD_OFST 1
+#define       MC_CMD_KR_TUNE_TXEQ_SET_IN_KR_TUNE_RSVD_LEN 3
+/* TXEQ Parameter */
+#define       MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_OFST 4
+#define       MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_LEN 4
+#define       MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_MINNUM 1
+#define       MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_MAXNUM 62
+#define        MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_ID_LBN 0
+#define        MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_ID_WIDTH 8
+/*             Enum values, see field(s): */
+/*                MC_CMD_KR_TUNE_TXEQ_GET_OUT/PARAM_ID */
+#define        MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_LANE_LBN 8
+#define        MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_LANE_WIDTH 3
+/*             Enum values, see field(s): */
+/*                MC_CMD_KR_TUNE_TXEQ_GET_OUT/PARAM_LANE */
+#define        MC_CMD_KR_TUNE_TXEQ_SET_IN_RESERVED_LBN 11
+#define        MC_CMD_KR_TUNE_TXEQ_SET_IN_RESERVED_WIDTH 5
+#define        MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_INITIAL_LBN 16
+#define        MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_INITIAL_WIDTH 8
+#define        MC_CMD_KR_TUNE_TXEQ_SET_IN_RESERVED2_LBN 24
+#define        MC_CMD_KR_TUNE_TXEQ_SET_IN_RESERVED2_WIDTH 8
+
+/* MC_CMD_KR_TUNE_TXEQ_SET_OUT msgresponse */
+#define    MC_CMD_KR_TUNE_TXEQ_SET_OUT_LEN 0
+
 /* MC_CMD_KR_TUNE_RECAL_IN msgrequest */
 #define    MC_CMD_KR_TUNE_RECAL_IN_LEN 4
 /* Requested operation */
@@ -7135,6 +7512,37 @@
 /* MC_CMD_KR_TUNE_RECAL_OUT msgresponse */
 #define    MC_CMD_KR_TUNE_RECAL_OUT_LEN 0
 
+/* MC_CMD_KR_TUNE_START_EYE_PLOT_IN msgrequest */
+#define    MC_CMD_KR_TUNE_START_EYE_PLOT_IN_LEN 8
+/* Requested operation */
+#define       MC_CMD_KR_TUNE_START_EYE_PLOT_IN_KR_TUNE_OP_OFST 0
+#define       MC_CMD_KR_TUNE_START_EYE_PLOT_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define       MC_CMD_KR_TUNE_START_EYE_PLOT_IN_KR_TUNE_RSVD_OFST 1
+#define       MC_CMD_KR_TUNE_START_EYE_PLOT_IN_KR_TUNE_RSVD_LEN 3
+#define       MC_CMD_KR_TUNE_START_EYE_PLOT_IN_LANE_OFST 4
+
+/* MC_CMD_KR_TUNE_START_EYE_PLOT_OUT msgresponse */
+#define    MC_CMD_KR_TUNE_START_EYE_PLOT_OUT_LEN 0
+
+/* MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN msgrequest */
+#define    MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN_LEN 4
+/* Requested operation */
+#define       MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN_KR_TUNE_OP_OFST 0
+#define       MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define       MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN_KR_TUNE_RSVD_OFST 1
+#define       MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN_KR_TUNE_RSVD_LEN 3
+
+/* MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT msgresponse */
+#define    MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_LENMIN 0
+#define    MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_LENMAX 252
+#define    MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_LEN(num) (0+2*(num))
+#define       MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_OFST 0
+#define       MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_LEN 2
+#define       MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MINNUM 0
+#define       MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MAXNUM 126
+
 
 /***********************************/
 /* MC_CMD_PCIE_TUNE
@@ -7157,6 +7565,13 @@
 #define          MC_CMD_PCIE_TUNE_IN_TXEQ_GET  0x2
 /* enum: Override TX Driver settings */
 #define          MC_CMD_PCIE_TUNE_IN_TXEQ_SET  0x3
+/* enum: Start PCIe Serdes Eye diagram plot on a given lane. */
+#define          MC_CMD_PCIE_TUNE_IN_START_EYE_PLOT  0x5
+/* enum: Poll PCIe Serdes Eye diagram plot. Returns one row of BER data. The
+ * caller should call this command repeatedly after starting eye plot, until no
+ * more data is returned.
+ */
+#define          MC_CMD_PCIE_TUNE_IN_POLL_EYE_PLOT  0x6
 /* Align the arguments to 32 bits */
 #define       MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_RSVD_OFST 1
 #define       MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_RSVD_LEN 3
@@ -7258,6 +7673,37 @@
 #define        MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_CURRENT_LBN 24
 #define        MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_CURRENT_WIDTH 8
 
+/* MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN msgrequest */
+#define    MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_LEN 8
+/* Requested operation */
+#define       MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_OP_OFST 0
+#define       MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define       MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_RSVD_OFST 1
+#define       MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_RSVD_LEN 3
+#define       MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_LANE_OFST 4
+
+/* MC_CMD_PCIE_TUNE_START_EYE_PLOT_OUT msgresponse */
+#define    MC_CMD_PCIE_TUNE_START_EYE_PLOT_OUT_LEN 0
+
+/* MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN msgrequest */
+#define    MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_LEN 4
+/* Requested operation */
+#define       MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_PCIE_TUNE_OP_OFST 0
+#define       MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_PCIE_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define       MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_PCIE_TUNE_RSVD_OFST 1
+#define       MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_PCIE_TUNE_RSVD_LEN 3
+
+/* MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT msgresponse */
+#define    MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_LENMIN 0
+#define    MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_LENMAX 252
+#define    MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_LEN(num) (0+2*(num))
+#define       MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_OFST 0
+#define       MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_LEN 2
+#define       MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MINNUM 0
+#define       MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MAXNUM 126
+
 
 /***********************************/
 /* MC_CMD_LICENSING
@@ -7310,5 +7756,152 @@
  */
 #define MC_CMD_MC2MC_PROXY 0xf4
 
+/* MC_CMD_MC2MC_PROXY_IN msgrequest */
+#define    MC_CMD_MC2MC_PROXY_IN_LEN 0
+
+/* MC_CMD_MC2MC_PROXY_OUT msgresponse */
+#define    MC_CMD_MC2MC_PROXY_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_LICENSED_APP_STATE
+ * Query the state of an individual licensed application. (Note that the actual
+ * state may be invalidated by the MC_CMD_LICENSING OP_UPDATE_LICENSE operation
+ * or a reboot of the MC.)
+ */
+#define MC_CMD_GET_LICENSED_APP_STATE 0xf5
+
+/* MC_CMD_GET_LICENSED_APP_STATE_IN msgrequest */
+#define    MC_CMD_GET_LICENSED_APP_STATE_IN_LEN 4
+/* application ID to query (LICENSED_APP_ID_xxx) */
+#define       MC_CMD_GET_LICENSED_APP_STATE_IN_APP_ID_OFST 0
+
+/* MC_CMD_GET_LICENSED_APP_STATE_OUT msgresponse */
+#define    MC_CMD_GET_LICENSED_APP_STATE_OUT_LEN 4
+/* state of this application */
+#define       MC_CMD_GET_LICENSED_APP_STATE_OUT_STATE_OFST 0
+/* enum: no (or invalid) license is present for the application */
+#define          MC_CMD_GET_LICENSED_APP_STATE_OUT_NOT_LICENSED  0x0
+/* enum: a valid license is present for the application */
+#define          MC_CMD_GET_LICENSED_APP_STATE_OUT_LICENSED  0x1
+
+
+/***********************************/
+/* MC_CMD_LICENSED_APP_OP
+ * Perform an action for an individual licensed application.
+ */
+#define MC_CMD_LICENSED_APP_OP 0xf6
+
+/* MC_CMD_LICENSED_APP_OP_IN msgrequest */
+#define    MC_CMD_LICENSED_APP_OP_IN_LENMIN 8
+#define    MC_CMD_LICENSED_APP_OP_IN_LENMAX 252
+#define    MC_CMD_LICENSED_APP_OP_IN_LEN(num) (8+4*(num))
+/* application ID */
+#define       MC_CMD_LICENSED_APP_OP_IN_APP_ID_OFST 0
+/* the type of operation requested */
+#define       MC_CMD_LICENSED_APP_OP_IN_OP_OFST 4
+/* enum: validate application */
+#define          MC_CMD_LICENSED_APP_OP_IN_OP_VALIDATE  0x0
+/* arguments specific to this particular operation */
+#define       MC_CMD_LICENSED_APP_OP_IN_ARGS_OFST 8
+#define       MC_CMD_LICENSED_APP_OP_IN_ARGS_LEN 4
+#define       MC_CMD_LICENSED_APP_OP_IN_ARGS_MINNUM 0
+#define       MC_CMD_LICENSED_APP_OP_IN_ARGS_MAXNUM 61
+
+/* MC_CMD_LICENSED_APP_OP_OUT msgresponse */
+#define    MC_CMD_LICENSED_APP_OP_OUT_LENMIN 0
+#define    MC_CMD_LICENSED_APP_OP_OUT_LENMAX 252
+#define    MC_CMD_LICENSED_APP_OP_OUT_LEN(num) (0+4*(num))
+/* result specific to this particular operation */
+#define       MC_CMD_LICENSED_APP_OP_OUT_RESULT_OFST 0
+#define       MC_CMD_LICENSED_APP_OP_OUT_RESULT_LEN 4
+#define       MC_CMD_LICENSED_APP_OP_OUT_RESULT_MINNUM 0
+#define       MC_CMD_LICENSED_APP_OP_OUT_RESULT_MAXNUM 63
+
+/* MC_CMD_LICENSED_APP_OP_VALIDATE_IN msgrequest */
+#define    MC_CMD_LICENSED_APP_OP_VALIDATE_IN_LEN 72
+/* application ID */
+#define       MC_CMD_LICENSED_APP_OP_VALIDATE_IN_APP_ID_OFST 0
+/* the type of operation requested */
+#define       MC_CMD_LICENSED_APP_OP_VALIDATE_IN_OP_OFST 4
+/* validation challenge */
+#define       MC_CMD_LICENSED_APP_OP_VALIDATE_IN_CHALLENGE_OFST 8
+#define       MC_CMD_LICENSED_APP_OP_VALIDATE_IN_CHALLENGE_LEN 64
+
+/* MC_CMD_LICENSED_APP_OP_VALIDATE_OUT msgresponse */
+#define    MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_LEN 68
+/* feature expiry (time_t) */
+#define       MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_EXPIRY_OFST 0
+/* validation response */
+#define       MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_RESPONSE_OFST 4
+#define       MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_RESPONSE_LEN 64
+
+
+/***********************************/
+/* MC_CMD_SET_PORT_SNIFF_CONFIG
+ * Configure port sniffing for the physical port associated with the calling
+ * function. Only a privileged function may change the port sniffing
+ * configuration. A copy of all traffic delivered to the host (non-promiscuous
+ * mode) or all traffic arriving at the port (promiscuous mode) may be
+ * delivered to a specific queue, or a set of queues with RSS.
+ */
+#define MC_CMD_SET_PORT_SNIFF_CONFIG 0xf7
+
+/* MC_CMD_SET_PORT_SNIFF_CONFIG_IN msgrequest */
+#define    MC_CMD_SET_PORT_SNIFF_CONFIG_IN_LEN 16
+/* configuration flags */
+#define       MC_CMD_SET_PORT_SNIFF_CONFIG_IN_FLAGS_OFST 0
+#define        MC_CMD_SET_PORT_SNIFF_CONFIG_IN_ENABLE_LBN 0
+#define        MC_CMD_SET_PORT_SNIFF_CONFIG_IN_ENABLE_WIDTH 1
+#define        MC_CMD_SET_PORT_SNIFF_CONFIG_IN_PROMISCUOUS_LBN 1
+#define        MC_CMD_SET_PORT_SNIFF_CONFIG_IN_PROMISCUOUS_WIDTH 1
+/* receive queue handle (for RSS mode, this is the base queue) */
+#define       MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_QUEUE_OFST 4
+/* receive mode */
+#define       MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_OFST 8
+/* enum: receive to just the specified queue */
+#define          MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE  0x0
+/* enum: receive to multiple queues using RSS context */
+#define          MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_RSS  0x1
+/* RSS context (for RX_MODE_RSS) as returned by MC_CMD_RSS_CONTEXT_ALLOC. Note
+ * that these handles should be considered opaque to the host, although a value
+ * of 0xFFFFFFFF is guaranteed never to be a valid handle.
+ */
+#define       MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_OFST 12
+
+/* MC_CMD_SET_PORT_SNIFF_CONFIG_OUT msgresponse */
+#define    MC_CMD_SET_PORT_SNIFF_CONFIG_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_PORT_SNIFF_CONFIG
+ * Obtain the current port sniffing configuration for the physical port
+ * associated with the calling function. Only a privileged function may read
+ * the configuration.
+ */
+#define MC_CMD_GET_PORT_SNIFF_CONFIG 0xf8
+
+/* MC_CMD_GET_PORT_SNIFF_CONFIG_IN msgrequest */
+#define    MC_CMD_GET_PORT_SNIFF_CONFIG_IN_LEN 0
+
+/* MC_CMD_GET_PORT_SNIFF_CONFIG_OUT msgresponse */
+#define    MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_LEN 16
+/* configuration flags */
+#define       MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_FLAGS_OFST 0
+#define        MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_ENABLE_LBN 0
+#define        MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_ENABLE_WIDTH 1
+#define        MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_PROMISCUOUS_LBN 1
+#define        MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_PROMISCUOUS_WIDTH 1
+/* receiving queue handle (for RSS mode, this is the base queue) */
+#define       MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_OFST 4
+/* receive mode */
+#define       MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_OFST 8
+/* enum: receiving to just the specified queue */
+#define          MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_SIMPLE  0x0
+/* enum: receiving to multiple queues using RSS context */
+#define          MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_RSS  0x1
+/* RSS context (for RX_MODE_RSS) */
+#define       MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_OFST 12
+
 
 #endif /* MCDI_PCOL_H */
diff --git a/drivers/net/ethernet/sfc/mcdi_port.c b/drivers/net/ethernet/sfc/mcdi_port.c
index 7b6be61d549f..91d23252f8fa 100644
--- a/drivers/net/ethernet/sfc/mcdi_port.c
+++ b/drivers/net/ethernet/sfc/mcdi_port.c
@@ -90,13 +90,6 @@ static int efx_mcdi_set_link(struct efx_nic *efx, u32 capabilities,
 
 	rc = efx_mcdi_rpc(efx, MC_CMD_SET_LINK, inbuf, sizeof(inbuf),
 			  NULL, 0, NULL);
-	if (rc)
-		goto fail;
-
-	return 0;
-
-fail:
-	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
 	return rc;
 }
 
@@ -143,17 +136,13 @@ static int efx_mcdi_mdio_read(struct net_device *net_dev,
 	rc = efx_mcdi_rpc(efx, MC_CMD_MDIO_READ, inbuf, sizeof(inbuf),
 			  outbuf, sizeof(outbuf), &outlen);
 	if (rc)
-		goto fail;
+		return rc;
 
 	if (MCDI_DWORD(outbuf, MDIO_READ_OUT_STATUS) !=
 	    MC_CMD_MDIO_STATUS_GOOD)
 		return -EIO;
 
 	return (u16)MCDI_DWORD(outbuf, MDIO_READ_OUT_VALUE);
-
-fail:
-	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
-	return rc;
 }
 
 static int efx_mcdi_mdio_write(struct net_device *net_dev,
@@ -174,17 +163,13 @@ static int efx_mcdi_mdio_write(struct net_device *net_dev,
 	rc = efx_mcdi_rpc(efx, MC_CMD_MDIO_WRITE, inbuf, sizeof(inbuf),
 			  outbuf, sizeof(outbuf), &outlen);
 	if (rc)
-		goto fail;
+		return rc;
 
 	if (MCDI_DWORD(outbuf, MDIO_WRITE_OUT_STATUS) !=
 	    MC_CMD_MDIO_STATUS_GOOD)
 		return -EIO;
 
 	return 0;
-
-fail:
-	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
-	return rc;
 }
 
 static u32 mcdi_to_ethtool_cap(u32 media, u32 cap)
@@ -487,17 +472,14 @@ static bool efx_mcdi_phy_poll(struct efx_nic *efx)
 
 	rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
 			  outbuf, sizeof(outbuf), NULL);
-	if (rc) {
-		netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
-			  __func__, rc);
+	if (rc)
 		efx->link_state.up = false;
-	} else {
+	else
 		efx_mcdi_phy_decode_link(
 			efx, &efx->link_state,
 			MCDI_DWORD(outbuf, GET_LINK_OUT_LINK_SPEED),
 			MCDI_DWORD(outbuf, GET_LINK_OUT_FLAGS),
 			MCDI_DWORD(outbuf, GET_LINK_OUT_FCNTL));
-	}
 
 	return !efx_link_state_equal(&efx->link_state, &old_state);
 }
@@ -531,11 +513,8 @@ static void efx_mcdi_phy_get_settings(struct efx_nic *efx, struct ethtool_cmd *e
 	BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);
 	rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
 			  outbuf, sizeof(outbuf), NULL);
-	if (rc) {
-		netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
-			  __func__, rc);
+	if (rc)
 		return;
-	}
 	ecmd->lp_advertising =
 		mcdi_to_ethtool_cap(phy_cfg->media,
 				    MCDI_DWORD(outbuf, GET_LINK_OUT_LP_CAP));
@@ -918,21 +897,29 @@ bool efx_mcdi_mac_check_fault(struct efx_nic *efx)
 
 	rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
 			  outbuf, sizeof(outbuf), &outlength);
-	if (rc) {
-		netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
-			  __func__, rc);
+	if (rc)
 		return true;
-	}
 
 	return MCDI_DWORD(outbuf, GET_LINK_OUT_MAC_FAULT) != 0;
 }
 
-static int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr,
-			      u32 dma_len, int enable, int clear)
+enum efx_stats_action {
+	EFX_STATS_ENABLE,
+	EFX_STATS_DISABLE,
+	EFX_STATS_PULL,
+};
+
+static int efx_mcdi_mac_stats(struct efx_nic *efx,
+			      enum efx_stats_action action, int clear)
 {
 	MCDI_DECLARE_BUF(inbuf, MC_CMD_MAC_STATS_IN_LEN);
 	int rc;
-	int period = enable ? 1000 : 0;
+	int change = action == EFX_STATS_PULL ? 0 : 1;
+	int enable = action == EFX_STATS_ENABLE ? 1 : 0;
+	int period = action == EFX_STATS_ENABLE ? 1000 : 0;
+	dma_addr_t dma_addr = efx->stats_buffer.dma_addr;
+	u32 dma_len = action != EFX_STATS_DISABLE ?
+		MC_CMD_MAC_NSTATS * sizeof(u64) : 0;
 
 	BUILD_BUG_ON(MC_CMD_MAC_STATS_OUT_DMA_LEN != 0);
 
@@ -940,8 +927,8 @@ static int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr,
 	MCDI_POPULATE_DWORD_7(inbuf, MAC_STATS_IN_CMD,
 			      MAC_STATS_IN_DMA, !!enable,
 			      MAC_STATS_IN_CLEAR, clear,
-			      MAC_STATS_IN_PERIODIC_CHANGE, 1,
-			      MAC_STATS_IN_PERIODIC_ENABLE, !!enable,
+			      MAC_STATS_IN_PERIODIC_CHANGE, change,
+			      MAC_STATS_IN_PERIODIC_ENABLE, enable,
 			      MAC_STATS_IN_PERIODIC_CLEAR, 0,
 			      MAC_STATS_IN_PERIODIC_NOEVENT, 1,
 			      MAC_STATS_IN_PERIOD_MS, period);
@@ -949,14 +936,6 @@ static int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr,
 
 	rc = efx_mcdi_rpc(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf),
 			  NULL, 0, NULL);
-	if (rc)
-		goto fail;
-
-	return 0;
-
-fail:
-	netif_err(efx, hw, efx->net_dev, "%s: %s failed rc=%d\n",
-		  __func__, enable ? "enable" : "disable", rc);
 	return rc;
 }
 
@@ -966,13 +945,29 @@ void efx_mcdi_mac_start_stats(struct efx_nic *efx)
 
 	dma_stats[MC_CMD_MAC_GENERATION_END] = EFX_MC_STATS_GENERATION_INVALID;
 
-	efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr,
-			   MC_CMD_MAC_NSTATS * sizeof(u64), 1, 0);
+	efx_mcdi_mac_stats(efx, EFX_STATS_ENABLE, 0);
 }
 
 void efx_mcdi_mac_stop_stats(struct efx_nic *efx)
 {
-	efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr, 0, 0, 0);
+	efx_mcdi_mac_stats(efx, EFX_STATS_DISABLE, 0);
+}
+
+#define EFX_MAC_STATS_WAIT_US 100
+#define EFX_MAC_STATS_WAIT_ATTEMPTS 10
+
+void efx_mcdi_mac_pull_stats(struct efx_nic *efx)
+{
+	__le64 *dma_stats = efx->stats_buffer.addr;
+	int attempts = EFX_MAC_STATS_WAIT_ATTEMPTS;
+
+	dma_stats[MC_CMD_MAC_GENERATION_END] = EFX_MC_STATS_GENERATION_INVALID;
+	efx_mcdi_mac_stats(efx, EFX_STATS_PULL, 0);
+
+	while (dma_stats[MC_CMD_MAC_GENERATION_END] ==
+				EFX_MC_STATS_GENERATION_INVALID &&
+			attempts-- != 0)
+		udelay(EFX_MAC_STATS_WAIT_US);
 }
 
 int efx_mcdi_port_probe(struct efx_nic *efx)
@@ -1003,7 +998,7 @@ int efx_mcdi_port_probe(struct efx_nic *efx)
 		  efx->stats_buffer.addr,
 		  (u64)virt_to_phys(efx->stats_buffer.addr));
 
-	efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr, 0, 0, 1);
+	efx_mcdi_mac_stats(efx, EFX_STATS_DISABLE, 1);
 
 	return 0;
 }
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 542a0d252ae0..af2b8c59a903 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -91,6 +91,7 @@
 
 /* Forward declare Precision Time Protocol (PTP) support structure. */
 struct efx_ptp_data;
+struct hwtstamp_config;
 
 struct efx_self_tests;
 
@@ -287,12 +288,9 @@ struct efx_rx_buffer {
  * Used to facilitate sharing dma mappings between recycled rx buffers
  * and those passed up to the kernel.
  *
- * @refcnt: Number of struct efx_rx_buffer's referencing this page.
- *	When refcnt falls to zero, the page is unmapped for dma
  * @dma_addr: The dma address of this page.
  */
 struct efx_rx_page_state {
-	unsigned refcnt;
 	dma_addr_t dma_addr;
 
 	unsigned int __pad[0] ____cacheline_aligned;
@@ -362,10 +360,11 @@ struct efx_rx_queue {
 	unsigned int slow_fill_count;
 };
 
-enum efx_rx_alloc_method {
-	RX_ALLOC_METHOD_AUTO = 0,
-	RX_ALLOC_METHOD_SKB = 1,
-	RX_ALLOC_METHOD_PAGE = 2,
+enum efx_sync_events_state {
+	SYNC_EVENTS_DISABLED = 0,
+	SYNC_EVENTS_QUIESCENT,
+	SYNC_EVENTS_REQUESTED,
+	SYNC_EVENTS_VALID,
 };
 
 /**
@@ -407,6 +406,9 @@ enum efx_rx_alloc_method {
  *	by __efx_rx_packet(), if @rx_pkt_n_frags != 0
  * @rx_queue: RX queue for this channel
  * @tx_queue: TX queues for this channel
+ * @sync_events_state: Current state of sync events on this channel
+ * @sync_timestamp_major: Major part of the last ptp sync event
+ * @sync_timestamp_minor: Minor part of the last ptp sync event
  */
 struct efx_channel {
 	struct efx_nic *efx;
@@ -445,6 +447,10 @@ struct efx_channel {
 
 	struct efx_rx_queue rx_queue;
 	struct efx_tx_queue tx_queue[EFX_TXQ_TYPES];
+
+	enum efx_sync_events_state sync_events_state;
+	u32 sync_timestamp_major;
+	u32 sync_timestamp_minor;
 };
 
 /**
@@ -520,15 +526,6 @@ enum nic_state {
 	STATE_RECOVERY = 3,	/* device recovering from PCI error */
 };
 
-/*
- * Alignment of the skb->head which wraps a page-allocated RX buffer
- *
- * The skb allocated to wrap an rx_buffer can have this alignment. Since
- * the data is memcpy'd from the rx_buf, it does not need to be equal to
- * NET_IP_ALIGN.
- */
-#define EFX_PAGE_SKB_ALIGN 2
-
 /* Forward declaration */
 struct efx_nic;
 
@@ -651,6 +648,13 @@ struct vfdi_status;
  * struct efx_nic - an Efx NIC
  * @name: Device name (net device name or bus id before net device registered)
  * @pci_dev: The PCI device
+ * @node: List node for maintaining primary/secondary function lists
+ * @primary: &struct efx_nic instance for the primary function of this
+ *	controller.  May be the same structure, and may be %NULL if no
+ *	primary function is bound.  Serialised by rtnl_lock.
+ * @secondary_list: List of &struct efx_nic instances for the secondary PCI
+ *	functions of the controller, if this is for the primary function.
+ *	Serialised by rtnl_lock.
  * @type: Controller type attributes
  * @legacy_irq: IRQ number
  * @workqueue: Workqueue for port reconfigures and the HW monitor.
@@ -694,6 +698,8 @@ struct vfdi_status;
  *	(valid only if @rx_prefix_size != 0; always negative)
  * @rx_packet_len_offset: Offset of RX packet length from start of packet data
  *	(valid only for NICs that set %EFX_RX_PKT_PREFIX_LEN; always negative)
+ * @rx_packet_ts_offset: Offset of timestamp from start of packet data
+ *	(valid only if channel->sync_timestamps_enabled; always negative)
  * @rx_hash_key: Toeplitz hash key for RSS
  * @rx_indir_table: Indirection table for RSS
  * @rx_scatter: Scatter mode enabled for receives
@@ -763,6 +769,7 @@ struct vfdi_status;
  * @local_lock: Mutex protecting %local_addr_list and %local_page_list.
  * @peer_work: Work item to broadcast peer addresses to VMs.
  * @ptp_data: PTP state data
+ * @vpd_sn: Serial number read from VPD
  * @monitor_work: Hardware monitor workitem
  * @biu_lock: BIU (bus interface unit) lock
  * @last_irq_cpu: Last CPU to handle a possible test interrupt.  This
@@ -777,6 +784,9 @@ struct efx_nic {
 	/* The following fields should be written very rarely */
 
 	char name[IFNAMSIZ];
+	struct list_head node;
+	struct efx_nic *primary;
+	struct list_head secondary_list;
 	struct pci_dev *pci_dev;
 	unsigned int port_num;
 	const struct efx_nic_type *type;
@@ -828,6 +838,7 @@ struct efx_nic {
 	unsigned int rx_prefix_size;
 	int rx_packet_hash_offset;
 	int rx_packet_len_offset;
+	int rx_packet_ts_offset;
 	u8 rx_hash_key[40];
 	u32 rx_indir_table[128];
 	bool rx_scatter;
@@ -852,10 +863,14 @@ struct efx_nic {
 	struct work_struct mac_work;
 	bool port_enabled;
 
+	bool mc_bist_for_other_fn;
 	bool port_initialized;
 	struct net_device *net_dev;
 
 	struct efx_buffer stats_buffer;
+	u64 rx_nodesc_drops_total;
+	u64 rx_nodesc_drops_while_down;
+	bool rx_nodesc_drops_prev_state;
 
 	unsigned int phy_type;
 	const struct efx_phy_operations *phy_op;
@@ -907,6 +922,8 @@ struct efx_nic {
 
 	struct efx_ptp_data *ptp_data;
 
+	char *vpd_sn;
+
 	/* The following fields may be written more often */
 
 	struct delayed_work monitor_work ____cacheline_aligned_in_smp;
@@ -959,6 +976,7 @@ struct efx_mtd_partition {
  * @update_stats: Update statistics not provided by event handling.
  *	Either argument may be %NULL.
  * @start_stats: Start the regular fetching of statistics
+ * @pull_stats: Pull stats from the NIC and wait until they arrive.
  * @stop_stats: Stop the regular fetching of statistics
  * @set_id_led: Set state of identifying LED or revert to automatic function
  * @push_irq_moderation: Apply interrupt moderation value
@@ -997,7 +1015,7 @@ struct efx_mtd_partition {
  * @tx_init: Initialise TX queue on the NIC
  * @tx_remove: Free resources for TX queue
  * @tx_write: Write TX descriptors and doorbell
- * @rx_push_indir_table: Write RSS indirection table to the NIC
+ * @rx_push_rss_config: Write RSS hash key and indirection table to the NIC
  * @rx_probe: Allocate resources for RX queue
  * @rx_init: Initialise RX queue on the NIC
  * @rx_remove: Free resources for RX queue
@@ -1017,7 +1035,8 @@ struct efx_mtd_partition {
  * @filter_insert: add or replace a filter
  * @filter_remove_safe: remove a filter by ID, carefully
  * @filter_get_safe: retrieve a filter by ID, carefully
- * @filter_clear_rx: remove RX filters by priority
+ * @filter_clear_rx: Remove all RX filters whose priority is less than or
+ *	equal to the given priority and is not %EFX_FILTER_PRI_AUTO
  * @filter_count_rx_used: Get the number of filters in use at a given priority
  * @filter_get_rx_id_limit: Get maximum value of a filter id, plus 1
  * @filter_get_rx_ids: Get list of RX filters at a given priority
@@ -1037,6 +1056,12 @@ struct efx_mtd_partition {
  * @mtd_sync: Wait for write-back to complete on MTD partition.  This
  *	also notifies the driver that a writer has finished using this
  *	partition.
+ * @ptp_write_host_time: Send host time to MC as part of sync protocol
+ * @ptp_set_ts_sync_events: Enable or disable sync events for inline RX
+ *	timestamping, possibly only temporarily for the purposes of a reset.
+ * @ptp_set_ts_config: Set hardware timestamp configuration.  The flags
+ *	and tx_type will already have been validated but this operation
+ *	must validate and update rx_filter.
  * @revision: Hardware architecture revision
  * @txd_ptr_tbl_base: TX descriptor ring base address
  * @rxd_ptr_tbl_base: RX descriptor ring base address
@@ -1046,6 +1071,7 @@ struct efx_mtd_partition {
  * @max_dma_mask: Maximum possible DMA mask
  * @rx_prefix_size: Size of RX prefix before packet data
  * @rx_hash_offset: Offset of RX flow hash within prefix
+ * @rx_ts_offset: Offset of timestamp within prefix
  * @rx_buffer_padding: Size of padding at end of RX packet
  * @can_rx_scatter: NIC is able to scatter packets to multiple buffers
  * @always_rx_scatter: NIC will always scatter packets to multiple buffers
@@ -1055,6 +1081,7 @@ struct efx_mtd_partition {
  * @offload_features: net_device feature flags for protocol offload
  *	features implemented in hardware
  * @mcdi_max_ver: Maximum MCDI version supported
+ * @hwtstamp_filters: Mask of hardware timestamp filter types supported
  */
 struct efx_nic_type {
 	unsigned int (*mem_map_size)(struct efx_nic *efx);
@@ -1077,6 +1104,7 @@ struct efx_nic_type {
 	size_t (*update_stats)(struct efx_nic *efx, u64 *full_stats,
 			       struct rtnl_link_stats64 *core_stats);
 	void (*start_stats)(struct efx_nic *efx);
+	void (*pull_stats)(struct efx_nic *efx);
 	void (*stop_stats)(struct efx_nic *efx);
 	void (*set_id_led)(struct efx_nic *efx, enum efx_led_mode mode);
 	void (*push_irq_moderation)(struct efx_channel *channel);
@@ -1105,7 +1133,7 @@ struct efx_nic_type {
 	void (*tx_init)(struct efx_tx_queue *tx_queue);
 	void (*tx_remove)(struct efx_tx_queue *tx_queue);
 	void (*tx_write)(struct efx_tx_queue *tx_queue);
-	void (*rx_push_indir_table)(struct efx_nic *efx);
+	void (*rx_push_rss_config)(struct efx_nic *efx);
 	int (*rx_probe)(struct efx_rx_queue *rx_queue);
 	void (*rx_init)(struct efx_rx_queue *rx_queue);
 	void (*rx_remove)(struct efx_rx_queue *rx_queue);
@@ -1130,8 +1158,8 @@ struct efx_nic_type {
 	int (*filter_get_safe)(struct efx_nic *efx,
 			       enum efx_filter_priority priority,
 			       u32 filter_id, struct efx_filter_spec *);
-	void (*filter_clear_rx)(struct efx_nic *efx,
-				enum efx_filter_priority priority);
+	int (*filter_clear_rx)(struct efx_nic *efx,
+			       enum efx_filter_priority priority);
 	u32 (*filter_count_rx_used)(struct efx_nic *efx,
 				    enum efx_filter_priority priority);
 	u32 (*filter_get_rx_id_limit)(struct efx_nic *efx);
@@ -1155,6 +1183,9 @@ struct efx_nic_type {
 	int (*mtd_sync)(struct mtd_info *mtd);
 #endif
 	void (*ptp_write_host_time)(struct efx_nic *efx, u32 host_time);
+	int (*ptp_set_ts_sync_events)(struct efx_nic *efx, bool en, bool temp);
+	int (*ptp_set_ts_config)(struct efx_nic *efx,
+				 struct hwtstamp_config *init);
 
 	int revision;
 	unsigned int txd_ptr_tbl_base;
@@ -1165,6 +1196,7 @@ struct efx_nic_type {
 	u64 max_dma_mask;
 	unsigned int rx_prefix_size;
 	unsigned int rx_hash_offset;
+	unsigned int rx_ts_offset;
 	unsigned int rx_buffer_padding;
 	bool can_rx_scatter;
 	bool always_rx_scatter;
@@ -1173,6 +1205,7 @@ struct efx_nic_type {
 	netdev_features_t offload_features;
 	int mcdi_max_ver;
 	unsigned int max_rx_ip_filters;
+	u32 hwtstamp_filters;
 };
 
 /**************************************************************************
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c
index 9c90bf56090f..79226b19e3c4 100644
--- a/drivers/net/ethernet/sfc/nic.c
+++ b/drivers/net/ethernet/sfc/nic.c
@@ -519,3 +519,15 @@ void efx_nic_update_stats(const struct efx_hw_stat_desc *desc, size_t count,
 		}
 	}
 }
+
+void efx_nic_fix_nodesc_drop_stat(struct efx_nic *efx, u64 *rx_nodesc_drops)
+{
+	/* if down, or this is the first update after coming up */
+	if (!(efx->net_dev->flags & IFF_UP) || !efx->rx_nodesc_drops_prev_state)
+		efx->rx_nodesc_drops_while_down +=
+			*rx_nodesc_drops - efx->rx_nodesc_drops_total;
+	efx->rx_nodesc_drops_total = *rx_nodesc_drops;
+	efx->rx_nodesc_drops_prev_state = !!(efx->net_dev->flags & IFF_UP);
+	*rx_nodesc_drops -= efx->rx_nodesc_drops_while_down;
+}
+
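The helper above subtracts any no-descriptor drops that accumulated while the interface was down (or before the first stats refresh after it came up), so the exported counter only reflects drops that happened while the port could reasonably have been receiving. A minimal user-space sketch of the same bookkeeping, with hypothetical names and hand-fed raw counter values:

/* Standalone sketch of the rx_nodesc_drops adjustment above.  The
 * struct fields and update rule mirror the driver logic; the names
 * and the sample values are illustrative only.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct drop_state {
	uint64_t total;		/* last raw counter value seen */
	uint64_t while_down;	/* drops accumulated while !IFF_UP */
	bool prev_up;		/* interface state at previous update */
};

static uint64_t fix_nodesc_drops(struct drop_state *s, bool up, uint64_t raw)
{
	/* if down, or this is the first update after coming up */
	if (!up || !s->prev_up)
		s->while_down += raw - s->total;
	s->total = raw;
	s->prev_up = up;
	return raw - s->while_down;	/* value exposed to ethtool -S */
}

int main(void)
{
	struct drop_state s = { 0 };

	printf("%llu\n", (unsigned long long)fix_nodesc_drops(&s, false, 10)); /* 0: all drops while down */
	printf("%llu\n", (unsigned long long)fix_nodesc_drops(&s, true, 15));  /* 0: first update after up */
	printf("%llu\n", (unsigned long long)fix_nodesc_drops(&s, true, 25));  /* 10: drops while up */
	return 0;
}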
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index 91c63ec79c5f..a001fae1a8d7 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -412,8 +412,8 @@ enum {
 	EF10_STAT_rx_dp_q_disabled_packets,
 	EF10_STAT_rx_dp_di_dropped_packets,
 	EF10_STAT_rx_dp_streaming_packets,
-	EF10_STAT_rx_dp_emerg_fetch,
-	EF10_STAT_rx_dp_emerg_wait,
+	EF10_STAT_rx_dp_hlb_fetch,
+	EF10_STAT_rx_dp_hlb_wait,
 	EF10_STAT_COUNT
 };
 
@@ -554,12 +554,29 @@ int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf,
 			      bool spoofchk);
 
 struct ethtool_ts_info;
-void efx_ptp_probe(struct efx_nic *efx);
-int efx_ptp_ioctl(struct efx_nic *efx, struct ifreq *ifr, int cmd);
+int efx_ptp_probe(struct efx_nic *efx, struct efx_channel *channel);
+void efx_ptp_defer_probe_with_channel(struct efx_nic *efx);
+void efx_ptp_remove(struct efx_nic *efx);
+int efx_ptp_set_ts_config(struct efx_nic *efx, struct ifreq *ifr);
+int efx_ptp_get_ts_config(struct efx_nic *efx, struct ifreq *ifr);
 void efx_ptp_get_ts_info(struct efx_nic *efx, struct ethtool_ts_info *ts_info);
 bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
+int efx_ptp_get_mode(struct efx_nic *efx);
+int efx_ptp_change_mode(struct efx_nic *efx, bool enable_wanted,
+			unsigned int new_mode);
 int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
 void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev);
+size_t efx_ptp_describe_stats(struct efx_nic *efx, u8 *strings);
+size_t efx_ptp_update_stats(struct efx_nic *efx, u64 *stats);
+void efx_time_sync_event(struct efx_channel *channel, efx_qword_t *ev);
+void __efx_rx_skb_attach_timestamp(struct efx_channel *channel,
+				   struct sk_buff *skb);
+static inline void efx_rx_skb_attach_timestamp(struct efx_channel *channel,
+					       struct sk_buff *skb)
+{
+	if (channel->sync_events_state == SYNC_EVENTS_VALID)
+		__efx_rx_skb_attach_timestamp(channel, skb);
+}
 void efx_ptp_start_datapath(struct efx_nic *efx);
 void efx_ptp_stop_datapath(struct efx_nic *efx);
 
@@ -678,8 +695,8 @@ int efx_farch_filter_remove_safe(struct efx_nic *efx,
 int efx_farch_filter_get_safe(struct efx_nic *efx,
 			      enum efx_filter_priority priority, u32 filter_id,
 			      struct efx_filter_spec *);
-void efx_farch_filter_clear_rx(struct efx_nic *efx,
-			       enum efx_filter_priority priority);
+int efx_farch_filter_clear_rx(struct efx_nic *efx,
+			      enum efx_filter_priority priority);
 u32 efx_farch_filter_count_rx_used(struct efx_nic *efx,
 				   enum efx_filter_priority priority);
 u32 efx_farch_filter_get_rx_id_limit(struct efx_nic *efx);
@@ -747,10 +764,6 @@ int falcon_reset_xaui(struct efx_nic *efx);
 void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw);
 void efx_farch_init_common(struct efx_nic *efx);
 void efx_ef10_handle_drain_event(struct efx_nic *efx);
-static inline void efx_nic_push_rx_indir_table(struct efx_nic *efx)
-{
-	efx->type->rx_push_indir_table(efx);
-}
 void efx_farch_rx_push_indir_table(struct efx_nic *efx);
 
 int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
@@ -774,6 +787,7 @@ size_t efx_nic_describe_stats(const struct efx_hw_stat_desc *desc, size_t count,
 void efx_nic_update_stats(const struct efx_hw_stat_desc *desc, size_t count,
 			  const unsigned long *mask, u64 *stats,
 			  const void *dma_buf, bool accumulate);
+void efx_nic_fix_nodesc_drop_stat(struct efx_nic *efx, u64 *stat);
 
 #define EFX_MAX_FLUSH_TIME 5000
 
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index 3dd39dcfe36b..eb75fbd11a01 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -62,7 +62,7 @@
 #define	SYNCHRONISATION_GRANULARITY_NS	200
 
 /* Minimum permitted length of a (corrected) synchronisation time */
-#define	MIN_SYNCHRONISATION_NS		120
+#define	DEFAULT_MIN_SYNCHRONISATION_NS	120
 
 /* Maximum permitted length of a (corrected) synchronisation time */
 #define	MAX_SYNCHRONISATION_NS		1000
@@ -195,26 +195,29 @@ struct efx_ptp_event_rx {
 /**
  * struct efx_ptp_timeset - Synchronisation between host and MC
  * @host_start: Host time immediately before hardware timestamp taken
- * @seconds: Hardware timestamp, seconds
- * @nanoseconds: Hardware timestamp, nanoseconds
+ * @major: Hardware timestamp, major
+ * @minor: Hardware timestamp, minor
  * @host_end: Host time immediately after hardware timestamp taken
- * @waitns: Number of nanoseconds between hardware timestamp being read and
+ * @wait: Number of NIC clock ticks between hardware timestamp being read and
  *          host end time being seen
  * @window: Difference of host_end and host_start
  * @valid: Whether this timeset is valid
  */
 struct efx_ptp_timeset {
 	u32 host_start;
-	u32 seconds;
-	u32 nanoseconds;
+	u32 major;
+	u32 minor;
 	u32 host_end;
-	u32 waitns;
+	u32 wait;
 	u32 window;	/* Derived: end - start, allowing for wrap */
 };
 
 /**
  * struct efx_ptp_data - Precision Time Protocol (PTP) state
- * @channel: The PTP channel
+ * @efx: The NIC context
+ * @channel: The PTP channel (Siena only)
+ * @rx_ts_inline: Flag for whether RX timestamps are inline (else they are
+ *	separate events)
  * @rxq: Receive queue (awaiting timestamps)
  * @txq: Transmit queue
  * @evt_list: List of MC receive events awaiting packets
@@ -231,41 +234,42 @@ struct efx_ptp_timeset {
  * @config: Current timestamp configuration
  * @enabled: PTP operation enabled
  * @mode: Mode in which PTP operating (PTP version)
+ * @time_format: Time format supported by this NIC
+ * @ns_to_nic_time: Function to convert from scalar nanoseconds to NIC time
+ * @nic_to_kernel_time: Function to convert from NIC to kernel time
+ * @min_synchronisation_ns: Minimum acceptable corrected sync window
+ * @ts_corrections.tx: Required driver correction of transmit timestamps
+ * @ts_corrections.rx: Required driver correction of receive timestamps
+ * @ts_corrections.pps_out: PPS output error (information only)
+ * @ts_corrections.pps_in: Required driver correction of PPS input timestamps
  * @evt_frags: Partly assembled PTP events
  * @evt_frag_idx: Current fragment number
  * @evt_code: Last event code
  * @start: Address at which MC indicates ready for synchronisation
  * @host_time_pps: Host time at last PPS
- * @last_sync_ns: Last number of nanoseconds between readings when synchronising
- * @base_sync_ns: Number of nanoseconds for last synchronisation.
- * @base_sync_valid: Whether base_sync_time is valid.
  * @current_adjfreq: Current ppb adjustment.
- * @phc_clock: Pointer to registered phc device
+ * @phc_clock: Pointer to registered phc device (if primary function)
  * @phc_clock_info: Registration structure for phc device
  * @pps_work: pps work task for handling pps events
  * @pps_workwq: pps work queue
  * @nic_ts_enabled: Flag indicating if NIC generated TS events are handled
  * @txbuf: Buffer for use when transmitting (PTP) packets to MC (avoids
  *         allocations in main data path).
- * @debug_ptp_dir: PTP debugfs directory
- * @missed_rx_sync: Number of packets received without syncrhonisation.
  * @good_syncs: Number of successful synchronisations.
- * @no_time_syncs: Number of synchronisations with no good times.
- * @bad_sync_durations: Number of synchronisations with bad durations.
+ * @fast_syncs: Number of synchronisations requiring short delay
  * @bad_syncs: Number of failed synchronisations.
- * @last_sync_time: Number of nanoseconds for last synchronisation.
  * @sync_timeouts: Number of synchronisation timeouts
- * @fast_syncs: Number of synchronisations requiring short delay
- * @min_sync_delta: Minimum time between event and synchronisation
- * @max_sync_delta: Maximum time between event and synchronisation
- * @average_sync_delta: Average time between event and synchronisation.
- *                      Modified moving average.
- * @last_sync_delta: Last time between event and synchronisation
- * @mc_stats: Context value for MC statistics
+ * @no_time_syncs: Number of synchronisations with no good times.
+ * @invalid_sync_windows: Number of sync windows with bad durations.
+ * @undersize_sync_windows: Number of corrected sync windows that are too small
+ * @oversize_sync_windows: Number of corrected sync windows that are too large
+ * @rx_no_timestamp: Number of packets received without a timestamp.
  * @timeset: Last set of synchronisation statistics.
  */
 struct efx_ptp_data {
+	struct efx_nic *efx;
 	struct efx_channel *channel;
+	bool rx_ts_inline;
 	struct sk_buff_head rxq;
 	struct sk_buff_head txq;
 	struct list_head evt_list;
@@ -282,14 +286,22 @@ struct efx_ptp_data {
 	struct hwtstamp_config config;
 	bool enabled;
 	unsigned int mode;
+	unsigned int time_format;
+	void (*ns_to_nic_time)(s64 ns, u32 *nic_major, u32 *nic_minor);
+	ktime_t (*nic_to_kernel_time)(u32 nic_major, u32 nic_minor,
+				      s32 correction);
+	unsigned int min_synchronisation_ns;
+	struct {
+		s32 tx;
+		s32 rx;
+		s32 pps_out;
+		s32 pps_in;
+	} ts_corrections;
 	efx_qword_t evt_frags[MAX_EVENT_FRAGS];
 	int evt_frag_idx;
 	int evt_code;
 	struct efx_buffer start;
 	struct pps_event_time host_time_pps;
-	unsigned last_sync_ns;
-	unsigned base_sync_ns;
-	bool base_sync_valid;
 	s64 current_adjfreq;
 	struct ptp_clock *phc_clock;
 	struct ptp_clock_info phc_clock_info;
@@ -297,6 +309,16 @@ struct efx_ptp_data {
 	struct workqueue_struct *pps_workwq;
 	bool nic_ts_enabled;
 	MCDI_DECLARE_BUF(txbuf, MC_CMD_PTP_IN_TRANSMIT_LENMAX);
+
+	unsigned int good_syncs;
+	unsigned int fast_syncs;
+	unsigned int bad_syncs;
+	unsigned int sync_timeouts;
+	unsigned int no_time_syncs;
+	unsigned int invalid_sync_windows;
+	unsigned int undersize_sync_windows;
+	unsigned int oversize_sync_windows;
+	unsigned int rx_no_timestamp;
 	struct efx_ptp_timeset
 	timeset[MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MAXNUM];
 };
@@ -309,19 +331,263 @@ static int efx_phc_settime(struct ptp_clock_info *ptp,
 static int efx_phc_enable(struct ptp_clock_info *ptp,
 			  struct ptp_clock_request *request, int on);
 
+#define PTP_SW_STAT(ext_name, field_name)				\
+	{ #ext_name, 0, offsetof(struct efx_ptp_data, field_name) }
+#define PTP_MC_STAT(ext_name, mcdi_name)				\
+	{ #ext_name, 32, MC_CMD_PTP_OUT_STATUS_STATS_ ## mcdi_name ## _OFST }
+static const struct efx_hw_stat_desc efx_ptp_stat_desc[] = {
+	PTP_SW_STAT(ptp_good_syncs, good_syncs),
+	PTP_SW_STAT(ptp_fast_syncs, fast_syncs),
+	PTP_SW_STAT(ptp_bad_syncs, bad_syncs),
+	PTP_SW_STAT(ptp_sync_timeouts, sync_timeouts),
+	PTP_SW_STAT(ptp_no_time_syncs, no_time_syncs),
+	PTP_SW_STAT(ptp_invalid_sync_windows, invalid_sync_windows),
+	PTP_SW_STAT(ptp_undersize_sync_windows, undersize_sync_windows),
+	PTP_SW_STAT(ptp_oversize_sync_windows, oversize_sync_windows),
+	PTP_SW_STAT(ptp_rx_no_timestamp, rx_no_timestamp),
+	PTP_MC_STAT(ptp_tx_timestamp_packets, TX),
+	PTP_MC_STAT(ptp_rx_timestamp_packets, RX),
+	PTP_MC_STAT(ptp_timestamp_packets, TS),
+	PTP_MC_STAT(ptp_filter_matches, FM),
+	PTP_MC_STAT(ptp_non_filter_matches, NFM),
+};
+#define PTP_STAT_COUNT ARRAY_SIZE(efx_ptp_stat_desc)
+static const unsigned long efx_ptp_stat_mask[] = {
+	[0 ... BITS_TO_LONGS(PTP_STAT_COUNT) - 1] = ~0UL,
+};
+
+size_t efx_ptp_describe_stats(struct efx_nic *efx, u8 *strings)
+{
+	if (!efx->ptp_data)
+		return 0;
+
+	return efx_nic_describe_stats(efx_ptp_stat_desc, PTP_STAT_COUNT,
+				      efx_ptp_stat_mask, strings);
+}
+
+size_t efx_ptp_update_stats(struct efx_nic *efx, u64 *stats)
+{
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_STATUS_LEN);
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_PTP_OUT_STATUS_LEN);
+	size_t i;
+	int rc;
+
+	if (!efx->ptp_data)
+		return 0;
+
+	/* Copy software statistics */
+	for (i = 0; i < PTP_STAT_COUNT; i++) {
+		if (efx_ptp_stat_desc[i].dma_width)
+			continue;
+		stats[i] = *(unsigned int *)((char *)efx->ptp_data +
+					     efx_ptp_stat_desc[i].offset);
+	}
+
+	/* Fetch MC statistics.  We *must* fill in all statistics or
+	 * risk leaking kernel memory to userland, so if the MCDI
+	 * request fails we pretend we got zeroes.
+	 */
+	MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_STATUS);
+	MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
+	rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
+			  outbuf, sizeof(outbuf), NULL);
+	if (rc) {
+		netif_err(efx, hw, efx->net_dev,
+			  "MC_CMD_PTP_OP_STATUS failed (%d)\n", rc);
+		memset(outbuf, 0, sizeof(outbuf));
+	}
+	efx_nic_update_stats(efx_ptp_stat_desc, PTP_STAT_COUNT,
+			     efx_ptp_stat_mask,
+			     stats, _MCDI_PTR(outbuf, 0), false);
+
+	return PTP_STAT_COUNT;
+}
+
+/* For Siena platforms NIC time is in seconds and nanoseconds */
+static void efx_ptp_ns_to_s_ns(s64 ns, u32 *nic_major, u32 *nic_minor)
+{
+	struct timespec ts = ns_to_timespec(ns);
+	*nic_major = ts.tv_sec;
+	*nic_minor = ts.tv_nsec;
+}
+
+static ktime_t efx_ptp_s_ns_to_ktime_correction(u32 nic_major, u32 nic_minor,
+						s32 correction)
+{
+	ktime_t kt = ktime_set(nic_major, nic_minor);
+	if (correction >= 0)
+		kt = ktime_add_ns(kt, (u64)correction);
+	else
+		kt = ktime_sub_ns(kt, (u64)-correction);
+	return kt;
+}
+
+/* To convert from s27 format to ns we multiply then divide by a power of 2.
+ * For the conversion from ns to s27, the operation is also converted to a
+ * multiply and shift.
+ */
+#define S27_TO_NS_SHIFT	(27)
+#define NS_TO_S27_MULT	(((1ULL << 63) + NSEC_PER_SEC / 2) / NSEC_PER_SEC)
+#define NS_TO_S27_SHIFT	(63 - S27_TO_NS_SHIFT)
+#define S27_MINOR_MAX	(1 << S27_TO_NS_SHIFT)
+
+/* For Huntington platforms NIC time is in seconds and fractions of a second
+ * where the minor register only uses 27 bits in units of 2^-27s.
+ */
+static void efx_ptp_ns_to_s27(s64 ns, u32 *nic_major, u32 *nic_minor)
+{
+	struct timespec ts = ns_to_timespec(ns);
+	u32 maj = ts.tv_sec;
+	u32 min = (u32)(((u64)ts.tv_nsec * NS_TO_S27_MULT +
+			 (1ULL << (NS_TO_S27_SHIFT - 1))) >> NS_TO_S27_SHIFT);
+
+	/* The conversion can result in the minor value exceeding the maximum.
+	 * In this case, round up to the next second.
+	 */
+	if (min >= S27_MINOR_MAX) {
+		min -= S27_MINOR_MAX;
+		maj++;
+	}
+
+	*nic_major = maj;
+	*nic_minor = min;
+}
+
+static inline ktime_t efx_ptp_s27_to_ktime(u32 nic_major, u32 nic_minor)
+{
+	u32 ns = (u32)(((u64)nic_minor * NSEC_PER_SEC +
+			(1ULL << (S27_TO_NS_SHIFT - 1))) >> S27_TO_NS_SHIFT);
+	return ktime_set(nic_major, ns);
+}
+
+static ktime_t efx_ptp_s27_to_ktime_correction(u32 nic_major, u32 nic_minor,
+					       s32 correction)
+{
+	/* Apply the correction and deal with carry */
+	nic_minor += correction;
+	if ((s32)nic_minor < 0) {
+		nic_minor += S27_MINOR_MAX;
+		nic_major--;
+	} else if (nic_minor >= S27_MINOR_MAX) {
+		nic_minor -= S27_MINOR_MAX;
+		nic_major++;
+	}
+
+	return efx_ptp_s27_to_ktime(nic_major, nic_minor);
+}
+
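On Huntington the minor timestamp counts 2^-27 s units, so the conversion to and from nanoseconds reduces to a 64-bit multiply and shift with rounding; NS_TO_S27_MULT bakes in 2^63/NSEC_PER_SEC so the forward path needs no division. A stand-alone round-trip check of that arithmetic (constants copied from the defines above; helper names and main() are illustrative only):

/* Round-trip check of the s27 <-> ns conversion used above. */
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC	1000000000ULL
#define S27_TO_NS_SHIFT	27
#define NS_TO_S27_MULT	(((1ULL << 63) + NSEC_PER_SEC / 2) / NSEC_PER_SEC)
#define NS_TO_S27_SHIFT	(63 - S27_TO_NS_SHIFT)
#define S27_MINOR_MAX	(1U << S27_TO_NS_SHIFT)

static uint32_t ns_to_s27(uint32_t ns)
{
	/* (ns / 1e9) * 2^27, computed as a multiply and shift with rounding */
	return (uint32_t)(((uint64_t)ns * NS_TO_S27_MULT +
			   (1ULL << (NS_TO_S27_SHIFT - 1))) >> NS_TO_S27_SHIFT);
}

static uint32_t s27_to_ns(uint32_t minor)
{
	/* (minor / 2^27) * 1e9, again with rounding */
	return (uint32_t)(((uint64_t)minor * NSEC_PER_SEC +
			   (1ULL << (S27_TO_NS_SHIFT - 1))) >> S27_TO_NS_SHIFT);
}

int main(void)
{
	uint32_t ns = 500000000;		/* half a second */
	uint32_t minor = ns_to_s27(ns);		/* 67108864 = 2^26 */

	/* One s27 tick is ~7.45 ns, so the round trip is accurate to that */
	printf("ns=%u -> minor=%u (max %u) -> ns=%u\n",
	       ns, minor, S27_MINOR_MAX, s27_to_ns(minor));
	return 0;
}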
+/* Get PTP attributes and set up time conversions */
+static int efx_ptp_get_attributes(struct efx_nic *efx)
+{
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_GET_ATTRIBUTES_LEN);
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_PTP_OUT_GET_ATTRIBUTES_LEN);
+	struct efx_ptp_data *ptp = efx->ptp_data;
+	int rc;
+	u32 fmt;
+	size_t out_len;
+
+	/* Get the PTP attributes. If the NIC doesn't support the operation we
+	 * use the default format for compatibility with older NICs i.e.
+	 * seconds and nanoseconds.
+	 */
+	MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_GET_ATTRIBUTES);
+	MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
+	rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
+			  outbuf, sizeof(outbuf), &out_len);
+	if (rc == 0)
+		fmt = MCDI_DWORD(outbuf, PTP_OUT_GET_ATTRIBUTES_TIME_FORMAT);
+	else if (rc == -EINVAL)
+		fmt = MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_NANOSECONDS;
+	else
+		return rc;
+
+	if (fmt == MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_27FRACTION) {
+		ptp->ns_to_nic_time = efx_ptp_ns_to_s27;
+		ptp->nic_to_kernel_time = efx_ptp_s27_to_ktime_correction;
+	} else if (fmt == MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_NANOSECONDS) {
+		ptp->ns_to_nic_time = efx_ptp_ns_to_s_ns;
+		ptp->nic_to_kernel_time = efx_ptp_s_ns_to_ktime_correction;
+	} else {
+		return -ERANGE;
+	}
+
+	ptp->time_format = fmt;
+
+	/* MC_CMD_PTP_OP_GET_ATTRIBUTES is an extended version of an older
+	 * operation MC_CMD_PTP_OP_GET_TIME_FORMAT that also returns a value
+	 * to use for the minimum acceptable corrected synchronization window.
+	 * If we have the extra information, store it. For older firmware that
+	 * does not implement the extended command, use the default value.
+	 */
+	if (rc == 0 && out_len >= MC_CMD_PTP_OUT_GET_ATTRIBUTES_LEN)
+		ptp->min_synchronisation_ns =
+			MCDI_DWORD(outbuf,
+				   PTP_OUT_GET_ATTRIBUTES_SYNC_WINDOW_MIN);
+	else
+		ptp->min_synchronisation_ns = DEFAULT_MIN_SYNCHRONISATION_NS;
+
+	return 0;
+}
+
+/* Get PTP timestamp corrections */
+static int efx_ptp_get_timestamp_corrections(struct efx_nic *efx)
+{
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_GET_TIMESTAMP_CORRECTIONS_LEN);
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_LEN);
+	int rc;
+
+	/* Get the timestamp corrections from the NIC. If this operation is
+	 * not supported (older NICs) then no correction is required.
+	 */
+	MCDI_SET_DWORD(inbuf, PTP_IN_OP,
+		       MC_CMD_PTP_OP_GET_TIMESTAMP_CORRECTIONS);
+	MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
+
+	rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
+			  outbuf, sizeof(outbuf), NULL);
+	if (rc == 0) {
+		efx->ptp_data->ts_corrections.tx = MCDI_DWORD(outbuf,
+			PTP_OUT_GET_TIMESTAMP_CORRECTIONS_TRANSMIT);
+		efx->ptp_data->ts_corrections.rx = MCDI_DWORD(outbuf,
+			PTP_OUT_GET_TIMESTAMP_CORRECTIONS_RECEIVE);
+		efx->ptp_data->ts_corrections.pps_out = MCDI_DWORD(outbuf,
+			PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_OUT);
+		efx->ptp_data->ts_corrections.pps_in = MCDI_DWORD(outbuf,
+			PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_IN);
+	} else if (rc == -EINVAL) {
+		efx->ptp_data->ts_corrections.tx = 0;
+		efx->ptp_data->ts_corrections.rx = 0;
+		efx->ptp_data->ts_corrections.pps_out = 0;
+		efx->ptp_data->ts_corrections.pps_in = 0;
+	} else {
+		return rc;
+	}
+
+	return 0;
+}
+
 /* Enable MCDI PTP support. */
 static int efx_ptp_enable(struct efx_nic *efx)
 {
 	MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_ENABLE_LEN);
+	MCDI_DECLARE_BUF_OUT_OR_ERR(outbuf, 0);
+	int rc;
 
 	MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_ENABLE);
 	MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
 	MCDI_SET_DWORD(inbuf, PTP_IN_ENABLE_QUEUE,
-		       efx->ptp_data->channel->channel);
+		       efx->ptp_data->channel ?
+		       efx->ptp_data->channel->channel : 0);
 	MCDI_SET_DWORD(inbuf, PTP_IN_ENABLE_MODE, efx->ptp_data->mode);
 
-	return efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
-			    NULL, 0, NULL);
+	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
+				outbuf, sizeof(outbuf), NULL);
+	rc = (rc == -EALREADY) ? 0 : rc;
+	if (rc)
+		efx_mcdi_display_error(efx, MC_CMD_PTP,
+				       MC_CMD_PTP_IN_ENABLE_LEN,
+				       outbuf, sizeof(outbuf), rc);
+	return rc;
 }
 
 /* Disable MCDI PTP support.
@@ -332,11 +598,19 @@ static int efx_ptp_enable(struct efx_nic *efx)
 static int efx_ptp_disable(struct efx_nic *efx)
 {
 	MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_DISABLE_LEN);
+	MCDI_DECLARE_BUF_OUT_OR_ERR(outbuf, 0);
+	int rc;
 
 	MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_DISABLE);
 	MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
-	return efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
-			    NULL, 0, NULL);
+	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
+				outbuf, sizeof(outbuf), NULL);
+	rc = (rc == -EALREADY) ? 0 : rc;
+	if (rc)
+		efx_mcdi_display_error(efx, MC_CMD_PTP,
+				       MC_CMD_PTP_IN_DISABLE_LEN,
+				       outbuf, sizeof(outbuf), rc);
+	return rc;
 }
 
 static void efx_ptp_deliver_rx_queue(struct sk_buff_head *q)
@@ -404,11 +678,10 @@ static void efx_ptp_read_timeset(MCDI_DECLARE_STRUCT_PTR(data),
 	unsigned start_ns, end_ns;
 
 	timeset->host_start = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_HOSTSTART);
-	timeset->seconds = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_SECONDS);
-	timeset->nanoseconds = MCDI_DWORD(data,
-					 PTP_OUT_SYNCHRONIZE_NANOSECONDS);
+	timeset->major = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_MAJOR);
+	timeset->minor = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_MINOR);
 	timeset->host_end = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_HOSTEND),
-	timeset->waitns = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_WAITNS);
+	timeset->wait = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_WAITNS);
 
 	/* Ignore seconds */
 	start_ns = timeset->host_start & MC_NANOSECOND_MASK;
@@ -437,62 +710,73 @@ efx_ptp_process_times(struct efx_nic *efx, MCDI_DECLARE_STRUCT_PTR(synch_buf),
 		MCDI_VAR_ARRAY_LEN(response_length,
 				   PTP_OUT_SYNCHRONIZE_TIMESET);
 	unsigned i;
-	unsigned total;
 	unsigned ngood = 0;
 	unsigned last_good = 0;
 	struct efx_ptp_data *ptp = efx->ptp_data;
 	u32 last_sec;
 	u32 start_sec;
 	struct timespec delta;
+	ktime_t mc_time;
 
 	if (number_readings == 0)
 		return -EAGAIN;
 
-	/* Read the set of results and increment stats for any results that
-	 * appera to be erroneous.
+	/* Read the set of results and find the last good host-MC
+	 * synchronization result. The MC times when it finishes reading the
+	 * host time so the corrected window time should be fairly constant
+	 * for a given platform. Increment stats for any results that appear
+	 * to be erroneous.
 	 */
 	for (i = 0; i < number_readings; i++) {
+		s32 window, corrected;
+		struct timespec wait;
+
 		efx_ptp_read_timeset(
 			MCDI_ARRAY_STRUCT_PTR(synch_buf,
 					      PTP_OUT_SYNCHRONIZE_TIMESET, i),
 			&ptp->timeset[i]);
-	}
 
-	/* Find the last good host-MC synchronization result. The MC times
-	 * when it finishes reading the host time so the corrected window time
-	 * should be fairly constant for a given platform.
-	 */
-	total = 0;
-	for (i = 0; i < number_readings; i++)
-		if (ptp->timeset[i].window > ptp->timeset[i].waitns) {
-			unsigned win;
-
-			win = ptp->timeset[i].window - ptp->timeset[i].waitns;
-			if (win >= MIN_SYNCHRONISATION_NS &&
-			    win < MAX_SYNCHRONISATION_NS) {
-				total += ptp->timeset[i].window;
-				ngood++;
-				last_good = i;
-			}
+		wait = ktime_to_timespec(
+			ptp->nic_to_kernel_time(0, ptp->timeset[i].wait, 0));
+		window = ptp->timeset[i].window;
+		corrected = window - wait.tv_nsec;
+
+		/* We expect the uncorrected synchronization window to be at
+		 * least as large as the interval between host start and end
+		 * times. If it is smaller than this then this is most likely
+		 * to be a consequence of the host's time being adjusted.
+		 * Check that the corrected sync window is in a reasonable
+		 * range. If it is out of range it is likely to be because an
+		 * interrupt or other delay occurred between reading the system
+		 * time and writing it to MC memory.
+		 */
+		if (window < SYNCHRONISATION_GRANULARITY_NS) {
+			++ptp->invalid_sync_windows;
+		} else if (corrected >= MAX_SYNCHRONISATION_NS) {
+			++ptp->oversize_sync_windows;
+		} else if (corrected < ptp->min_synchronisation_ns) {
+			++ptp->undersize_sync_windows;
+		} else {
+			ngood++;
+			last_good = i;
 		}
+	}
 
 	if (ngood == 0) {
 		netif_warn(efx, drv, efx->net_dev,
-			   "PTP no suitable synchronisations %dns\n",
-			   ptp->base_sync_ns);
+			   "PTP no suitable synchronisations\n");
 		return -EAGAIN;
 	}
 
-	/* Average minimum this synchronisation */
-	ptp->last_sync_ns = DIV_ROUND_UP(total, ngood);
-	if (!ptp->base_sync_valid || (ptp->last_sync_ns < ptp->base_sync_ns)) {
-		ptp->base_sync_valid = true;
-		ptp->base_sync_ns = ptp->last_sync_ns;
-	}
+	/* Convert the NIC time into kernel time. No correction is required -
+	 * this time is the output of a firmware process.
+	 */
+	mc_time = ptp->nic_to_kernel_time(ptp->timeset[last_good].major,
+					  ptp->timeset[last_good].minor, 0);
 
 	/* Calculate delay from actual PPS to last_time */
-	delta.tv_nsec =
-		ptp->timeset[last_good].nanoseconds +
+	delta = ktime_to_timespec(mc_time);
+	delta.tv_nsec +=
 		last_time->ts_real.tv_nsec -
 		(ptp->timeset[last_good].host_start & MC_NANOSECOND_MASK);
 
@@ -553,6 +837,11 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
 		loops++;
 	}
 
+	if (loops <= 1)
+		++ptp->fast_syncs;
+	if (!time_before(jiffies, timeout))
+		++ptp->sync_timeouts;
+
 	if (ACCESS_ONCE(*start))
 		efx_ptp_send_times(efx, &last_time);
 
@@ -561,9 +850,20 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
 				 MC_CMD_PTP_IN_SYNCHRONIZE_LEN,
 				 synch_buf, sizeof(synch_buf),
 				 &response_length);
-	if (rc == 0)
+	if (rc == 0) {
 		rc = efx_ptp_process_times(efx, synch_buf, response_length,
 					   &last_time);
+		if (rc == 0)
+			++ptp->good_syncs;
+		else
+			++ptp->no_time_syncs;
+	}
+
+	/* Increment the bad syncs counter if the synchronize fails, whatever
+	 * the reason.
+	 */
+	if (rc != 0)
+		++ptp->bad_syncs;
 
 	return rc;
 }
@@ -602,9 +902,10 @@ static int efx_ptp_xmit_skb(struct efx_nic *efx, struct sk_buff *skb)
 		goto fail;
 
 	memset(&timestamps, 0, sizeof(timestamps));
-	timestamps.hwtstamp = ktime_set(
-		MCDI_DWORD(txtime, PTP_OUT_TRANSMIT_SECONDS),
-		MCDI_DWORD(txtime, PTP_OUT_TRANSMIT_NANOSECONDS));
+	timestamps.hwtstamp = ptp_data->nic_to_kernel_time(
+		MCDI_DWORD(txtime, PTP_OUT_TRANSMIT_MAJOR),
+		MCDI_DWORD(txtime, PTP_OUT_TRANSMIT_MINOR),
+		ptp_data->ts_corrections.tx);
 
 	skb_tstamp_tx(skb, &timestamps);
 
@@ -622,6 +923,9 @@ static void efx_ptp_drop_time_expired_events(struct efx_nic *efx)
 	struct list_head *cursor;
 	struct list_head *next;
 
+	if (ptp->rx_ts_inline)
+		return;
+
 	/* Drop time-expired events */
 	spin_lock_bh(&ptp->evt_lock);
 	if (!list_empty(&ptp->evt_list)) {
@@ -655,6 +959,8 @@ static enum ptp_packet_state efx_ptp_match_rx(struct efx_nic *efx,
 	struct efx_ptp_match *match;
 	enum ptp_packet_state rc = PTP_PACKET_STATE_UNMATCHED;
 
+	WARN_ON_ONCE(ptp->rx_ts_inline);
+
 	spin_lock_bh(&ptp->evt_lock);
 	evts_waiting = !list_empty(&ptp->evt_list);
 	spin_unlock_bh(&ptp->evt_lock);
@@ -696,13 +1002,10 @@ static enum ptp_packet_state efx_ptp_match_rx(struct efx_nic *efx,
 /* Process any queued receive events and corresponding packets
  *
  * q is returned with all the packets that are ready for delivery.
- * true is returned if at least one of those packets requires
- * synchronisation.
  */
-static bool efx_ptp_process_events(struct efx_nic *efx, struct sk_buff_head *q)
+static void efx_ptp_process_events(struct efx_nic *efx, struct sk_buff_head *q)
 {
 	struct efx_ptp_data *ptp = efx->ptp_data;
-	bool rc = false;
 	struct sk_buff *skb;
 
 	while ((skb = skb_dequeue(&ptp->rxq))) {
@@ -713,13 +1016,10 @@ static bool efx_ptp_process_events(struct efx_nic *efx, struct sk_buff_head *q)
 			__skb_queue_tail(q, skb);
 		} else if (efx_ptp_match_rx(efx, skb) ==
 			   PTP_PACKET_STATE_MATCHED) {
-			rc = true;
 			__skb_queue_tail(q, skb);
 		} else if (time_after(jiffies, match->expiry)) {
 			match->state = PTP_PACKET_STATE_TIMED_OUT;
-			if (net_ratelimit())
-				netif_warn(efx, rx_err, efx->net_dev,
-					   "PTP packet - no timestamp seen\n");
+			++ptp->rx_no_timestamp;
 			__skb_queue_tail(q, skb);
 		} else {
 			/* Replace unprocessed entry and stop */
@@ -727,8 +1027,6 @@ static bool efx_ptp_process_events(struct efx_nic *efx, struct sk_buff_head *q)
 			break;
 		}
 	}
-
-	return rc;
 }
 
 /* Complete processing of a received packet */
@@ -739,13 +1037,27 @@ static inline void efx_ptp_process_rx(struct efx_nic *efx, struct sk_buff *skb)
 	local_bh_enable();
 }
 
-static int efx_ptp_start(struct efx_nic *efx)
+static void efx_ptp_remove_multicast_filters(struct efx_nic *efx)
+{
+	struct efx_ptp_data *ptp = efx->ptp_data;
+
+	if (ptp->rxfilter_installed) {
+		efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
+					  ptp->rxfilter_general);
+		efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
+					  ptp->rxfilter_event);
+		ptp->rxfilter_installed = false;
+	}
+}
+
+static int efx_ptp_insert_multicast_filters(struct efx_nic *efx)
 {
 	struct efx_ptp_data *ptp = efx->ptp_data;
 	struct efx_filter_spec rxfilter;
 	int rc;
 
-	ptp->reset_required = false;
+	if (!ptp->channel || ptp->rxfilter_installed)
+		return 0;
 
 	/* Must filter on both event and general ports to ensure
 	 * that there is no packet re-ordering.
@@ -778,23 +1090,37 @@ static int efx_ptp_start(struct efx_nic *efx)
 		goto fail;
 	ptp->rxfilter_general = rc;
 
+	ptp->rxfilter_installed = true;
+	return 0;
+
+fail:
+	efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
+				  ptp->rxfilter_event);
+	return rc;
+}
+
+static int efx_ptp_start(struct efx_nic *efx)
+{
+	struct efx_ptp_data *ptp = efx->ptp_data;
+	int rc;
+
+	ptp->reset_required = false;
+
+	rc = efx_ptp_insert_multicast_filters(efx);
+	if (rc)
+		return rc;
+
 	rc = efx_ptp_enable(efx);
 	if (rc != 0)
-		goto fail2;
+		goto fail;
 
 	ptp->evt_frag_idx = 0;
 	ptp->current_adjfreq = 0;
-	ptp->rxfilter_installed = true;
 
 	return 0;
 
-fail2:
-	efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
-				  ptp->rxfilter_general);
 fail:
-	efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
-				  ptp->rxfilter_event);
-
+	efx_ptp_remove_multicast_filters(efx);
 	return rc;
 }
 
@@ -810,13 +1136,7 @@ static int efx_ptp_stop(struct efx_nic *efx)
 
 	rc = efx_ptp_disable(efx);
 
-	if (ptp->rxfilter_installed) {
-		efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
-					  ptp->rxfilter_general);
-		efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
-					  ptp->rxfilter_event);
-		ptp->rxfilter_installed = false;
-	}
+	efx_ptp_remove_multicast_filters(efx);
 
 	/* Make sure RX packets are really delivered */
 	efx_ptp_deliver_rx_queue(&efx->ptp_data->rxq);
@@ -844,7 +1164,7 @@ static void efx_ptp_pps_worker(struct work_struct *work)
 {
 	struct efx_ptp_data *ptp =
 		container_of(work, struct efx_ptp_data, pps_work);
-	struct efx_nic *efx = ptp->channel->efx;
+	struct efx_nic *efx = ptp->efx;
 	struct ptp_clock_event ptp_evt;
 
 	if (efx_ptp_synchronize(efx, PTP_SYNC_ATTEMPTS))
@@ -855,13 +1175,11 @@ static void efx_ptp_pps_worker(struct work_struct *work)
 	ptp_clock_event(ptp->phc_clock, &ptp_evt);
 }
 
-/* Process any pending transmissions and timestamp any received packets.
- */
 static void efx_ptp_worker(struct work_struct *work)
 {
 	struct efx_ptp_data *ptp_data =
 		container_of(work, struct efx_ptp_data, work);
-	struct efx_nic *efx = ptp_data->channel->efx;
+	struct efx_nic *efx = ptp_data->efx;
 	struct sk_buff *skb;
 	struct sk_buff_head tempq;
 
@@ -874,42 +1192,50 @@ static void efx_ptp_worker(struct work_struct *work)
 	efx_ptp_drop_time_expired_events(efx);
 
 	__skb_queue_head_init(&tempq);
-	if (efx_ptp_process_events(efx, &tempq) ||
-	    !skb_queue_empty(&ptp_data->txq)) {
+	efx_ptp_process_events(efx, &tempq);
 
-		while ((skb = skb_dequeue(&ptp_data->txq)))
-			efx_ptp_xmit_skb(efx, skb);
-	}
+	while ((skb = skb_dequeue(&ptp_data->txq)))
+		efx_ptp_xmit_skb(efx, skb);
 
 	while ((skb = __skb_dequeue(&tempq)))
 		efx_ptp_process_rx(efx, skb);
 }
 
-/* Initialise PTP channel and state.
- *
- * Setting core_index to zero causes the queue to be initialised and doesn't
- * overlap with 'rxq0' because ptp.c doesn't use skb_record_rx_queue.
- */
-static int efx_ptp_probe_channel(struct efx_channel *channel)
+static const struct ptp_clock_info efx_phc_clock_info = {
+	.owner		= THIS_MODULE,
+	.name		= "sfc",
+	.max_adj	= MAX_PPB,
+	.n_alarm	= 0,
+	.n_ext_ts	= 0,
+	.n_per_out	= 0,
+	.pps		= 1,
+	.adjfreq	= efx_phc_adjfreq,
+	.adjtime	= efx_phc_adjtime,
+	.gettime	= efx_phc_gettime,
+	.settime	= efx_phc_settime,
+	.enable		= efx_phc_enable,
+};
+
+/* Initialise PTP state. */
+int efx_ptp_probe(struct efx_nic *efx, struct efx_channel *channel)
 {
-	struct efx_nic *efx = channel->efx;
 	struct efx_ptp_data *ptp;
 	int rc = 0;
 	unsigned int pos;
 
-	channel->irq_moderation = 0;
-	channel->rx_queue.core_index = 0;
-
 	ptp = kzalloc(sizeof(struct efx_ptp_data), GFP_KERNEL);
 	efx->ptp_data = ptp;
 	if (!efx->ptp_data)
 		return -ENOMEM;
 
+	ptp->efx = efx;
+	ptp->channel = channel;
+	ptp->rx_ts_inline = efx_nic_rev(efx) >= EFX_REV_HUNT_A0;
+
 	rc = efx_nic_alloc_buffer(efx, &ptp->start, sizeof(int), GFP_KERNEL);
 	if (rc != 0)
 		goto fail1;
 
-	ptp->channel = channel;
 	skb_queue_head_init(&ptp->rxq);
 	skb_queue_head_init(&ptp->txq);
 	ptp->workwq = create_singlethread_workqueue("sfc_ptp");
@@ -929,33 +1255,32 @@ static int efx_ptp_probe_channel(struct efx_channel *channel)
 		list_add(&ptp->rx_evts[pos].link, &ptp->evt_free_list);
 	ptp->evt_overflow = false;
 
-	ptp->phc_clock_info.owner = THIS_MODULE;
-	snprintf(ptp->phc_clock_info.name,
-		 sizeof(ptp->phc_clock_info.name),
-		 "%pm", efx->net_dev->perm_addr);
-	ptp->phc_clock_info.max_adj = MAX_PPB;
-	ptp->phc_clock_info.n_alarm = 0;
-	ptp->phc_clock_info.n_ext_ts = 0;
-	ptp->phc_clock_info.n_per_out = 0;
-	ptp->phc_clock_info.pps = 1;
-	ptp->phc_clock_info.adjfreq = efx_phc_adjfreq;
-	ptp->phc_clock_info.adjtime = efx_phc_adjtime;
-	ptp->phc_clock_info.gettime = efx_phc_gettime;
-	ptp->phc_clock_info.settime = efx_phc_settime;
-	ptp->phc_clock_info.enable = efx_phc_enable;
-
-	ptp->phc_clock = ptp_clock_register(&ptp->phc_clock_info,
-					    &efx->pci_dev->dev);
-	if (IS_ERR(ptp->phc_clock)) {
-		rc = PTR_ERR(ptp->phc_clock);
+	/* Get the NIC PTP attributes and set up time conversions */
+	rc = efx_ptp_get_attributes(efx);
+	if (rc < 0)
 		goto fail3;
-	}
 
-	INIT_WORK(&ptp->pps_work, efx_ptp_pps_worker);
-	ptp->pps_workwq = create_singlethread_workqueue("sfc_pps");
-	if (!ptp->pps_workwq) {
-		rc = -ENOMEM;
-		goto fail4;
+	/* Get the timestamp corrections */
+	rc = efx_ptp_get_timestamp_corrections(efx);
+	if (rc < 0)
+		goto fail3;
+
+	if (efx->mcdi->fn_flags &
+	    (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY)) {
+		ptp->phc_clock_info = efx_phc_clock_info;
+		ptp->phc_clock = ptp_clock_register(&ptp->phc_clock_info,
+						    &efx->pci_dev->dev);
+		if (IS_ERR(ptp->phc_clock)) {
+			rc = PTR_ERR(ptp->phc_clock);
+			goto fail3;
+		}
+
+		INIT_WORK(&ptp->pps_work, efx_ptp_pps_worker);
+		ptp->pps_workwq = create_singlethread_workqueue("sfc_pps");
+		if (!ptp->pps_workwq) {
+			rc = -ENOMEM;
+			goto fail4;
+		}
 	}
 	ptp->nic_ts_enabled = false;
 
@@ -976,14 +1301,27 @@ fail1:
 	return rc;
 }
 
-static void efx_ptp_remove_channel(struct efx_channel *channel)
+/* Initialise PTP channel.
+ *
+ * Setting core_index to zero causes the queue to be initialised and doesn't
+ * overlap with 'rxq0' because ptp.c doesn't use skb_record_rx_queue.
+ */
+static int efx_ptp_probe_channel(struct efx_channel *channel)
 {
 	struct efx_nic *efx = channel->efx;
 
+	channel->irq_moderation = 0;
+	channel->rx_queue.core_index = 0;
+
+	return efx_ptp_probe(efx, channel);
+}
+
+void efx_ptp_remove(struct efx_nic *efx)
+{
 	if (!efx->ptp_data)
 		return;
 
-	(void)efx_ptp_disable(channel->efx);
+	(void)efx_ptp_disable(efx);
 
 	cancel_work_sync(&efx->ptp_data->work);
 	cancel_work_sync(&efx->ptp_data->pps_work);
@@ -991,15 +1329,22 @@ static void efx_ptp_remove_channel(struct efx_channel *channel)
 	skb_queue_purge(&efx->ptp_data->rxq);
 	skb_queue_purge(&efx->ptp_data->txq);
 
-	ptp_clock_unregister(efx->ptp_data->phc_clock);
+	if (efx->ptp_data->phc_clock) {
+		destroy_workqueue(efx->ptp_data->pps_workwq);
+		ptp_clock_unregister(efx->ptp_data->phc_clock);
+	}
 
 	destroy_workqueue(efx->ptp_data->workwq);
-	destroy_workqueue(efx->ptp_data->pps_workwq);
 
 	efx_nic_free_buffer(efx, &efx->ptp_data->start);
 	kfree(efx->ptp_data);
 }
 
+static void efx_ptp_remove_channel(struct efx_channel *channel)
+{
+	efx_ptp_remove(channel->efx);
+}
+
 static void efx_ptp_get_channel_name(struct efx_channel *channel,
 				     char *buf, size_t len)
 {
@@ -1080,14 +1425,8 @@ static bool efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb)
 
 	/* Does this packet require timestamping? */
 	if (ntohs(*(__be16 *)&skb->data[PTP_DPORT_OFFSET]) == PTP_EVENT_PORT) {
-		struct skb_shared_hwtstamps *timestamps;
-
 		match->state = PTP_PACKET_STATE_UNMATCHED;
 
-		/* Clear all timestamps held: filled in later */
-		timestamps = skb_hwtstamps(skb);
-		memset(timestamps, 0, sizeof(*timestamps));
-
 		/* We expect the sequence number to be in the same position in
 		 * the packet for PTP V1 and V2
 		 */
@@ -1132,8 +1471,13 @@ int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb)
 	return NETDEV_TX_OK;
 }
 
-static int efx_ptp_change_mode(struct efx_nic *efx, bool enable_wanted,
-			       unsigned int new_mode)
+int efx_ptp_get_mode(struct efx_nic *efx)
+{
+	return efx->ptp_data->mode;
+}
+
+int efx_ptp_change_mode(struct efx_nic *efx, bool enable_wanted,
+			unsigned int new_mode)
 {
 	if ((enable_wanted != efx->ptp_data->enabled) ||
 	    (enable_wanted && (efx->ptp_data->mode != new_mode))) {
@@ -1177,8 +1521,6 @@ static int efx_ptp_change_mode(struct efx_nic *efx, bool enable_wanted,
 
 static int efx_ptp_ts_init(struct efx_nic *efx, struct hwtstamp_config *init)
 {
-	bool enable_wanted = false;
-	unsigned int new_mode;
 	int rc;
 
 	if (init->flags)
@@ -1188,63 +1530,20 @@ static int efx_ptp_ts_init(struct efx_nic *efx, struct hwtstamp_config *init)
 	    (init->tx_type != HWTSTAMP_TX_ON))
 		return -ERANGE;
 
-	new_mode = efx->ptp_data->mode;
-	/* Determine whether any PTP HW operations are required */
-	switch (init->rx_filter) {
-	case HWTSTAMP_FILTER_NONE:
-		break;
-	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
-	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
-	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
-		init->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
-		new_mode = MC_CMD_PTP_MODE_V1;
-		enable_wanted = true;
-		break;
-	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
-	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
-	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
-	/* Although these three are accepted only IPV4 packets will be
-	 * timestamped
-	 */
-		init->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
-		new_mode = MC_CMD_PTP_MODE_V2_ENHANCED;
-		enable_wanted = true;
-		break;
-	case HWTSTAMP_FILTER_PTP_V2_EVENT:
-	case HWTSTAMP_FILTER_PTP_V2_SYNC:
-	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
-	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
-	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
-	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
-		/* Non-IP + IPv6 timestamping not supported */
-		return -ERANGE;
-		break;
-	default:
-		return -ERANGE;
-	}
-
-	if (init->tx_type != HWTSTAMP_TX_OFF)
-		enable_wanted = true;
-
-	/* Old versions of the firmware do not support the improved
-	 * UUID filtering option (SF bug 33070).  If the firmware does
-	 * not accept the enhanced mode, fall back to the standard PTP
-	 * v2 UUID filtering.
-	 */
-	rc = efx_ptp_change_mode(efx, enable_wanted, new_mode);
-	if ((rc != 0) && (new_mode == MC_CMD_PTP_MODE_V2_ENHANCED))
-		rc = efx_ptp_change_mode(efx, enable_wanted, MC_CMD_PTP_MODE_V2);
-	if (rc != 0)
+	rc = efx->type->ptp_set_ts_config(efx, init);
+	if (rc)
 		return rc;
 
 	efx->ptp_data->config = *init;
-
 	return 0;
 }
 
 void efx_ptp_get_ts_info(struct efx_nic *efx, struct ethtool_ts_info *ts_info)
 {
 	struct efx_ptp_data *ptp = efx->ptp_data;
+	struct efx_nic *primary = efx->primary;
+
+	ASSERT_RTNL();
 
 	if (!ptp)
 		return;
@@ -1252,18 +1551,14 @@ void efx_ptp_get_ts_info(struct efx_nic *efx, struct ethtool_ts_info *ts_info)
 	ts_info->so_timestamping |= (SOF_TIMESTAMPING_TX_HARDWARE |
 				     SOF_TIMESTAMPING_RX_HARDWARE |
 				     SOF_TIMESTAMPING_RAW_HARDWARE);
-	ts_info->phc_index = ptp_clock_index(ptp->phc_clock);
+	if (primary && primary->ptp_data && primary->ptp_data->phc_clock)
+		ts_info->phc_index =
+			ptp_clock_index(primary->ptp_data->phc_clock);
 	ts_info->tx_types = 1 << HWTSTAMP_TX_OFF | 1 << HWTSTAMP_TX_ON;
-	ts_info->rx_filters = (1 << HWTSTAMP_FILTER_NONE |
-			       1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT |
-			       1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC |
-			       1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ |
-			       1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT |
-			       1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC |
-			       1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ);
+	ts_info->rx_filters = ptp->efx->type->hwtstamp_filters;
 }
 
-int efx_ptp_ioctl(struct efx_nic *efx, struct ifreq *ifr, int cmd)
+int efx_ptp_set_ts_config(struct efx_nic *efx, struct ifreq *ifr)
 {
 	struct hwtstamp_config config;
 	int rc;
@@ -1283,6 +1578,15 @@ int efx_ptp_ioctl(struct efx_nic *efx, struct ifreq *ifr, int cmd)
 		? -EFAULT : 0;
 }
 
+int efx_ptp_get_ts_config(struct efx_nic *efx, struct ifreq *ifr)
+{
+	if (!efx->ptp_data)
+		return -EOPNOTSUPP;
+
+	return copy_to_user(ifr->ifr_data, &efx->ptp_data->config,
+			    sizeof(efx->ptp_data->config)) ? -EFAULT : 0;
+}
+
 static void ptp_event_failure(struct efx_nic *efx, int expected_frag_len)
 {
 	struct efx_ptp_data *ptp = efx->ptp_data;
@@ -1302,6 +1606,9 @@ static void ptp_event_rx(struct efx_nic *efx, struct efx_ptp_data *ptp)
 {
 	struct efx_ptp_event_rx *evt = NULL;
 
+	if (WARN_ON_ONCE(ptp->rx_ts_inline))
+		return;
+
 	if (ptp->evt_frag_idx != 3) {
 		ptp_event_failure(efx, 3);
 		return;
@@ -1320,9 +1627,10 @@ static void ptp_event_rx(struct efx_nic *efx, struct efx_ptp_data *ptp)
 					      MCDI_EVENT_SRC) << 8) |
 			     (EFX_QWORD_FIELD(ptp->evt_frags[0],
 					      MCDI_EVENT_SRC) << 16));
-		evt->hwtimestamp = ktime_set(
+		evt->hwtimestamp = efx->ptp_data->nic_to_kernel_time(
 			EFX_QWORD_FIELD(ptp->evt_frags[0], MCDI_EVENT_DATA),
-			EFX_QWORD_FIELD(ptp->evt_frags[1], MCDI_EVENT_DATA));
+			EFX_QWORD_FIELD(ptp->evt_frags[1], MCDI_EVENT_DATA),
+			ptp->ts_corrections.rx);
 		evt->expiry = jiffies + msecs_to_jiffies(PKT_EVENT_LIFETIME_MS);
 		list_add_tail(&evt->link, &ptp->evt_list);
 
@@ -1397,12 +1705,99 @@ void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev)
 	}
 }
 
+void efx_time_sync_event(struct efx_channel *channel, efx_qword_t *ev)
+{
+	channel->sync_timestamp_major = MCDI_EVENT_FIELD(*ev, PTP_TIME_MAJOR);
+	channel->sync_timestamp_minor =
+		MCDI_EVENT_FIELD(*ev, PTP_TIME_MINOR_26_19) << 19;
+	/* if sync events have been disabled then we want to silently ignore
+	 * this event, so throw away result.
+	 */
+	(void) cmpxchg(&channel->sync_events_state, SYNC_EVENTS_REQUESTED,
+		       SYNC_EVENTS_VALID);
+}
+
+/* make some assumptions about the time representation rather than abstract it,
+ * since we currently only support one type of inline timestamping and only on
+ * EF10.
+ */
+#define MINOR_TICKS_PER_SECOND 0x8000000
+/* Fuzz factor for sync events to be out of order with RX events */
+#define FUZZ (MINOR_TICKS_PER_SECOND / 10)
+#define EXPECTED_SYNC_EVENTS_PER_SECOND 4
+
+static inline u32 efx_rx_buf_timestamp_minor(struct efx_nic *efx, const u8 *eh)
+{
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+	return __le32_to_cpup((const __le32 *)(eh + efx->rx_packet_ts_offset));
+#else
+	const u8 *data = eh + efx->rx_packet_ts_offset;
+	return (u32)data[0]       |
+	       (u32)data[1] << 8  |
+	       (u32)data[2] << 16 |
+	       (u32)data[3] << 24;
+#endif
+}
+
+void __efx_rx_skb_attach_timestamp(struct efx_channel *channel,
+				   struct sk_buff *skb)
+{
+	struct efx_nic *efx = channel->efx;
+	u32 pkt_timestamp_major, pkt_timestamp_minor;
+	u32 diff, carry;
+	struct skb_shared_hwtstamps *timestamps;
+
+	pkt_timestamp_minor = (efx_rx_buf_timestamp_minor(efx,
+							  skb_mac_header(skb)) +
+			       (u32) efx->ptp_data->ts_corrections.rx) &
+			      (MINOR_TICKS_PER_SECOND - 1);
+
+	/* get the difference between the packet and sync timestamps,
+	 * modulo one second
+	 */
+	diff = (pkt_timestamp_minor - channel->sync_timestamp_minor) &
+		(MINOR_TICKS_PER_SECOND - 1);
+	/* do we roll over a second boundary and need to carry the one? */
+	carry = channel->sync_timestamp_minor + diff > MINOR_TICKS_PER_SECOND ?
+		1 : 0;
+
+	if (diff <= MINOR_TICKS_PER_SECOND / EXPECTED_SYNC_EVENTS_PER_SECOND +
+		    FUZZ) {
+		/* packet is ahead of the sync event by a quarter of a second or
+		 * less (allowing for fuzz)
+		 */
+		pkt_timestamp_major = channel->sync_timestamp_major + carry;
+	} else if (diff >= MINOR_TICKS_PER_SECOND - FUZZ) {
+		/* packet is behind the sync event but within the fuzz factor.
+		 * This means the RX packet and sync event crossed as they were
+		 * placed on the event queue, which can sometimes happen.
+		 */
+		pkt_timestamp_major = channel->sync_timestamp_major - 1 + carry;
+	} else {
+		/* it's outside tolerance in both directions. this might be
+		 * indicative of us missing sync events for some reason, so
+		 * we'll call it an error rather than risk giving a bogus
+		 * timestamp.
+		 */
+		netif_vdbg(efx, drv, efx->net_dev,
+			  "packet timestamp %x too far from sync event %x:%x\n",
+			  pkt_timestamp_minor, channel->sync_timestamp_major,
+			  channel->sync_timestamp_minor);
+		return;
+	}
+
+	/* attach the timestamps to the skb */
+	timestamps = skb_hwtstamps(skb);
+	timestamps->hwtstamp =
+		efx_ptp_s27_to_ktime(pkt_timestamp_major, pkt_timestamp_minor);
+}
+
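Only the 27-bit minor part of the NIC timestamp is delivered in the RX prefix, so the code above recovers the seconds ("major") part from the most recent sync event, working modulo one second and tolerating a fuzz window for sync and RX events that cross on the event queue. A condensed user-space model of that decision (constants copied from above; function names and sample values are hypothetical):

/* Model of the major-timestamp reconstruction above: given the minor
 * (sub-second) part of a packet timestamp and the last sync event,
 * decide which second the packet belongs to.
 */
#include <stdint.h>
#include <stdio.h>

#define MINOR_TICKS_PER_SECOND 0x8000000	/* 2^27 ticks per second */
#define FUZZ (MINOR_TICKS_PER_SECOND / 10)
#define EXPECTED_SYNC_EVENTS_PER_SECOND 4

/* Returns the reconstructed major (seconds) value, or -1 if the packet
 * is too far from the sync event to trust.
 */
static int64_t reconstruct_major(uint32_t sync_major, uint32_t sync_minor,
				 uint32_t pkt_minor)
{
	uint32_t diff, carry;

	/* difference between packet and sync timestamps, modulo one second */
	diff = (pkt_minor - sync_minor) & (MINOR_TICKS_PER_SECOND - 1);
	/* did adding the difference roll over a second boundary? */
	carry = sync_minor + diff > MINOR_TICKS_PER_SECOND ? 1 : 0;

	if (diff <= MINOR_TICKS_PER_SECOND / EXPECTED_SYNC_EVENTS_PER_SECOND +
		    FUZZ)
		return sync_major + carry;	/* packet shortly after sync */
	if (diff >= MINOR_TICKS_PER_SECOND - FUZZ)
		return sync_major - 1 + carry;	/* packet just before sync */
	return -1;				/* probably missed sync events */
}

int main(void)
{
	/* sync event at 100 s plus 0.9 s worth of ticks; packet 0.2 s later,
	 * i.e. past the second boundary, so major should come out as 101
	 */
	uint32_t sync_minor = MINOR_TICKS_PER_SECOND * 9 / 10;
	uint32_t pkt_minor = MINOR_TICKS_PER_SECOND / 10;

	printf("major=%lld\n",
	       (long long)reconstruct_major(100, sync_minor, pkt_minor));
	return 0;
}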
 static int efx_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta)
 {
 	struct efx_ptp_data *ptp_data = container_of(ptp,
 						     struct efx_ptp_data,
 						     phc_clock_info);
-	struct efx_nic *efx = ptp_data->channel->efx;
+	struct efx_nic *efx = ptp_data->efx;
 	MCDI_DECLARE_BUF(inadj, MC_CMD_PTP_IN_ADJUST_LEN);
 	s64 adjustment_ns;
 	int rc;
@@ -1432,18 +1827,20 @@ static int efx_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta)
 
 static int efx_phc_adjtime(struct ptp_clock_info *ptp, s64 delta)
 {
+	u32 nic_major, nic_minor;
 	struct efx_ptp_data *ptp_data = container_of(ptp,
 						     struct efx_ptp_data,
 						     phc_clock_info);
-	struct efx_nic *efx = ptp_data->channel->efx;
-	struct timespec delta_ts = ns_to_timespec(delta);
+	struct efx_nic *efx = ptp_data->efx;
 	MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_ADJUST_LEN);
 
+	efx->ptp_data->ns_to_nic_time(delta, &nic_major, &nic_minor);
+
 	MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_ADJUST);
 	MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
 	MCDI_SET_QWORD(inbuf, PTP_IN_ADJUST_FREQ, ptp_data->current_adjfreq);
-	MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_SECONDS, (u32)delta_ts.tv_sec);
-	MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_NANOSECONDS, (u32)delta_ts.tv_nsec);
+	MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_MAJOR, nic_major);
+	MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_MINOR, nic_minor);
 	return efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
 			    NULL, 0, NULL);
 }
@@ -1453,10 +1850,11 @@ static int efx_phc_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
 	struct efx_ptp_data *ptp_data = container_of(ptp,
 						     struct efx_ptp_data,
 						     phc_clock_info);
-	struct efx_nic *efx = ptp_data->channel->efx;
+	struct efx_nic *efx = ptp_data->efx;
 	MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_READ_NIC_TIME_LEN);
 	MCDI_DECLARE_BUF(outbuf, MC_CMD_PTP_OUT_READ_NIC_TIME_LEN);
 	int rc;
+	ktime_t kt;
 
 	MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_READ_NIC_TIME);
 	MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
@@ -1466,8 +1864,10 @@ static int efx_phc_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
 	if (rc != 0)
 		return rc;
 
-	ts->tv_sec = MCDI_DWORD(outbuf, PTP_OUT_READ_NIC_TIME_SECONDS);
-	ts->tv_nsec = MCDI_DWORD(outbuf, PTP_OUT_READ_NIC_TIME_NANOSECONDS);
+	kt = ptp_data->nic_to_kernel_time(
+		MCDI_DWORD(outbuf, PTP_OUT_READ_NIC_TIME_MAJOR),
+		MCDI_DWORD(outbuf, PTP_OUT_READ_NIC_TIME_MINOR), 0);
+	*ts = ktime_to_timespec(kt);
 	return 0;
 }
 
@@ -1519,7 +1919,7 @@ static const struct efx_channel_type efx_ptp_channel_type = {
 	.keep_eventq		= false,
 };
 
-void efx_ptp_probe(struct efx_nic *efx)
+void efx_ptp_defer_probe_with_channel(struct efx_nic *efx)
 {
 	/* Check whether PTP is implemented on this NIC.  The DISABLE
 	 * operation will succeed if and only if it is implemented.
@@ -1533,9 +1933,15 @@ void efx_ptp_start_datapath(struct efx_nic *efx)
 {
 	if (efx_ptp_restart(efx))
 		netif_err(efx, drv, efx->net_dev, "Failed to restart PTP.\n");
+	/* re-enable timestamping if it was previously enabled */
+	if (efx->type->ptp_set_ts_sync_events)
+		efx->type->ptp_set_ts_sync_events(efx, true, true);
 }
 
 void efx_ptp_stop_datapath(struct efx_nic *efx)
 {
+	/* temporarily disable timestamping */
+	if (efx->type->ptp_set_ts_sync_events)
+		efx->type->ptp_set_ts_sync_events(efx, false, true);
 	efx_ptp_stop(efx);
 }
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index 42488df1f4ec..48588ddf81b0 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -149,7 +149,7 @@ static struct page *efx_reuse_page(struct efx_rx_queue *rx_queue)
  * 0 on success. If a single page can be used for multiple buffers,
  * then the page will either be inserted fully, or not at all.
  */
-static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue)
+static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue, bool atomic)
 {
 	struct efx_nic *efx = rx_queue->efx;
 	struct efx_rx_buffer *rx_buf;
@@ -163,7 +163,8 @@ static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue)
 	do {
 		page = efx_reuse_page(rx_queue);
 		if (page == NULL) {
-			page = alloc_pages(__GFP_COLD | __GFP_COMP | GFP_ATOMIC,
+			page = alloc_pages(__GFP_COLD | __GFP_COMP |
+					   (atomic ? GFP_ATOMIC : GFP_KERNEL),
 					   efx->rx_buffer_order);
 			if (unlikely(page == NULL))
 				return -ENOMEM;
@@ -321,7 +322,7 @@ static void efx_discard_rx_packet(struct efx_channel *channel,
  * this means this function must run from the NAPI handler, or be called
  * when NAPI is disabled.
  */
-void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
+void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic)
 {
 	struct efx_nic *efx = rx_queue->efx;
 	unsigned int fill_level, batch_size;
@@ -354,7 +355,7 @@ void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
 
 
 	do {
-		rc = efx_init_rx_buffers(rx_queue);
+		rc = efx_init_rx_buffers(rx_queue, atomic);
 		if (unlikely(rc)) {
 			/* Ensure that we don't leave the rx queue empty */
 			if (rx_queue->added_count == rx_queue->removed_count)
@@ -439,7 +440,8 @@ efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
 	}
 
 	if (efx->net_dev->features & NETIF_F_RXHASH)
-		skb->rxhash = efx_rx_buf_hash(efx, eh);
+		skb_set_hash(skb, efx_rx_buf_hash(efx, eh),
+			     PKT_HASH_TYPE_L3);
 	skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ?
 			  CHECKSUM_UNNECESSARY : CHECKSUM_NONE);
 
@@ -475,14 +477,18 @@ static struct sk_buff *efx_rx_mk_skb(struct efx_channel *channel,
 	struct sk_buff *skb;
 
 	/* Allocate an SKB to store the headers */
-	skb = netdev_alloc_skb(efx->net_dev, hdr_len + EFX_PAGE_SKB_ALIGN);
+	skb = netdev_alloc_skb(efx->net_dev,
+			       efx->rx_ip_align + efx->rx_prefix_size +
+			       hdr_len);
 	if (unlikely(skb == NULL))
 		return NULL;
 
 	EFX_BUG_ON_PARANOID(rx_buf->len < hdr_len);
 
-	skb_reserve(skb, EFX_PAGE_SKB_ALIGN);
-	memcpy(__skb_put(skb, hdr_len), eh, hdr_len);
+	memcpy(skb->data + efx->rx_ip_align, eh - efx->rx_prefix_size,
+	       efx->rx_prefix_size + hdr_len);
+	skb_reserve(skb, efx->rx_ip_align + efx->rx_prefix_size);
+	__skb_put(skb, hdr_len);
 
 	/* Append the remaining page(s) onto the frag list */
 	if (rx_buf->len > hdr_len) {
@@ -619,6 +625,8 @@ static void efx_rx_deliver(struct efx_channel *channel, u8 *eh,
 	if (likely(rx_buf->flags & EFX_RX_PKT_CSUMMED))
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
 
+	efx_rx_skb_attach_timestamp(channel, skb);
+
 	if (channel->type->receive_skb)
 		if (channel->type->receive_skb(channel, skb))
 			return;
diff --git a/drivers/net/ethernet/sfc/selftest.c b/drivers/net/ethernet/sfc/selftest.c
index 144bbff5a4ae..26641817a9c7 100644
--- a/drivers/net/ethernet/sfc/selftest.c
+++ b/drivers/net/ethernet/sfc/selftest.c
@@ -722,7 +722,7 @@ int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
 			return rc_reset;
 		}
 
-		if ((tests->registers < 0) && !rc_test)
+		if ((tests->memory < 0 || tests->registers < 0) && !rc_test)
 			rc_test = -EIO;
 	}
 
diff --git a/drivers/net/ethernet/sfc/selftest.h b/drivers/net/ethernet/sfc/selftest.h
index a2f4a06ffa4e..009dbe88f3be 100644
--- a/drivers/net/ethernet/sfc/selftest.h
+++ b/drivers/net/ethernet/sfc/selftest.h
@@ -38,6 +38,7 @@ struct efx_self_tests {
 	int eventq_dma[EFX_MAX_CHANNELS];
 	int eventq_int[EFX_MAX_CHANNELS];
 	/* offline tests */
+	int memory;
 	int registers;
 	int phy_ext[EFX_MAX_PHY_TESTS];
 	struct efx_loopback_self_tests loopback[LOOPBACK_TEST_MAX + 1];
diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c
index d034bcd124ef..23f3a6f7737a 100644
--- a/drivers/net/ethernet/sfc/siena.c
+++ b/drivers/net/ethernet/sfc/siena.c
@@ -118,6 +118,54 @@ out:
 
 /**************************************************************************
  *
+ * PTP
+ *
+ **************************************************************************
+ */
+
+static void siena_ptp_write_host_time(struct efx_nic *efx, u32 host_time)
+{
+	_efx_writed(efx, cpu_to_le32(host_time),
+		    FR_CZ_MC_TREG_SMEM + MC_SMEM_P0_PTP_TIME_OFST);
+}
+
+static int siena_ptp_set_ts_config(struct efx_nic *efx,
+				   struct hwtstamp_config *init)
+{
+	int rc;
+
+	switch (init->rx_filter) {
+	case HWTSTAMP_FILTER_NONE:
+		/* if TX timestamping is still requested then leave PTP on */
+		return efx_ptp_change_mode(efx,
+					   init->tx_type != HWTSTAMP_TX_OFF,
+					   efx_ptp_get_mode(efx));
+	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+		init->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
+		return efx_ptp_change_mode(efx, true, MC_CMD_PTP_MODE_V1);
+	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+		init->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
+		rc = efx_ptp_change_mode(efx, true,
+					 MC_CMD_PTP_MODE_V2_ENHANCED);
+		/* bug 33070 - old versions of the firmware do not support the
+		 * improved UUID filtering option. Similarly old versions of the
+		 * application do not expect it to be enabled. If the firmware
+		 * does not accept the enhanced mode, fall back to the standard
+		 * PTP v2 UUID filtering. */
+		if (rc != 0)
+			rc = efx_ptp_change_mode(efx, true, MC_CMD_PTP_MODE_V2);
+		return rc;
+	default:
+		return -ERANGE;
+	}
+}
+
+/**************************************************************************
+ *
  * Device reset
  *
  **************************************************************************
@@ -259,7 +307,7 @@ static int siena_probe_nic(struct efx_nic *efx)
 		goto fail5;
 
 	efx_sriov_probe(efx);
-	efx_ptp_probe(efx);
+	efx_ptp_defer_probe_with_channel(efx);
 
 	return 0;
 
@@ -273,6 +321,31 @@ fail1:
 	return rc;
 }
 
+static void siena_rx_push_rss_config(struct efx_nic *efx)
+{
+	efx_oword_t temp;
+
+	/* Set hash key for IPv4 */
+	memcpy(&temp, efx->rx_hash_key, sizeof(temp));
+	efx_writeo(efx, &temp, FR_BZ_RX_RSS_TKEY);
+
+	/* Enable IPv6 RSS */
+	BUILD_BUG_ON(sizeof(efx->rx_hash_key) <
+		     2 * sizeof(temp) + FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8 ||
+		     FRF_CZ_RX_RSS_IPV6_TKEY_HI_LBN != 0);
+	memcpy(&temp, efx->rx_hash_key, sizeof(temp));
+	efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG1);
+	memcpy(&temp, efx->rx_hash_key + sizeof(temp), sizeof(temp));
+	efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG2);
+	EFX_POPULATE_OWORD_2(temp, FRF_CZ_RX_RSS_IPV6_THASH_ENABLE, 1,
+			     FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE, 1);
+	memcpy(&temp, efx->rx_hash_key + 2 * sizeof(temp),
+	       FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8);
+	efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG3);
+
+	efx_farch_rx_push_indir_table(efx);
+}
+
 /* This call performs hardware-specific global initialisation, such as
  * defining the descriptor cache sizes and number of RSS channels.
  * It does not set up any buffers, descriptor rings or event queues.
@@ -313,23 +386,7 @@ static int siena_init_nic(struct efx_nic *efx)
 			    EFX_RX_USR_BUF_SIZE >> 5);
 	efx_writeo(efx, &temp, FR_AZ_RX_CFG);
 
-	/* Set hash key for IPv4 */
-	memcpy(&temp, efx->rx_hash_key, sizeof(temp));
-	efx_writeo(efx, &temp, FR_BZ_RX_RSS_TKEY);
-
-	/* Enable IPv6 RSS */
-	BUILD_BUG_ON(sizeof(efx->rx_hash_key) <
-		     2 * sizeof(temp) + FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8 ||
-		     FRF_CZ_RX_RSS_IPV6_TKEY_HI_LBN != 0);
-	memcpy(&temp, efx->rx_hash_key, sizeof(temp));
-	efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG1);
-	memcpy(&temp, efx->rx_hash_key + sizeof(temp), sizeof(temp));
-	efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG2);
-	EFX_POPULATE_OWORD_2(temp, FRF_CZ_RX_RSS_IPV6_THASH_ENABLE, 1,
-			     FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE, 1);
-	memcpy(&temp, efx->rx_hash_key + 2 * sizeof(temp),
-	       FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8);
-	efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG3);
+	siena_rx_push_rss_config(efx);
 
 	/* Enable event logging */
 	rc = efx_mcdi_log_ctrl(efx, true, false, 0);
@@ -458,6 +515,8 @@ static int siena_try_update_nic_stats(struct efx_nic *efx)
 		return -EAGAIN;
 
 	/* Update derived statistics */
+	efx_nic_fix_nodesc_drop_stat(efx,
+				     &stats[SIENA_STAT_rx_nodesc_drop_cnt]);
 	efx_update_diff_stat(&stats[SIENA_STAT_tx_good_bytes],
 			     stats[SIENA_STAT_tx_bytes] -
 			     stats[SIENA_STAT_tx_bad_bytes]);
@@ -837,19 +896,6 @@ fail:
 
 /**************************************************************************
  *
- * PTP
- *
- **************************************************************************
- */
-
-static void siena_ptp_write_host_time(struct efx_nic *efx, u32 host_time)
-{
-	_efx_writed(efx, cpu_to_le32(host_time),
-		    FR_CZ_MC_TREG_SMEM + MC_SMEM_P0_PTP_TIME_OFST);
-}
-
-/**************************************************************************
- *
  * Revision-dependent attributes used by efx.c and nic.c
  *
  **************************************************************************
@@ -878,6 +924,7 @@ const struct efx_nic_type siena_a0_nic_type = {
 	.describe_stats = siena_describe_nic_stats,
 	.update_stats = siena_update_nic_stats,
 	.start_stats = efx_mcdi_mac_start_stats,
+	.pull_stats = efx_mcdi_mac_pull_stats,
 	.stop_stats = efx_mcdi_mac_stop_stats,
 	.set_id_led = efx_mcdi_set_id_led,
 	.push_irq_moderation = siena_push_irq_moderation,
@@ -902,7 +949,7 @@ const struct efx_nic_type siena_a0_nic_type = {
 	.tx_init = efx_farch_tx_init,
 	.tx_remove = efx_farch_tx_remove,
 	.tx_write = efx_farch_tx_write,
-	.rx_push_indir_table = efx_farch_rx_push_indir_table,
+	.rx_push_rss_config = siena_rx_push_rss_config,
 	.rx_probe = efx_farch_rx_probe,
 	.rx_init = efx_farch_rx_init,
 	.rx_remove = efx_farch_rx_remove,
@@ -939,6 +986,7 @@ const struct efx_nic_type siena_a0_nic_type = {
 	.mtd_sync = efx_mcdi_mtd_sync,
 #endif
 	.ptp_write_host_time = siena_ptp_write_host_time,
+	.ptp_set_ts_config = siena_ptp_set_ts_config,
 
 	.revision = EFX_REV_SIENA_A0,
 	.txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,
@@ -957,4 +1005,11 @@ const struct efx_nic_type siena_a0_nic_type = {
 			     NETIF_F_RXHASH | NETIF_F_NTUPLE),
 	.mcdi_max_ver = 1,
 	.max_rx_ip_filters = FR_BZ_RX_FILTER_TBL0_ROWS,
+	.hwtstamp_filters = (1 << HWTSTAMP_FILTER_NONE |
+			     1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT |
+			     1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC |
+			     1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ |
+			     1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT |
+			     1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC |
+			     1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ),
 };
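
siena_ptp_set_ts_config() above maps filters requested through the standard SIOCSHWTSTAMP interface onto MCDI PTP modes, with a fallback for firmware that lacks the enhanced V2 mode. A hedged userspace sketch of how such a configuration is requested; it uses only the generic hwtstamp_config UAPI, and "eth0" is a placeholder interface name.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT,
	};
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;

	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
		perror("SIOCSHWTSTAMP");
	else
		/* the driver may have rewritten rx_filter, as the hunk above does */
		printf("rx_filter in effect: %d\n", cfg.rx_filter);

	close(fd);
	return 0;
}
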
diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c
index ffa78432164d..7984ad05357d 100644
--- a/drivers/net/ethernet/sgi/ioc3-eth.c
+++ b/drivers/net/ethernet/sgi/ioc3-eth.c
@@ -30,7 +30,6 @@
 #define IOC3_NAME	"ioc3-eth"
 #define IOC3_VERSION	"2.6.3-4"
 
-#include <linux/init.h>
 #include <linux/delay.h>
 #include <linux/kernel.h>
 #include <linux/mm.h>
diff --git a/drivers/net/ethernet/sgi/meth.c b/drivers/net/ethernet/sgi/meth.c
index 513ed8b1ba58..5564a5fa3385 100644
--- a/drivers/net/ethernet/sgi/meth.c
+++ b/drivers/net/ethernet/sgi/meth.c
@@ -10,7 +10,6 @@
  */
 #include <linux/delay.h>
 #include <linux/dma-mapping.h>
-#include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
index 975dc2d8e548..ff57a46388ee 100644
--- a/drivers/net/ethernet/sis/sis900.c
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -576,7 +576,6 @@ err_unmap_tx:
 err_out_unmap:
 	pci_iounmap(pci_dev, ioaddr);
 err_out_cleardev:
-	pci_set_drvdata(pci_dev, NULL);
 	pci_release_regions(pci_dev);
  err_out:
 	free_netdev(net_dev);
@@ -2427,7 +2426,6 @@ static void sis900_remove(struct pci_dev *pci_dev)
 	pci_iounmap(pci_dev, sis_priv->ioaddr);
 	free_netdev(net_dev);
 	pci_release_regions(pci_dev);
-	pci_set_drvdata(pci_dev, NULL);
 }
 
 #ifdef CONFIG_PM
diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c
index 0f096a890059..c50fb08c9905 100644
--- a/drivers/net/ethernet/smsc/smc911x.c
+++ b/drivers/net/ethernet/smsc/smc911x.c
@@ -17,8 +17,7 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
  *
  * Arguments:
  *	 watchdog  = TX watchdog timeout
@@ -55,7 +54,6 @@ static const char version[] =
 			 )
 #endif
 
-#include <linux/init.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
diff --git a/drivers/net/ethernet/smsc/smc911x.h b/drivers/net/ethernet/smsc/smc911x.h
index 9965da39281b..04b35f55df97 100644
--- a/drivers/net/ethernet/smsc/smc911x.h
+++ b/drivers/net/ethernet/smsc/smc911x.h
@@ -15,8 +15,7 @@
  . GNU General Public License for more details.
  .
  . You should have received a copy of the GNU General Public License
- . along with this program; if not, write to the Free Software
- . Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ . along with this program; if not, see <http://www.gnu.org/licenses/>.
  .
  . Information contained in this file was obtained from the LAN9118
  . manual from SMC.  To get a copy, if you really want one, you can find
diff --git a/drivers/net/ethernet/smsc/smc91c92_cs.c b/drivers/net/ethernet/smsc/smc91c92_cs.c
index 8ef70d9c20c1..c7a4868571f9 100644
--- a/drivers/net/ethernet/smsc/smc91c92_cs.c
+++ b/drivers/net/ethernet/smsc/smc91c92_cs.c
@@ -29,7 +29,6 @@
 
 #include <linux/module.h>
 #include <linux/kernel.h>
-#include <linux/init.h>
 #include <linux/slab.h>
 #include <linux/string.h>
 #include <linux/timer.h>
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 8bf29eb4a5a0..839c0e6cca01 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -19,8 +19,7 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
  *
  * Arguments:
  * 	io	= for the base address
@@ -66,7 +65,6 @@ static const char version[] =
 #endif
 
 
-#include <linux/init.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
@@ -1895,7 +1893,7 @@ static int smc_probe(struct net_device *dev, void __iomem *ioaddr,
 	SMC_SELECT_BANK(lp, 1);
 	val = SMC_GET_BASE(lp);
 	val = ((val & 0x1F00) >> 3) << SMC_IO_SHIFT;
-	if (((unsigned int)ioaddr & (0x3e0 << SMC_IO_SHIFT)) != val) {
+	if (((unsigned long)ioaddr & (0x3e0 << SMC_IO_SHIFT)) != val) {
 		netdev_warn(dev, "%s: IOADDR %p doesn't match configuration (%x).\n",
 			    CARDNAME, ioaddr, val);
 	}
diff --git a/drivers/net/ethernet/smsc/smc91x.h b/drivers/net/ethernet/smsc/smc91x.h
index 749654b976bc..47dce918eb0f 100644
--- a/drivers/net/ethernet/smsc/smc91x.h
+++ b/drivers/net/ethernet/smsc/smc91x.h
@@ -18,8 +18,7 @@
  . GNU General Public License for more details.
  .
  . You should have received a copy of the GNU General Public License
- . along with this program; if not, write to the Free Software
- . Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ . along with this program; if not, see <http://www.gnu.org/licenses/>.
  .
  . Information contained in this file was obtained from the LAN91C111
  . manual from SMC.  To get a copy, if you really want one, you can find
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index 8564f23a6796..6382b7c416f4 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -14,8 +14,7 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
  *
  ***************************************************************************
  * Rewritten, heavily based on smsc911x simple driver by SMSC.
diff --git a/drivers/net/ethernet/smsc/smsc911x.h b/drivers/net/ethernet/smsc/smsc911x.h
index 9ad5e5d39a03..23953957fed8 100644
--- a/drivers/net/ethernet/smsc/smsc911x.h
+++ b/drivers/net/ethernet/smsc/smsc911x.h
@@ -14,8 +14,7 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
  *
  ***************************************************************************/
 #ifndef __SMSC911X_H__
diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c
index f433d97aa097..d3b967aff9e0 100644
--- a/drivers/net/ethernet/smsc/smsc9420.c
+++ b/drivers/net/ethernet/smsc/smsc9420.c
@@ -13,8 +13,7 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
  *
  ***************************************************************************
  */
@@ -1541,7 +1540,7 @@ static int smsc9420_resume(struct pci_dev *pdev)
 
 	pci_set_master(pdev);
 
-	err = pci_enable_wake(pdev, 0, 0);
+	err = pci_enable_wake(pdev, PCI_D0, 0);
 	if (err)
 		netif_warn(pd, ifup, pd->dev, "pci_enable_wake failed: %d\n",
 			   err);
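
The smsc9420 resume hunk simply spells the target power state as PCI_D0 instead of a bare 0 when disabling wake. A minimal sketch of the resume ordering such a call normally sits in; example_pci_resume() is illustrative and error handling is trimmed.

#include <linux/pci.h>

static int example_pci_resume(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);	/* return to full power */
	pci_restore_state(pdev);		/* restore saved config space */
	pci_set_master(pdev);

	/* disable wake now that the device is in D0, as the hunk above does */
	return pci_enable_wake(pdev, PCI_D0, 0);
}
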
diff --git a/drivers/net/ethernet/smsc/smsc9420.h b/drivers/net/ethernet/smsc/smsc9420.h
index e441402f77a2..c63c76381af6 100644
--- a/drivers/net/ethernet/smsc/smsc9420.h
+++ b/drivers/net/ethernet/smsc/smsc9420.h
@@ -13,8 +13,7 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
  *
  ***************************************************************************
  */
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index 6e52c0f74cd9..e2f202e3932f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -5,6 +5,7 @@ config STMMAC_ETH
 	select PHYLIB
 	select CRC32
 	select PTP_1588_CLOCK
+	select RESET_CONTROLLER
 	---help---
 	  This is the driver for the Ethernet IPs built around a
 	  Synopsys IP Core and only tested on the STMicroelectronics
@@ -25,6 +26,17 @@ config STMMAC_PLATFORM
 
 	  If unsure, say N.
 
+config DWMAC_SUNXI
+	bool "Allwinner GMAC support"
+	depends on STMMAC_PLATFORM && ARCH_SUNXI
+	default y
+	---help---
+	  Support for Allwinner A20/A31 GMAC ethernet controllers.
+
+	  This selects Allwinner SoC glue layer support for the
+	  stmmac device driver. This driver is used for the A20/A31
+	  GMAC ethernet controller.
+
 config STMMAC_PCI
 	bool "STMMAC PCI bus support"
 	depends on STMMAC_ETH && PCI
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index 356a9dd32be7..ecadecea79b2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -1,6 +1,7 @@
 obj-$(CONFIG_STMMAC_ETH) += stmmac.o
 stmmac-$(CONFIG_STMMAC_PLATFORM) += stmmac_platform.o
 stmmac-$(CONFIG_STMMAC_PCI) += stmmac_pci.o
+stmmac-$(CONFIG_DWMAC_SUNXI) += dwmac-sunxi.o
 stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o ring_mode.o	\
 	      chain_mode.o dwmac_lib.o dwmac1000_core.o  dwmac1000_dma.o \
 	      dwmac100_core.o dwmac100_dma.o enh_desc.o  norm_desc.o \
diff --git a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
index d234ab540b29..72d282bf33a5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
+++ b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
@@ -51,6 +51,7 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
 	priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum, STMMAC_CHAIN_MODE);
 
 	while (len != 0) {
+		priv->tx_skbuff[entry] = NULL;
 		entry = (++priv->cur_tx) % txsize;
 		desc = priv->dma_tx + entry;
 
@@ -62,7 +63,6 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
 			priv->hw->desc->prepare_tx_desc(desc, 0, bmax, csum,
 							STMMAC_CHAIN_MODE);
 			priv->hw->desc->set_tx_owner(desc);
-			priv->tx_skbuff[entry] = NULL;
 			len -= bmax;
 			i++;
 		} else {
@@ -73,7 +73,6 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
 			priv->hw->desc->prepare_tx_desc(desc, 0, len, csum,
 							STMMAC_CHAIN_MODE);
 			priv->hw->desc->set_tx_owner(desc);
-			priv->tx_skbuff[entry] = NULL;
 			len = 0;
 		}
 	}
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index fc94f202a43e..7834a3993946 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -29,7 +29,6 @@
 #include <linux/netdevice.h>
 #include <linux/phy.h>
 #include <linux/module.h>
-#include <linux/init.h>
 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
 #define STMMAC_VLAN_TAG_USED
 #include <linux/if_vlan.h>
@@ -293,6 +292,8 @@ struct dma_features {
 #define STMMAC_CHAIN_MODE	0x1
 #define STMMAC_RING_MODE	0x2
 
+#define JUMBO_LEN		9000
+
 struct stmmac_desc_ops {
 	/* DMA RX descriptor ring initialization */
 	void (*init_rx_desc) (struct dma_desc *p, int disable_rx_ic, int mode,
@@ -369,7 +370,7 @@ struct stmmac_dma_ops {
 
 struct stmmac_ops {
 	/* MAC core initialization */
-	void (*core_init) (void __iomem *ioaddr);
+	void (*core_init) (void __iomem *ioaddr, int mtu);
 	/* Enable and verify that the IPC module is supported */
 	int (*rx_ipc) (void __iomem *ioaddr);
 	/* Dump MAC registers */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
new file mode 100644
index 000000000000..771cd15fca18
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
@@ -0,0 +1,140 @@
+/**
+ * dwmac-sunxi.c - Allwinner sunxi DWMAC specific glue layer
+ *
+ * Copyright (C) 2013 Chen-Yu Tsai
+ *
+ * Chen-Yu Tsai  <wens@csie.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/stmmac.h>
+#include <linux/clk.h>
+#include <linux/phy.h>
+#include <linux/of_net.h>
+#include <linux/regulator/consumer.h>
+
+struct sunxi_priv_data {
+	int interface;
+	int clk_enabled;
+	struct clk *tx_clk;
+	struct regulator *regulator;
+};
+
+static void *sun7i_gmac_setup(struct platform_device *pdev)
+{
+	struct sunxi_priv_data *gmac;
+	struct device *dev = &pdev->dev;
+
+	gmac = devm_kzalloc(dev, sizeof(*gmac), GFP_KERNEL);
+	if (!gmac)
+		return ERR_PTR(-ENOMEM);
+
+	gmac->interface = of_get_phy_mode(dev->of_node);
+
+	gmac->tx_clk = devm_clk_get(dev, "allwinner_gmac_tx");
+	if (IS_ERR(gmac->tx_clk)) {
+		dev_err(dev, "could not get tx clock\n");
+		return gmac->tx_clk;
+	}
+
+	/* Optional regulator for PHY */
+	gmac->regulator = devm_regulator_get_optional(dev, "phy");
+	if (IS_ERR(gmac->regulator)) {
+		if (PTR_ERR(gmac->regulator) == -EPROBE_DEFER)
+			return ERR_PTR(-EPROBE_DEFER);
+		dev_info(dev, "no regulator found\n");
+		gmac->regulator = NULL;
+	}
+
+	return gmac;
+}
+
+#define SUN7I_GMAC_GMII_RGMII_RATE	125000000
+#define SUN7I_GMAC_MII_RATE		25000000
+
+static int sun7i_gmac_init(struct platform_device *pdev, void *priv)
+{
+	struct sunxi_priv_data *gmac = priv;
+	int ret;
+
+	if (gmac->regulator) {
+		ret = regulator_enable(gmac->regulator);
+		if (ret)
+			return ret;
+	}
+
+	/* Set GMAC interface port mode
+	 *
+	 * The GMAC TX clock lines are configured by setting the clock
+	 * rate (which relies on the auto-reparenting feature of the
+	 * clock driver) and by enabling/disabling the clock.
+	 */
+	if (gmac->interface == PHY_INTERFACE_MODE_RGMII) {
+		clk_set_rate(gmac->tx_clk, SUN7I_GMAC_GMII_RGMII_RATE);
+		clk_prepare_enable(gmac->tx_clk);
+		gmac->clk_enabled = 1;
+	} else {
+		clk_set_rate(gmac->tx_clk, SUN7I_GMAC_MII_RATE);
+		clk_prepare(gmac->tx_clk);
+	}
+
+	return 0;
+}
+
+static void sun7i_gmac_exit(struct platform_device *pdev, void *priv)
+{
+	struct sunxi_priv_data *gmac = priv;
+
+	if (gmac->clk_enabled) {
+		clk_disable(gmac->tx_clk);
+		gmac->clk_enabled = 0;
+	}
+	clk_unprepare(gmac->tx_clk);
+
+	if (gmac->regulator)
+		regulator_disable(gmac->regulator);
+}
+
+static void sun7i_fix_speed(void *priv, unsigned int speed)
+{
+	struct sunxi_priv_data *gmac = priv;
+
+	/* only GMII mode requires us to reconfigure the clock lines */
+	if (gmac->interface != PHY_INTERFACE_MODE_GMII)
+		return;
+
+	if (gmac->clk_enabled) {
+		clk_disable(gmac->tx_clk);
+		gmac->clk_enabled = 0;
+	}
+	clk_unprepare(gmac->tx_clk);
+
+	if (speed == 1000) {
+		clk_set_rate(gmac->tx_clk, SUN7I_GMAC_GMII_RGMII_RATE);
+		clk_prepare_enable(gmac->tx_clk);
+		gmac->clk_enabled = 1;
+	} else {
+		clk_set_rate(gmac->tx_clk, SUN7I_GMAC_MII_RATE);
+		clk_prepare(gmac->tx_clk);
+	}
+}
+
+/* of_data specifying hardware features and callbacks.
+ * Hardware features were copied from Allwinner drivers. */
+const struct stmmac_of_data sun7i_gmac_data = {
+	.has_gmac = 1,
+	.tx_coe = 1,
+	.fix_mac_speed = sun7i_fix_speed,
+	.setup = sun7i_gmac_setup,
+	.init = sun7i_gmac_init,
+	.exit = sun7i_gmac_exit,
+};
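
sun7i_fix_speed() above only retunes the TX clock when the negotiated rate crosses the gigabit boundary. A hedged sketch of how the core driver is expected to hand the new speed to such a fix_mac_speed callback from its PHY link handler; example_adjust_link() is illustrative and is not the actual stmmac handler.

#include <linux/phy.h>
#include <linux/stmmac.h>

static void example_adjust_link(struct plat_stmmacenet_data *plat,
				struct phy_device *phydev)
{
	/* 1000 Mb/s selects the 125 MHz TX clock; 10/100 select 25 MHz */
	if (phydev->link && plat->fix_mac_speed)
		plat->fix_mac_speed(plat->bsp_priv, phydev->speed);
}
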
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
index c12aabb8cf93..f37d90f114f5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
@@ -126,11 +126,8 @@ enum power_event {
 #define GMAC_ANE_PSE		(3 << 7)
 #define GMAC_ANE_PSE_SHIFT	7
 
- /* GMAC Configuration defines */
-#define GMAC_CONTROL_TC	0x01000000	/* Transmit Conf. in RGMII/SGMII */
-#define GMAC_CONTROL_WD	0x00800000	/* Disable Watchdog on receive */
-
 /* GMAC Configuration defines */
+#define GMAC_CONTROL_2K 0x08000000	/* IEEE 802.3as 2K packets */
 #define GMAC_CONTROL_TC	0x01000000	/* Transmit Conf. in RGMII/SGMII */
 #define GMAC_CONTROL_WD	0x00800000	/* Disable Watchdog on receive */
 #define GMAC_CONTROL_JD	0x00400000	/* Jabber disable */
@@ -156,7 +153,7 @@ enum inter_frame_gap {
 #define GMAC_CONTROL_RE		0x00000004	/* Receiver Enable */
 
 #define GMAC_CORE_INIT (GMAC_CONTROL_JD | GMAC_CONTROL_PS | GMAC_CONTROL_ACS | \
-			GMAC_CONTROL_JE | GMAC_CONTROL_BE)
+			GMAC_CONTROL_BE)
 
 /* GMAC Frame Filter defines */
 #define GMAC_FRAME_FILTER_PR	0x00000001	/* Promiscuous Mode */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index cdd926832e27..b3e148ef5683 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -32,10 +32,15 @@
 #include <asm/io.h>
 #include "dwmac1000.h"
 
-static void dwmac1000_core_init(void __iomem *ioaddr)
+static void dwmac1000_core_init(void __iomem *ioaddr, int mtu)
 {
 	u32 value = readl(ioaddr + GMAC_CONTROL);
 	value |= GMAC_CORE_INIT;
+	if (mtu > 1500)
+		value |= GMAC_CONTROL_2K;
+	if (mtu > 2000)
+		value |= GMAC_CONTROL_JE;
+
 	writel(value, ioaddr + GMAC_CONTROL);
 
 	/* Mask GMAC interrupts */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
index 5857d677dac1..2ff767bcfdd0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
@@ -32,7 +32,7 @@
 #include <asm/io.h>
 #include "dwmac100.h"
 
-static void dwmac100_core_init(void __iomem *ioaddr)
+static void dwmac100_core_init(void __iomem *ioaddr, int mtu)
 {
 	u32 value = readl(ioaddr + MAC_CONTROL);
 
diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
index 1ef9d8a555aa..a96c7c2f5f3f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
+++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
@@ -58,6 +58,7 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
 		priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum,
 						STMMAC_RING_MODE);
 		wmb();
+		priv->tx_skbuff[entry] = NULL;
 		entry = (++priv->cur_tx) % txsize;
 
 		if (priv->extend_desc)
@@ -73,7 +74,6 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
 						STMMAC_RING_MODE);
 		wmb();
 		priv->hw->desc->set_tx_owner(desc);
-		priv->tx_skbuff[entry] = NULL;
 	} else {
 		desc->des2 = dma_map_single(priv->device, skb->data,
 					    nopaged_len, DMA_TO_DEVICE);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index 22f89ffdfd95..d9af26ed58ee 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -32,6 +32,7 @@
 #include <linux/pci.h>
 #include "common.h"
 #include <linux/ptp_clock_kernel.h>
+#include <linux/reset.h>
 
 struct stmmac_priv {
 	/* Frequently used values are kept adjacent for cache effect */
@@ -91,6 +92,7 @@ struct stmmac_priv {
 	int wolopts;
 	int wol_irq;
 	struct clk *stmmac_clk;
+	struct reset_control *stmmac_rst;
 	int clk_csr;
 	struct timer_list eee_ctrl_timer;
 	int lpi_irq;
@@ -105,21 +107,19 @@ struct stmmac_priv {
 	unsigned int default_addend;
 	u32 adv_ts;
 	int use_riwt;
+	int irq_wake;
 	spinlock_t ptp_lock;
 };
 
-extern int phyaddr;
-
 int stmmac_mdio_unregister(struct net_device *ndev);
 int stmmac_mdio_register(struct net_device *ndev);
+int stmmac_mdio_reset(struct mii_bus *mii);
 void stmmac_set_ethtool_ops(struct net_device *netdev);
 extern const struct stmmac_desc_ops enh_desc_ops;
 extern const struct stmmac_desc_ops ndesc_ops;
 extern const struct stmmac_hwtimestamp stmmac_ptp;
 int stmmac_ptp_register(struct stmmac_priv *priv);
 void stmmac_ptp_unregister(struct stmmac_priv *priv);
-int stmmac_freeze(struct net_device *ndev);
-int stmmac_restore(struct net_device *ndev);
 int stmmac_resume(struct net_device *ndev);
 int stmmac_suspend(struct net_device *ndev);
 int stmmac_dvr_remove(struct net_device *ndev);
@@ -130,6 +130,9 @@ void stmmac_disable_eee_mode(struct stmmac_priv *priv);
 bool stmmac_eee_init(struct stmmac_priv *priv);
 
 #ifdef CONFIG_STMMAC_PLATFORM
+#ifdef CONFIG_DWMAC_SUNXI
+extern const struct stmmac_of_data sun7i_gmac_data;
+#endif
 extern struct platform_driver stmmac_pltfr_driver;
 static inline int stmmac_register_platform(void)
 {
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 797b56a0efc4..d93aa87408c2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -43,6 +43,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/slab.h>
 #include <linux/prefetch.h>
+#include <linux/pinctrl/consumer.h>
 #ifdef CONFIG_STMMAC_DEBUG_FS
 #include <linux/debugfs.h>
 #include <linux/seq_file.h>
@@ -50,9 +51,9 @@
 #include <linux/net_tstamp.h>
 #include "stmmac_ptp.h"
 #include "stmmac.h"
+#include <linux/reset.h>
 
 #define STMMAC_ALIGN(x)	L1_CACHE_ALIGN(x)
-#define JUMBO_LEN	9000
 
 /* Module parameters */
 #define TX_TIMEO	5000
@@ -64,7 +65,7 @@ static int debug = -1;
 module_param(debug, int, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
 
-int phyaddr = -1;
+static int phyaddr = -1;
 module_param(phyaddr, int, S_IRUGO);
 MODULE_PARM_DESC(phyaddr, "Physical device address");
 
@@ -91,7 +92,7 @@ static int tc = TC_DEFAULT;
 module_param(tc, int, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(tc, "DMA threshold control value");
 
-#define DMA_BUFFER_SIZE	BUF_SIZE_2KiB
+#define DMA_BUFFER_SIZE	BUF_SIZE_4KiB
 static int buf_sz = DMA_BUFFER_SIZE;
 module_param(buf_sz, int, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
@@ -332,7 +333,7 @@ static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
 		return;
 
 	/* exit if skb doesn't support hw tstamp */
-	if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
+	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
 		return;
 
 	if (priv->adv_ts)
@@ -776,6 +777,7 @@ static int stmmac_init_phy(struct net_device *dev)
 	char phy_id_fmt[MII_BUS_ID_SIZE + 3];
 	char bus_id[MII_BUS_ID_SIZE];
 	int interface = priv->plat->interface;
+	int max_speed = priv->plat->max_speed;
 	priv->oldlink = 0;
 	priv->speed = 0;
 	priv->oldduplex = -1;
@@ -800,7 +802,8 @@ static int stmmac_init_phy(struct net_device *dev)
 
 	/* Stop Advertising 1000BASE Capability if interface is not GMII */
 	if ((interface == PHY_INTERFACE_MODE_MII) ||
-	    (interface == PHY_INTERFACE_MODE_RMII))
+	    (interface == PHY_INTERFACE_MODE_RMII) ||
+	    (max_speed < 1000 && max_speed > 0))
 		phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
 					 SUPPORTED_1000baseT_Full);
 
@@ -990,70 +993,12 @@ static int init_dma_desc_rings(struct net_device *dev)
 	if (bfsize < BUF_SIZE_16KiB)
 		bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
 
+	priv->dma_buf_sz = bfsize;
+
 	if (netif_msg_probe(priv))
 		pr_debug("%s: txsize %d, rxsize %d, bfsize %d\n", __func__,
 			 txsize, rxsize, bfsize);
 
-	if (priv->extend_desc) {
-		priv->dma_erx = dma_alloc_coherent(priv->device, rxsize *
-						   sizeof(struct
-							  dma_extended_desc),
-						   &priv->dma_rx_phy,
-						   GFP_KERNEL);
-		if (!priv->dma_erx)
-			goto err_dma;
-
-		priv->dma_etx = dma_alloc_coherent(priv->device, txsize *
-						   sizeof(struct
-							  dma_extended_desc),
-						   &priv->dma_tx_phy,
-						   GFP_KERNEL);
-		if (!priv->dma_etx) {
-			dma_free_coherent(priv->device, priv->dma_rx_size *
-					sizeof(struct dma_extended_desc),
-					priv->dma_erx, priv->dma_rx_phy);
-			goto err_dma;
-		}
-	} else {
-		priv->dma_rx = dma_alloc_coherent(priv->device, rxsize *
-						  sizeof(struct dma_desc),
-						  &priv->dma_rx_phy,
-						  GFP_KERNEL);
-		if (!priv->dma_rx)
-			goto err_dma;
-
-		priv->dma_tx = dma_alloc_coherent(priv->device, txsize *
-						  sizeof(struct dma_desc),
-						  &priv->dma_tx_phy,
-						  GFP_KERNEL);
-		if (!priv->dma_tx) {
-			dma_free_coherent(priv->device, priv->dma_rx_size *
-					sizeof(struct dma_desc),
-					priv->dma_rx, priv->dma_rx_phy);
-			goto err_dma;
-		}
-	}
-
-	priv->rx_skbuff_dma = kmalloc_array(rxsize, sizeof(dma_addr_t),
-					    GFP_KERNEL);
-	if (!priv->rx_skbuff_dma)
-		goto err_rx_skbuff_dma;
-
-	priv->rx_skbuff = kmalloc_array(rxsize, sizeof(struct sk_buff *),
-					GFP_KERNEL);
-	if (!priv->rx_skbuff)
-		goto err_rx_skbuff;
-
-	priv->tx_skbuff_dma = kmalloc_array(txsize, sizeof(dma_addr_t),
-					    GFP_KERNEL);
-	if (!priv->tx_skbuff_dma)
-		goto err_tx_skbuff_dma;
-
-	priv->tx_skbuff = kmalloc_array(txsize, sizeof(struct sk_buff *),
-					GFP_KERNEL);
-	if (!priv->tx_skbuff)
-		goto err_tx_skbuff;
-
 	if (netif_msg_probe(priv)) {
 		pr_debug("(%s) dma_rx_phy=0x%08x dma_tx_phy=0x%08x\n", __func__,
 			 (u32) priv->dma_rx_phy, (u32) priv->dma_tx_phy);
@@ -1079,7 +1024,6 @@ static int init_dma_desc_rings(struct net_device *dev)
 	}
 	priv->cur_rx = 0;
 	priv->dirty_rx = (unsigned int)(i - rxsize);
-	priv->dma_buf_sz = bfsize;
 	buf_sz = bfsize;
 
 	/* Setup the chained descriptor addresses */
@@ -1121,30 +1065,6 @@ static int init_dma_desc_rings(struct net_device *dev)
 err_init_rx_buffers:
 	while (--i >= 0)
 		stmmac_free_rx_buffers(priv, i);
-	kfree(priv->tx_skbuff);
-err_tx_skbuff:
-	kfree(priv->tx_skbuff_dma);
-err_tx_skbuff_dma:
-	kfree(priv->rx_skbuff);
-err_rx_skbuff:
-	kfree(priv->rx_skbuff_dma);
-err_rx_skbuff_dma:
-	if (priv->extend_desc) {
-		dma_free_coherent(priv->device, priv->dma_tx_size *
-				  sizeof(struct dma_extended_desc),
-				  priv->dma_etx, priv->dma_tx_phy);
-		dma_free_coherent(priv->device, priv->dma_rx_size *
-				  sizeof(struct dma_extended_desc),
-				  priv->dma_erx, priv->dma_rx_phy);
-	} else {
-		dma_free_coherent(priv->device,
-				priv->dma_tx_size * sizeof(struct dma_desc),
-				priv->dma_tx, priv->dma_tx_phy);
-		dma_free_coherent(priv->device,
-				priv->dma_rx_size * sizeof(struct dma_desc),
-				priv->dma_rx, priv->dma_rx_phy);
-	}
-err_dma:
 	return ret;
 }
 
@@ -1161,25 +1081,107 @@ static void dma_free_tx_skbufs(struct stmmac_priv *priv)
 	int i;
 
 	for (i = 0; i < priv->dma_tx_size; i++) {
-		if (priv->tx_skbuff[i] != NULL) {
-			struct dma_desc *p;
-			if (priv->extend_desc)
-				p = &((priv->dma_etx + i)->basic);
-			else
-				p = priv->dma_tx + i;
+		struct dma_desc *p;
 
-			if (priv->tx_skbuff_dma[i])
-				dma_unmap_single(priv->device,
-						 priv->tx_skbuff_dma[i],
-						 priv->hw->desc->get_tx_len(p),
-						 DMA_TO_DEVICE);
+		if (priv->extend_desc)
+			p = &((priv->dma_etx + i)->basic);
+		else
+			p = priv->dma_tx + i;
+
+		if (priv->tx_skbuff_dma[i]) {
+			dma_unmap_single(priv->device,
+					 priv->tx_skbuff_dma[i],
+					 priv->hw->desc->get_tx_len(p),
+					 DMA_TO_DEVICE);
+			priv->tx_skbuff_dma[i] = 0;
+		}
+
+		if (priv->tx_skbuff[i] != NULL) {
 			dev_kfree_skb_any(priv->tx_skbuff[i]);
 			priv->tx_skbuff[i] = NULL;
-			priv->tx_skbuff_dma[i] = 0;
 		}
 	}
 }
 
+static int alloc_dma_desc_resources(struct stmmac_priv *priv)
+{
+	unsigned int txsize = priv->dma_tx_size;
+	unsigned int rxsize = priv->dma_rx_size;
+	int ret = -ENOMEM;
+
+	priv->rx_skbuff_dma = kmalloc_array(rxsize, sizeof(dma_addr_t),
+					    GFP_KERNEL);
+	if (!priv->rx_skbuff_dma)
+		return -ENOMEM;
+
+	priv->rx_skbuff = kmalloc_array(rxsize, sizeof(struct sk_buff *),
+					GFP_KERNEL);
+	if (!priv->rx_skbuff)
+		goto err_rx_skbuff;
+
+	priv->tx_skbuff_dma = kmalloc_array(txsize, sizeof(dma_addr_t),
+					    GFP_KERNEL);
+	if (!priv->tx_skbuff_dma)
+		goto err_tx_skbuff_dma;
+
+	priv->tx_skbuff = kmalloc_array(txsize, sizeof(struct sk_buff *),
+					GFP_KERNEL);
+	if (!priv->tx_skbuff)
+		goto err_tx_skbuff;
+
+	if (priv->extend_desc) {
+		priv->dma_erx = dma_alloc_coherent(priv->device, rxsize *
+						   sizeof(struct
+							  dma_extended_desc),
+						   &priv->dma_rx_phy,
+						   GFP_KERNEL);
+		if (!priv->dma_erx)
+			goto err_dma;
+
+		priv->dma_etx = dma_alloc_coherent(priv->device, txsize *
+						   sizeof(struct
+							  dma_extended_desc),
+						   &priv->dma_tx_phy,
+						   GFP_KERNEL);
+		if (!priv->dma_etx) {
+			dma_free_coherent(priv->device, priv->dma_rx_size *
+					sizeof(struct dma_extended_desc),
+					priv->dma_erx, priv->dma_rx_phy);
+			goto err_dma;
+		}
+	} else {
+		priv->dma_rx = dma_alloc_coherent(priv->device, rxsize *
+						  sizeof(struct dma_desc),
+						  &priv->dma_rx_phy,
+						  GFP_KERNEL);
+		if (!priv->dma_rx)
+			goto err_dma;
+
+		priv->dma_tx = dma_alloc_coherent(priv->device, txsize *
+						  sizeof(struct dma_desc),
+						  &priv->dma_tx_phy,
+						  GFP_KERNEL);
+		if (!priv->dma_tx) {
+			dma_free_coherent(priv->device, priv->dma_rx_size *
+					sizeof(struct dma_desc),
+					priv->dma_rx, priv->dma_rx_phy);
+			goto err_dma;
+		}
+	}
+
+	return 0;
+
+err_dma:
+	kfree(priv->tx_skbuff);
+err_tx_skbuff:
+	kfree(priv->tx_skbuff_dma);
+err_tx_skbuff_dma:
+	kfree(priv->rx_skbuff);
+err_rx_skbuff:
+	kfree(priv->rx_skbuff_dma);
+	return ret;
+}
+
 static void free_dma_desc_resources(struct stmmac_priv *priv)
 {
 	/* Release the DMA TX/RX socket buffers */
@@ -1589,49 +1591,29 @@ static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
 }
 
 /**
- *  stmmac_open - open entry point of the driver
+ * stmmac_hw_setup: set up the MAC in a usable state.
  *  @dev : pointer to the device structure.
  *  Description:
- *  This function is the open entry point of the driver.
+ *  This function sets up the IP in a usable state.
  *  Return value:
  *  0 on success and an appropriate (-)ve integer as defined in errno.h
  *  file on failure.
  */
-static int stmmac_open(struct net_device *dev)
+static int stmmac_hw_setup(struct net_device *dev)
 {
 	struct stmmac_priv *priv = netdev_priv(dev);
 	int ret;
 
-	clk_prepare_enable(priv->stmmac_clk);
-
-	stmmac_check_ether_addr(priv);
-
-	if (priv->pcs != STMMAC_PCS_RGMII && priv->pcs != STMMAC_PCS_TBI &&
-	    priv->pcs != STMMAC_PCS_RTBI) {
-		ret = stmmac_init_phy(dev);
-		if (ret) {
-			pr_err("%s: Cannot attach to PHY (error: %d)\n",
-			       __func__, ret);
-			goto phy_error;
-		}
-	}
-
-	/* Create and initialize the TX/RX descriptors chains. */
-	priv->dma_tx_size = STMMAC_ALIGN(dma_txsize);
-	priv->dma_rx_size = STMMAC_ALIGN(dma_rxsize);
-	priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
-
 	ret = init_dma_desc_rings(dev);
 	if (ret < 0) {
 		pr_err("%s: DMA descriptors initialization failed\n", __func__);
-		goto dma_desc_error;
+		return ret;
 	}
-
 	/* DMA initialization and SW reset */
 	ret = stmmac_init_dma_engine(priv);
 	if (ret < 0) {
 		pr_err("%s: DMA engine initialization failed\n", __func__);
-		goto init_error;
+		return ret;
 	}
 
 	/* Copy the MAC addr into the HW  */
@@ -1642,38 +1624,7 @@ static int stmmac_open(struct net_device *dev)
 		priv->plat->bus_setup(priv->ioaddr);
 
 	/* Initialize the MAC Core */
-	priv->hw->mac->core_init(priv->ioaddr);
-
-	/* Request the IRQ lines */
-	ret = request_irq(dev->irq, stmmac_interrupt,
-			  IRQF_SHARED, dev->name, dev);
-	if (unlikely(ret < 0)) {
-		pr_err("%s: ERROR: allocating the IRQ %d (error: %d)\n",
-		       __func__, dev->irq, ret);
-		goto init_error;
-	}
-
-	/* Request the Wake IRQ in case of another line is used for WoL */
-	if (priv->wol_irq != dev->irq) {
-		ret = request_irq(priv->wol_irq, stmmac_interrupt,
-				  IRQF_SHARED, dev->name, dev);
-		if (unlikely(ret < 0)) {
-			pr_err("%s: ERROR: allocating the WoL IRQ %d (%d)\n",
-			       __func__, priv->wol_irq, ret);
-			goto wolirq_error;
-		}
-	}
-
-	/* Request the IRQ lines */
-	if (priv->lpi_irq != -ENXIO) {
-		ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
-				  dev->name, dev);
-		if (unlikely(ret < 0)) {
-			pr_err("%s: ERROR: allocating the LPI IRQ %d (%d)\n",
-			       __func__, priv->lpi_irq, ret);
-			goto lpiirq_error;
-		}
-	}
+	priv->hw->mac->core_init(priv->ioaddr, dev->mtu);
 
 	/* Enable the MAC Rx/Tx */
 	stmmac_set_mac(priv->ioaddr, true);
@@ -1681,10 +1632,6 @@ static int stmmac_open(struct net_device *dev)
 	/* Set the HW DMA mode and the COE */
 	stmmac_dma_operation_mode(priv);
 
-	/* Extra statistics */
-	memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
-	priv->xstats.threshold = tc;
-
 	stmmac_mmc_setup(priv);
 
 	ret = stmmac_init_ptp(priv);
@@ -1706,10 +1653,6 @@ static int stmmac_open(struct net_device *dev)
 		priv->hw->mac->dump_regs(priv->ioaddr);
 		priv->hw->dma->dump_regs(priv->ioaddr);
 	}
-
-	if (priv->phydev)
-		phy_start(priv->phydev);
-
 	priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
 
 	priv->eee_enabled = stmmac_eee_init(priv);
@@ -1724,6 +1667,90 @@ static int stmmac_open(struct net_device *dev)
 	if (priv->pcs && priv->hw->mac->ctrl_ane)
 		priv->hw->mac->ctrl_ane(priv->ioaddr, 0);
 
+	return 0;
+}
+
+/**
+ *  stmmac_open - open entry point of the driver
+ *  @dev : pointer to the device structure.
+ *  Description:
+ *  This function is the open entry point of the driver.
+ *  Return value:
+ *  0 on success and an appropriate (-)ve integer as defined in errno.h
+ *  file on failure.
+ */
+static int stmmac_open(struct net_device *dev)
+{
+	struct stmmac_priv *priv = netdev_priv(dev);
+	int ret;
+
+	stmmac_check_ether_addr(priv);
+
+	if (priv->pcs != STMMAC_PCS_RGMII && priv->pcs != STMMAC_PCS_TBI &&
+	    priv->pcs != STMMAC_PCS_RTBI) {
+		ret = stmmac_init_phy(dev);
+		if (ret) {
+			pr_err("%s: Cannot attach to PHY (error: %d)\n",
+			       __func__, ret);
+			goto phy_error;
+		}
+	}
+
+	/* Extra statistics */
+	memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
+	priv->xstats.threshold = tc;
+
+	/* Create and initialize the TX/RX descriptors chains. */
+	priv->dma_tx_size = STMMAC_ALIGN(dma_txsize);
+	priv->dma_rx_size = STMMAC_ALIGN(dma_rxsize);
+	priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
+
+	ret = alloc_dma_desc_resources(priv);
+	if (ret < 0) {
+		pr_err("%s: DMA descriptors allocation failed\n", __func__);
+		goto dma_desc_error;
+	}
+
+	ret = stmmac_hw_setup(dev);
+	if (ret < 0) {
+		pr_err("%s: Hw setup failed\n", __func__);
+		goto init_error;
+	}
+
+	if (priv->phydev)
+		phy_start(priv->phydev);
+
+	/* Request the IRQ lines */
+	ret = request_irq(dev->irq, stmmac_interrupt,
+			  IRQF_SHARED, dev->name, dev);
+	if (unlikely(ret < 0)) {
+		pr_err("%s: ERROR: allocating the IRQ %d (error: %d)\n",
+		       __func__, dev->irq, ret);
+		goto init_error;
+	}
+
+	/* Request the Wake IRQ in case another line is used for WoL */
+	if (priv->wol_irq != dev->irq) {
+		ret = request_irq(priv->wol_irq, stmmac_interrupt,
+				  IRQF_SHARED, dev->name, dev);
+		if (unlikely(ret < 0)) {
+			pr_err("%s: ERROR: allocating the WoL IRQ %d (%d)\n",
+			       __func__, priv->wol_irq, ret);
+			goto wolirq_error;
+		}
+	}
+
+	/* Request the IRQ lines */
+	if (priv->lpi_irq != -ENXIO) {
+		ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
+				  dev->name, dev);
+		if (unlikely(ret < 0)) {
+			pr_err("%s: ERROR: allocating the LPI IRQ %d (%d)\n",
+			       __func__, priv->lpi_irq, ret);
+			goto lpiirq_error;
+		}
+	}
+
 	napi_enable(&priv->napi);
 	netif_start_queue(dev);
 
@@ -1794,7 +1821,6 @@ static int stmmac_release(struct net_device *dev)
 #ifdef CONFIG_STMMAC_DEBUG_FS
 	stmmac_exit_fs();
 #endif
-	clk_disable_unprepare(priv->stmmac_clk);
 
 	stmmac_release_ptp(priv);
 
@@ -1844,8 +1870,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	first = desc;
 
-	priv->tx_skbuff[entry] = skb;
-
 	/* To program the descriptors according to the size of the frame */
 	if (priv->mode == STMMAC_RING_MODE) {
 		is_jumbo = priv->hw->ring->is_jumbo_frm(skb->len,
@@ -1873,6 +1897,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 		int len = skb_frag_size(frag);
 
+		priv->tx_skbuff[entry] = NULL;
 		entry = (++priv->cur_tx) % txsize;
 		if (priv->extend_desc)
 			desc = (struct dma_desc *)(priv->dma_etx + entry);
@@ -1882,7 +1907,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 		desc->des2 = skb_frag_dma_map(priv->device, frag, 0, len,
 					      DMA_TO_DEVICE);
 		priv->tx_skbuff_dma[entry] = desc->des2;
-		priv->tx_skbuff[entry] = NULL;
 		priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion,
 						priv->mode);
 		wmb();
@@ -1890,6 +1914,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 		wmb();
 	}
 
+	priv->tx_skbuff[entry] = skb;
+
 	/* Finalize the latest segment. */
 	priv->hw->desc->close_tx_desc(desc);
 
@@ -1951,6 +1977,23 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 	return NETDEV_TX_OK;
 }
 
+static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
+{
+	struct ethhdr *ehdr;
+	u16 vlanid;
+
+	if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) ==
+	    NETIF_F_HW_VLAN_CTAG_RX &&
+	    !__vlan_get_tag(skb, &vlanid)) {
+		/* pop the vlan tag */
+		ehdr = (struct ethhdr *)skb->data;
+		memmove(skb->data + VLAN_HLEN, ehdr, ETH_ALEN * 2);
+		skb_pull(skb, VLAN_HLEN);
+		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlanid);
+	}
+}
+
+
 /**
  * stmmac_rx_refill: refill used skb preallocated buffers
  * @priv: driver private structure
@@ -2102,6 +2145,8 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
 				print_pkt(skb->data, frame_len);
 			}
 
+			stmmac_rx_vlan(priv->dev, skb);
+
 			skb->protocol = eth_type_trans(skb, priv->dev);
 
 			if (unlikely(!coe))
@@ -2229,6 +2274,9 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
 	else
 		max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
 
+	if (priv->plat->maxmtu < max_mtu)
+		max_mtu = priv->plat->maxmtu;
+
 	if ((new_mtu < 46) || (new_mtu > max_mtu)) {
 		pr_err("%s: invalid MTU, max MTU is: %d\n", dev->name, max_mtu);
 		return -EINVAL;
@@ -2276,6 +2324,9 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
 	struct net_device *dev = (struct net_device *)dev_id;
 	struct stmmac_priv *priv = netdev_priv(dev);
 
+	if (priv->irq_wake)
+		pm_wakeup_event(priv->device, 0);
+
 	if (unlikely(!dev)) {
 		pr_err("%s: invalid dev pointer\n", __func__);
 		return IRQ_NONE;
@@ -2680,10 +2731,32 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device,
 	if ((phyaddr >= 0) && (phyaddr <= 31))
 		priv->plat->phy_addr = phyaddr;
 
+	priv->stmmac_clk = devm_clk_get(priv->device, STMMAC_RESOURCE_NAME);
+	if (IS_ERR(priv->stmmac_clk)) {
+		dev_warn(priv->device, "%s: warning: cannot get CSR clock\n",
+			 __func__);
+		ret = PTR_ERR(priv->stmmac_clk);
+		goto error_clk_get;
+	}
+	clk_prepare_enable(priv->stmmac_clk);
+
+	priv->stmmac_rst = devm_reset_control_get(priv->device,
+						  STMMAC_RESOURCE_NAME);
+	if (IS_ERR(priv->stmmac_rst)) {
+		if (PTR_ERR(priv->stmmac_rst) == -EPROBE_DEFER) {
+			ret = -EPROBE_DEFER;
+			goto error_hw_init;
+		}
+		dev_info(priv->device, "no reset control found\n");
+		priv->stmmac_rst = NULL;
+	}
+	if (priv->stmmac_rst)
+		reset_control_deassert(priv->stmmac_rst);
+
 	/* Init MAC and get the capabilities */
 	ret = stmmac_hw_init(priv);
 	if (ret)
-		goto error_free_netdev;
+		goto error_hw_init;
 
 	ndev->netdev_ops = &stmmac_netdev_ops;
 
@@ -2721,12 +2794,6 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device,
 		goto error_netdev_register;
 	}
 
-	priv->stmmac_clk = clk_get(priv->device, STMMAC_RESOURCE_NAME);
-	if (IS_ERR(priv->stmmac_clk)) {
-		pr_warn("%s: warning: cannot get CSR clock\n", __func__);
-		goto error_clk_get;
-	}
-
 	/* If a specific clk_csr value is passed from the platform
 	 * this means that the CSR Clock Range selection cannot be
 	 * changed at run-time and it is fixed. Viceversa the driver'll try to
@@ -2754,15 +2821,15 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device,
 	return priv;
 
 error_mdio_register:
-	clk_put(priv->stmmac_clk);
-error_clk_get:
 	unregister_netdev(ndev);
 error_netdev_register:
 	netif_napi_del(&priv->napi);
-error_free_netdev:
+error_hw_init:
+	clk_disable_unprepare(priv->stmmac_clk);
+error_clk_get:
 	free_netdev(ndev);
 
-	return NULL;
+	return ERR_PTR(ret);
 }
 
 /**
@@ -2786,6 +2853,9 @@ int stmmac_dvr_remove(struct net_device *ndev)
 		stmmac_mdio_unregister(ndev);
 	netif_carrier_off(ndev);
 	unregister_netdev(ndev);
+	if (priv->stmmac_rst)
+		reset_control_assert(priv->stmmac_rst);
+	clk_disable_unprepare(priv->stmmac_clk);
 	free_netdev(ndev);
 
 	return 0;
@@ -2817,10 +2887,12 @@ int stmmac_suspend(struct net_device *ndev)
 	stmmac_clear_descriptors(priv);
 
 	/* Enable Power down mode by programming the PMT regs */
-	if (device_may_wakeup(priv->device))
+	if (device_may_wakeup(priv->device)) {
 		priv->hw->mac->pmt(priv->ioaddr, priv->wolopts);
-	else {
+		priv->irq_wake = 1;
+	} else {
 		stmmac_set_mac(priv->ioaddr, false);
+		pinctrl_pm_select_sleep_state(priv->device);
 		/* Disable clock in case of PWM is off */
 		clk_disable_unprepare(priv->stmmac_clk);
 	}
@@ -2844,18 +2916,21 @@ int stmmac_resume(struct net_device *ndev)
 	 * this bit because it can generate problems while resuming
 	 * from other devices (e.g. serial console).
 	 */
-	if (device_may_wakeup(priv->device))
+	if (device_may_wakeup(priv->device)) {
 		priv->hw->mac->pmt(priv->ioaddr, 0);
-	else
+		priv->irq_wake = 0;
+	} else {
+		pinctrl_pm_select_default_state(priv->device);
 		/* enable the clk prevously disabled */
 		clk_prepare_enable(priv->stmmac_clk);
+		/* reset the phy so that it's ready */
+		if (priv->mii)
+			stmmac_mdio_reset(priv->mii);
+	}
 
 	netif_device_attach(ndev);
 
-	/* Enable the MAC and DMA */
-	stmmac_set_mac(priv->ioaddr, true);
-	priv->hw->dma->start_tx(priv->ioaddr);
-	priv->hw->dma->start_rx(priv->ioaddr);
+	stmmac_hw_setup(ndev);
 
 	napi_enable(&priv->napi);
 
@@ -2868,22 +2943,6 @@ int stmmac_resume(struct net_device *ndev)
 
 	return 0;
 }
-
-int stmmac_freeze(struct net_device *ndev)
-{
-	if (!ndev || !netif_running(ndev))
-		return 0;
-
-	return stmmac_release(ndev);
-}
-
-int stmmac_restore(struct net_device *ndev)
-{
-	if (!ndev || !netif_running(ndev))
-		return 0;
-
-	return stmmac_open(ndev);
-}
 #endif /* CONFIG_PM */
 
 /* Driver can be configured w/ and w/ both PCI and Platf drivers
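
The stmmac_main.c hunks above arm an irq_wake flag on suspend and report wake-ups with pm_wakeup_event() from the interrupt handler. A small hedged sketch of the wakeup plumbing this relies on; example_arm_wol() is illustrative and assumes the WoL interrupt number is already known.

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/pm_wakeup.h>

/* Mark the device wakeup-capable and arm its WoL IRQ so that
 * pm_wakeup_event() calls from the ISR are accounted by the PM core.
 */
static int example_arm_wol(struct device *dev, int wol_irq)
{
	int ret = device_init_wakeup(dev, true);

	if (ret)
		return ret;

	return enable_irq_wake(wol_irq);
}
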
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index fe7bc9903867..a468eb107823 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -128,7 +128,7 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg,
  * @bus: points to the mii_bus structure
  * Description: reset the MII bus
  */
-static int stmmac_mdio_reset(struct mii_bus *bus)
+int stmmac_mdio_reset(struct mii_bus *bus)
 {
 #if defined(CONFIG_STMMAC_PLATFORM)
 	struct net_device *ndev = bus->priv;
@@ -166,7 +166,6 @@ static int stmmac_mdio_reset(struct mii_bus *bus)
 			udelay(data->delays[1]);
 			gpio_set_value(reset_gpio, active_low ? 1 : 0);
 			udelay(data->delays[2]);
-			gpio_free(reset_gpio);
 		}
 	}
 #endif
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index 644d80ece067..291608924849 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -26,9 +26,9 @@
 #include <linux/pci.h>
 #include "stmmac.h"
 
-struct plat_stmmacenet_data plat_dat;
-struct stmmac_mdio_bus_data mdio_data;
-struct stmmac_dma_cfg dma_cfg;
+static struct plat_stmmacenet_data plat_dat;
+static struct stmmac_mdio_bus_data mdio_data;
+static struct stmmac_dma_cfg dma_cfg;
 
 static void stmmac_default_data(void)
 {
@@ -100,9 +100,9 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
 	stmmac_default_data();
 
 	priv = stmmac_dvr_probe(&(pdev->dev), &plat_dat, addr);
-	if (!priv) {
+	if (IS_ERR(priv)) {
 		pr_err("%s: main driver probe failed", __func__);
-		ret = -ENODEV;
+		ret = PTR_ERR(priv);
 		goto err_out;
 	}
 	priv->dev->irq = pdev->irq;
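
stmmac_dvr_probe() now returns ERR_PTR(ret) rather than NULL, so the PCI and platform glue can propagate the exact errno (including -EPROBE_DEFER) instead of collapsing every failure to -ENODEV. A minimal sketch of the ERR_PTR / IS_ERR / PTR_ERR idiom with a hypothetical constructor; example_ctx_create() and example_probe() are illustrative names.

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/slab.h>

struct example_ctx {
	int id;
};

/* Encode the failure reason in the returned pointer itself. */
static struct example_ctx *example_ctx_create(gfp_t gfp)
{
	struct example_ctx *ctx = kzalloc(sizeof(*ctx), gfp);

	if (!ctx)
		return ERR_PTR(-ENOMEM);
	return ctx;
}

static int example_probe(void)
{
	struct example_ctx *ctx = example_ctx_create(GFP_KERNEL);

	if (IS_ERR(ctx))
		return PTR_ERR(ctx);	/* hand back the real errno */

	kfree(ctx);
	return 0;
}
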
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 51c9069ef405..5884a7d2063b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -26,8 +26,23 @@
 #include <linux/io.h>
 #include <linux/of.h>
 #include <linux/of_net.h>
+#include <linux/of_device.h>
 #include "stmmac.h"
 
+static const struct of_device_id stmmac_dt_ids[] = {
+#ifdef CONFIG_DWMAC_SUNXI
+	{ .compatible = "allwinner,sun7i-a20-gmac", .data = &sun7i_gmac_data},
+#endif
+	/* SoC specific glue layers should come before generic bindings */
+	{ .compatible = "st,spear600-gmac"},
+	{ .compatible = "snps,dwmac-3.610"},
+	{ .compatible = "snps,dwmac-3.70a"},
+	{ .compatible = "snps,dwmac-3.710"},
+	{ .compatible = "snps,dwmac"},
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, stmmac_dt_ids);
+
 #ifdef CONFIG_OF
 static int stmmac_probe_config_dt(struct platform_device *pdev,
 				  struct plat_stmmacenet_data *plat,
@@ -35,23 +50,63 @@ static int stmmac_probe_config_dt(struct platform_device *pdev,
 {
 	struct device_node *np = pdev->dev.of_node;
 	struct stmmac_dma_cfg *dma_cfg;
+	const struct of_device_id *device;
 
 	if (!np)
 		return -ENODEV;
 
+	device = of_match_device(stmmac_dt_ids, &pdev->dev);
+	if (!device)
+		return -ENODEV;
+
+	if (device->data) {
+		const struct stmmac_of_data *data = device->data;
+		plat->has_gmac = data->has_gmac;
+		plat->enh_desc = data->enh_desc;
+		plat->tx_coe = data->tx_coe;
+		plat->rx_coe = data->rx_coe;
+		plat->bugged_jumbo = data->bugged_jumbo;
+		plat->pmt = data->pmt;
+		plat->riwt_off = data->riwt_off;
+		plat->fix_mac_speed = data->fix_mac_speed;
+		plat->bus_setup = data->bus_setup;
+		plat->setup = data->setup;
+		plat->free = data->free;
+		plat->init = data->init;
+		plat->exit = data->exit;
+	}
+
 	*mac = of_get_mac_address(np);
 	plat->interface = of_get_phy_mode(np);
 
+	/* Get max speed of operation from device tree */
+	if (of_property_read_u32(np, "max-speed", &plat->max_speed))
+		plat->max_speed = -1;
+
 	plat->bus_id = of_alias_get_id(np, "ethernet");
 	if (plat->bus_id < 0)
 		plat->bus_id = 0;
 
-	of_property_read_u32(np, "snps,phy-addr", &plat->phy_addr);
+	/* Default to phy auto-detection */
+	plat->phy_addr = -1;
+
+	/* "snps,phy-addr" is not a standard property. Mark it as deprecated
+	 * and warn of its use. Remove this when phy node support is added.
+	 */
+	if (of_property_read_u32(np, "snps,phy-addr", &plat->phy_addr) == 0)
+		dev_warn(&pdev->dev, "snps,phy-addr property is deprecated\n");
 
 	plat->mdio_bus_data = devm_kzalloc(&pdev->dev,
 					   sizeof(struct stmmac_mdio_bus_data),
 					   GFP_KERNEL);
 
+	plat->force_sf_dma_mode = of_property_read_bool(np, "snps,force_sf_dma_mode");
+
+	/* Set the maxmtu to a default of JUMBO_LEN in case the
+	 * parameter is not present in the device tree.
+	 */
+	plat->maxmtu = JUMBO_LEN;
+
 	/*
 	 * Currently only the properties needed on SPEAr600
 	 * are provided. All other properties should be added
@@ -60,6 +115,14 @@ static int stmmac_probe_config_dt(struct platform_device *pdev,
 	if (of_device_is_compatible(np, "st,spear600-gmac") ||
 		of_device_is_compatible(np, "snps,dwmac-3.70a") ||
 		of_device_is_compatible(np, "snps,dwmac")) {
+		/* Note that although the ePAPR v1.1 spec defines this
+		 * parameter as max-frame-size, it is actually used as the
+		 * IEEE definition of MAC Client data, i.e. the MTU. The
+		 * ePAPR specification is confusing here: the property is
+		 * named max-frame-size, but its usage examples are clearly
+		 * MTU values.
+		 */
+		of_property_read_u32(np, "max-frame-size", &plat->maxmtu);
 		plat->has_gmac = 1;
 		plat->pmt = 1;
 	}
@@ -140,17 +203,24 @@ static int stmmac_pltfr_probe(struct platform_device *pdev)
 		}
 	}
 
+	/* Custom setup (if needed) */
+	if (plat_dat->setup) {
+		plat_dat->bsp_priv = plat_dat->setup(pdev);
+		if (IS_ERR(plat_dat->bsp_priv))
+			return PTR_ERR(plat_dat->bsp_priv);
+	}
+
 	/* Custom initialisation (if needed)*/
 	if (plat_dat->init) {
-		ret = plat_dat->init(pdev);
+		ret = plat_dat->init(pdev, plat_dat->bsp_priv);
 		if (unlikely(ret))
 			return ret;
 	}
 
 	priv = stmmac_dvr_probe(&(pdev->dev), plat_dat, addr);
-	if (!priv) {
+	if (IS_ERR(priv)) {
 		pr_err("%s: main driver probe failed", __func__);
-		return -ENODEV;
+		return PTR_ERR(priv);
 	}
 
 	/* Get MAC address if available (DT) */
@@ -199,7 +269,10 @@ static int stmmac_pltfr_remove(struct platform_device *pdev)
 	int ret = stmmac_dvr_remove(ndev);
 
 	if (priv->plat->exit)
-		priv->plat->exit(pdev);
+		priv->plat->exit(pdev, priv->plat->bsp_priv);
+
+	if (priv->plat->free)
+		priv->plat->free(pdev, priv->plat->bsp_priv);
 
 	return ret;
 }
@@ -207,64 +280,34 @@ static int stmmac_pltfr_remove(struct platform_device *pdev)
 #ifdef CONFIG_PM
 static int stmmac_pltfr_suspend(struct device *dev)
 {
-	struct net_device *ndev = dev_get_drvdata(dev);
-
-	return stmmac_suspend(ndev);
-}
-
-static int stmmac_pltfr_resume(struct device *dev)
-{
-	struct net_device *ndev = dev_get_drvdata(dev);
-
-	return stmmac_resume(ndev);
-}
-
-int stmmac_pltfr_freeze(struct device *dev)
-{
 	int ret;
-	struct plat_stmmacenet_data *plat_dat = dev_get_platdata(dev);
 	struct net_device *ndev = dev_get_drvdata(dev);
+	struct stmmac_priv *priv = netdev_priv(ndev);
 	struct platform_device *pdev = to_platform_device(dev);
 
-	ret = stmmac_freeze(ndev);
-	if (plat_dat->exit)
-		plat_dat->exit(pdev);
+	ret = stmmac_suspend(ndev);
+	if (priv->plat->exit)
+		priv->plat->exit(pdev, priv->plat->bsp_priv);
 
 	return ret;
 }
 
-int stmmac_pltfr_restore(struct device *dev)
+static int stmmac_pltfr_resume(struct device *dev)
 {
-	struct plat_stmmacenet_data *plat_dat = dev_get_platdata(dev);
 	struct net_device *ndev = dev_get_drvdata(dev);
+	struct stmmac_priv *priv = netdev_priv(ndev);
 	struct platform_device *pdev = to_platform_device(dev);
 
-	if (plat_dat->init)
-		plat_dat->init(pdev);
+	if (priv->plat->init)
+		priv->plat->init(pdev, priv->plat->bsp_priv);
 
-	return stmmac_restore(ndev);
+	return stmmac_resume(ndev);
 }
 
-static const struct dev_pm_ops stmmac_pltfr_pm_ops = {
-	.suspend = stmmac_pltfr_suspend,
-	.resume = stmmac_pltfr_resume,
-	.freeze = stmmac_pltfr_freeze,
-	.thaw = stmmac_pltfr_restore,
-	.restore = stmmac_pltfr_restore,
-};
-#else
-static const struct dev_pm_ops stmmac_pltfr_pm_ops;
 #endif /* CONFIG_PM */
 
-static const struct of_device_id stmmac_dt_ids[] = {
-	{ .compatible = "st,spear600-gmac"},
-	{ .compatible = "snps,dwmac-3.610"},
-	{ .compatible = "snps,dwmac-3.70a"},
-	{ .compatible = "snps,dwmac-3.710"},
-	{ .compatible = "snps,dwmac"},
-	{ /* sentinel */ }
-};
-MODULE_DEVICE_TABLE(of, stmmac_dt_ids);
+static SIMPLE_DEV_PM_OPS(stmmac_pltfr_pm_ops,
+			stmmac_pltfr_suspend, stmmac_pltfr_resume);
 
 struct platform_driver stmmac_pltfr_driver = {
 	.probe = stmmac_pltfr_probe,
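
The stmmac platform hunks above add an of_device_id .data hook plus setup/init/exit/free callbacks that carry a bsp_priv cookie, and re-run exit/init around suspend/resume. As a minimal sketch (not part of this patch) of how a SoC glue layer could plug into these hooks: the foo_gmac_* names, struct foo_gmac_priv and the "vendor,foo-gmac" compatible are hypothetical, and the callback prototypes are inferred from the call sites above, so the exact field types in plat_stmmacenet_data may differ.

/* Assumes <linux/platform_device.h>, <linux/of_device.h>, <linux/err.h>
 * and the stmmac platform header that defines struct stmmac_of_data.
 */
struct foo_gmac_priv {
	struct clk *tx_clk;		/* whatever the glue layer needs */
};

static void *foo_gmac_setup(struct platform_device *pdev)
{
	struct foo_gmac_priv *bsp;

	bsp = devm_kzalloc(&pdev->dev, sizeof(*bsp), GFP_KERNEL);
	if (!bsp)
		return ERR_PTR(-ENOMEM);	/* probe checks IS_ERR() */
	/* look up clocks/regmaps here */
	return bsp;				/* stored as plat->bsp_priv */
}

static int foo_gmac_init(struct platform_device *pdev, void *bsp_priv)
{
	/* enable clocks, deassert resets; also re-run on resume */
	return 0;
}

static void foo_gmac_exit(struct platform_device *pdev, void *bsp_priv)
{
	/* undo foo_gmac_init(); also run on suspend and remove */
}

static const struct stmmac_of_data foo_gmac_data = {
	.has_gmac	= 1,
	.setup		= foo_gmac_setup,
	.init		= foo_gmac_init,
	.exit		= foo_gmac_exit,
};

/* matched by of_match_device(stmmac_dt_ids, &pdev->dev) in the probe above */
static const struct of_device_id stmmac_dt_ids[] = {
	{ .compatible = "vendor,foo-gmac", .data = &foo_gmac_data },
	{ /* sentinel */ }
};
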
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index b4d50d74ba18..df8d383acf48 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -14,9 +14,7 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
- * 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
  *
  * This driver uses the sungem driver (c) David Miller
  * (davem@redhat.com) as its basis.
diff --git a/drivers/net/ethernet/sun/cassini.h b/drivers/net/ethernet/sun/cassini.h
index b361424d5f57..882ce168a799 100644
--- a/drivers/net/ethernet/sun/cassini.h
+++ b/drivers/net/ethernet/sun/cassini.h
@@ -15,9 +15,7 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
- * 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
  *
  * vendor id: 0x108E (Sun Microsystems, Inc.)
  * device id: 0xabba (Cassini)
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 388540fcb977..8e2266e1f260 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -3493,10 +3493,12 @@ static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np,
 
 	rh = (struct rx_pkt_hdr1 *) skb->data;
 	if (np->dev->features & NETIF_F_RXHASH)
-		skb->rxhash = ((u32)rh->hashval2_0 << 24 |
-			       (u32)rh->hashval2_1 << 16 |
-			       (u32)rh->hashval1_1 << 8 |
-			       (u32)rh->hashval1_2 << 0);
+		skb_set_hash(skb,
+			     ((u32)rh->hashval2_0 << 24 |
+			      (u32)rh->hashval2_1 << 16 |
+			      (u32)rh->hashval1_1 << 8 |
+			      (u32)rh->hashval1_2 << 0),
+			     PKT_HASH_TYPE_L3);
 	skb_pull(skb, sizeof(*rh));
 
 	rp->rx_packets++;
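
The niu hunk above replaces a direct write to skb->rxhash with skb_set_hash(), which records the hash type alongside the value. As a rough sketch of the general pattern (not from this patch), a driver whose hardware hash also covers the L4 ports would report PKT_HASH_TYPE_L4 instead of the L3 type used by niu:

#include <linux/skbuff.h>

/* Sketch only: report a hardware-computed 4-tuple hash to the stack. */
static inline void report_rx_hash(struct sk_buff *skb, u32 hw_hash)
{
	skb_set_hash(skb, hw_hash, PKT_HASH_TYPE_L4);
}
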
diff --git a/drivers/net/ethernet/sun/sunbmac.c b/drivers/net/ethernet/sun/sunbmac.c
index 7217ee5d6273..206c1063815a 100644
--- a/drivers/net/ethernet/sun/sunbmac.c
+++ b/drivers/net/ethernet/sun/sunbmac.c
@@ -13,7 +13,6 @@
 #include <linux/in.h>
 #include <linux/string.h>
 #include <linux/delay.h>
-#include <linux/init.h>
 #include <linux/crc32.h>
 #include <linux/errno.h>
 #include <linux/ethtool.h>
diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
index b5655b79bd3b..c2799dc46325 100644
--- a/drivers/net/ethernet/sun/sungem.c
+++ b/drivers/net/ethernet/sun/sungem.c
@@ -24,7 +24,6 @@
 #include <linux/sched.h>
 #include <linux/string.h>
 #include <linux/delay.h>
-#include <linux/init.h>
 #include <linux/errno.h>
 #include <linux/pci.h>
 #include <linux/dma-mapping.h>
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index 3df56840a3b9..1c24a8f368bd 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -751,7 +751,7 @@ static struct vnet_mcast_entry *__vnet_mc_find(struct vnet *vp, u8 *addr)
 	struct vnet_mcast_entry *m;
 
 	for (m = vp->mcast_list; m; m = m->next) {
-		if (!memcmp(m->addr, addr, ETH_ALEN))
+		if (ether_addr_equal(m->addr, addr))
 			return m;
 	}
 	return NULL;
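
This hunk, like the later cpsw_ale, bpqether and fplustm hunks, switches an open-coded memcmp() on MAC addresses to ether_addr_equal() from <linux/etherdevice.h>. A tiny equivalence sketch (not from the patch; same_station() is a made-up name):

#include <linux/etherdevice.h>

/* Both forms test the same condition on 6-byte MAC addresses; the helper
 * documents intent and lets the core use an optimised compare where the
 * architecture allows it.
 */
static bool same_station(const u8 *a, const u8 *b)
{
	return ether_addr_equal(a, b);	/* equivalent to !memcmp(a, b, ETH_ALEN) */
}
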
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index 4f1d2549130e..2ead87759ab4 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -1764,7 +1764,7 @@ static void bdx_tx_cleanup(struct bdx_priv *priv)
 	WRITE_REG(priv, f->m.reg_RPTR, f->m.rptr & TXF_WPTR_WR_PTR);
 
 	/* We reclaimed resources, so in case the Q is stopped by xmit callback,
-	 * we resume the transmition and use tx_lock to synchronize with xmit.*/
+	 * we resume the transmission and use tx_lock to synchronize with xmit.*/
 	spin_lock(&priv->tx_lock);
 	priv->tx_level += tx_level;
 	BDX_ASSERT(priv->tx_level <= 0 || priv->tx_level > BDX_MAX_TX_LEVEL);
diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c
index 2dc16b6efaf0..73f74f369437 100644
--- a/drivers/net/ethernet/ti/cpmac.c
+++ b/drivers/net/ethernet/ti/cpmac.c
@@ -17,7 +17,6 @@
  */
 
 #include <linux/module.h>
-#include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/moduleparam.h>
 
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 5330fd298705..bde63e3af96f 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -541,14 +541,93 @@ static inline int cpsw_get_slave_port(struct cpsw_priv *priv, u32 slave_num)
 		return slave_num;
 }
 
+static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
+{
+	struct cpsw_priv *priv = netdev_priv(ndev);
+	struct cpsw_ale *ale = priv->ale;
+	int i;
+
+	if (priv->data.dual_emac) {
+		bool flag = false;
+
+		/* Enabling promiscuous mode on one interface affects
+		 * both interfaces, because they share the same
+		 * underlying hardware resource.
+		 */
+		for (i = 0; i <= priv->data.slaves; i++)
+			if (priv->slaves[i].ndev->flags & IFF_PROMISC)
+				flag = true;
+
+		if (!enable && flag) {
+			enable = true;
+			dev_err(&ndev->dev, "promiscuity not disabled as the other interface is still in promiscuity mode\n");
+		}
+
+		if (enable) {
+			/* Enable Bypass */
+			cpsw_ale_control_set(ale, 0, ALE_BYPASS, 1);
+
+			dev_dbg(&ndev->dev, "promiscuity enabled\n");
+		} else {
+			/* Disable Bypass */
+			cpsw_ale_control_set(ale, 0, ALE_BYPASS, 0);
+			dev_dbg(&ndev->dev, "promiscuity disabled\n");
+		}
+	} else {
+		if (enable) {
+			unsigned long timeout = jiffies + HZ;
+
+			/* Disable Learn for all ports */
+			for (i = 0; i <= priv->data.slaves; i++) {
+				cpsw_ale_control_set(ale, i,
+						     ALE_PORT_NOLEARN, 1);
+				cpsw_ale_control_set(ale, i,
+						     ALE_PORT_NO_SA_UPDATE, 1);
+			}
+
+			/* Clear All Untouched entries */
+			cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1);
+			do {
+				cpu_relax();
+				if (cpsw_ale_control_get(ale, 0, ALE_AGEOUT))
+					break;
+			} while (time_after(timeout, jiffies));
+			cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1);
+
+			/* Clear all mcast from ALE */
+			cpsw_ale_flush_multicast(ale, ALE_ALL_PORTS <<
+						 priv->host_port);
+
+			/* Flood All Unicast Packets to Host port */
+			cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 1);
+			dev_dbg(&ndev->dev, "promiscuity enabled\n");
+		} else {
+			/* Stop flooding unicast packets to the host port */
+			cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 0);
+
+			/* Enable Learn for all ports */
+			for (i = 0; i <= priv->data.slaves; i++) {
+				cpsw_ale_control_set(ale, i,
+						     ALE_PORT_NOLEARN, 0);
+				cpsw_ale_control_set(ale, i,
+						     ALE_PORT_NO_SA_UPDATE, 0);
+			}
+			dev_dbg(&ndev->dev, "promiscuity disabled\n");
+		}
+	}
+}
+
 static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
 {
 	struct cpsw_priv *priv = netdev_priv(ndev);
 
 	if (ndev->flags & IFF_PROMISC) {
 		/* Enable promiscuous mode */
-		dev_err(priv->dev, "Ignoring Promiscuous mode\n");
+		cpsw_set_promiscious(ndev, true);
 		return;
+	} else {
+		/* Disable promiscuous mode */
+		cpsw_set_promiscious(ndev, false);
 	}
 
 	/* Clear all mcast from ALE */
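
The new cpsw_set_promiscious() path is driven by the IFF_PROMISC flag through .ndo_set_rx_mode, so toggling the flag from userspace is what exercises the ALE bypass/no-learn logic added above. A hedged userspace sketch of that toggle (placeholder interface name, standard SIOCGIFFLAGS/SIOCSIFFLAGS ioctls; not part of the patch):

#include <net/if.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>

static int set_promisc(const char *ifname, int enable)
{
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return -1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	if (ioctl(fd, SIOCGIFFLAGS, &ifr) < 0)
		goto err;
	if (enable)
		ifr.ifr_flags |= IFF_PROMISC;
	else
		ifr.ifr_flags &= ~IFF_PROMISC;
	if (ioctl(fd, SIOCSIFFLAGS, &ifr) < 0)
		goto err;
	close(fd);
	return 0;
err:
	close(fd);
	return -1;
}

Note that in dual EMAC mode the driver above refuses to drop out of hardware bypass while the other port is still promiscuous, and only logs an error.
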
@@ -582,7 +661,7 @@ static void cpsw_intr_disable(struct cpsw_priv *priv)
 	return;
 }
 
-void cpsw_tx_handler(void *token, int len, int status)
+static void cpsw_tx_handler(void *token, int len, int status)
 {
 	struct sk_buff		*skb = token;
 	struct net_device	*ndev = skb->dev;
@@ -599,7 +678,7 @@ void cpsw_tx_handler(void *token, int len, int status)
 	dev_kfree_skb_any(skb);
 }
 
-void cpsw_rx_handler(void *token, int len, int status)
+static void cpsw_rx_handler(void *token, int len, int status)
 {
 	struct sk_buff		*skb = token;
 	struct sk_buff		*new_skb;
@@ -1257,29 +1336,6 @@ fail:
 	return NETDEV_TX_BUSY;
 }
 
-static void cpsw_ndo_change_rx_flags(struct net_device *ndev, int flags)
-{
-	/*
-	 * The switch cannot operate in promiscuous mode without substantial
-	 * headache.  For promiscuous mode to work, we would need to put the
-	 * ALE in bypass mode and route all traffic to the host port.
-	 * Subsequently, the host will need to operate as a "bridge", learn,
-	 * and flood as needed.  For now, we simply complain here and
-	 * do nothing about it :-)
-	 */
-	if ((flags & IFF_PROMISC) && (ndev->flags & IFF_PROMISC))
-		dev_err(&ndev->dev, "promiscuity ignored!\n");
-
-	/*
-	 * The switch cannot filter multicast traffic unless it is configured
-	 * in "VLAN Aware" mode.  Unfortunately, VLAN awareness requires a
-	 * whole bunch of additional logic that this driver does not implement
-	 * at present.
-	 */
-	if ((flags & IFF_ALLMULTI) && !(ndev->flags & IFF_ALLMULTI))
-		dev_err(&ndev->dev, "multicast traffic cannot be filtered!\n");
-}
-
 #ifdef CONFIG_TI_CPTS
 
 static void cpsw_hwtstamp_v1(struct cpsw_priv *priv)
@@ -1331,7 +1387,7 @@ static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
 	__raw_writel(ETH_P_1588, &priv->regs->ts_ltype);
 }
 
-static int cpsw_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
+static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
 {
 	struct cpsw_priv *priv = netdev_priv(dev);
 	struct cpts *cpts = priv->cpts;
@@ -1392,6 +1448,24 @@ static int cpsw_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
 	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
 }
 
+static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
+{
+	struct cpsw_priv *priv = netdev_priv(dev);
+	struct cpts *cpts = priv->cpts;
+	struct hwtstamp_config cfg;
+
+	if (priv->version != CPSW_VERSION_1 &&
+	    priv->version != CPSW_VERSION_2)
+		return -EOPNOTSUPP;
+
+	cfg.flags = 0;
+	cfg.tx_type = cpts->tx_enable ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+	cfg.rx_filter = (cpts->rx_enable ?
+			 HWTSTAMP_FILTER_PTP_V2_EVENT : HWTSTAMP_FILTER_NONE);
+
+	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+}
+
 #endif /*CONFIG_TI_CPTS*/
 
 static int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
@@ -1406,7 +1480,9 @@ static int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
 	switch (cmd) {
 #ifdef CONFIG_TI_CPTS
 	case SIOCSHWTSTAMP:
-		return cpsw_hwtstamp_ioctl(dev, req);
+		return cpsw_hwtstamp_set(dev, req);
+	case SIOCGHWTSTAMP:
+		return cpsw_hwtstamp_get(dev, req);
 #endif
 	case SIOCGMIIPHY:
 		data->phy_id = priv->slaves[slave_no].phy->addr;
@@ -1555,7 +1631,6 @@ static const struct net_device_ops cpsw_netdev_ops = {
 	.ndo_open		= cpsw_ndo_open,
 	.ndo_stop		= cpsw_ndo_stop,
 	.ndo_start_xmit		= cpsw_ndo_start_xmit,
-	.ndo_change_rx_flags	= cpsw_ndo_change_rx_flags,
 	.ndo_set_mac_address	= cpsw_ndo_set_mac_address,
 	.ndo_do_ioctl		= cpsw_ndo_ioctl,
 	.ndo_validate_addr	= eth_validate_addr,
@@ -2137,8 +2212,8 @@ static int cpsw_probe(struct platform_device *pdev)
 			  data->cpts_clock_mult, data->cpts_clock_shift))
 		dev_err(priv->dev, "error registering cpts device\n");
 
-	cpsw_notice(priv, probe, "initialized device (regs %x, irq %d)\n",
-		    ss_res->start, ndev->irq);
+	cpsw_notice(priv, probe, "initialized device (regs %pa, irq %d)\n",
+		    &ss_res->start, ndev->irq);
 
 	if (priv->data.dual_emac) {
 		ret = cpsw_probe_dual_emac(pdev, priv);
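
With the split into cpsw_hwtstamp_set()/cpsw_hwtstamp_get(), cpsw (like the tilegx and ixp4xx drivers further down) now answers SIOCGHWTSTAMP, so userspace can read back the current hardware-timestamping configuration instead of only setting it. A hedged userspace sketch of the query side ("eth0" is a placeholder; requires kernel headers that define SIOCGHWTSTAMP):

#include <linux/net_tstamp.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	memset(&cfg, 0, sizeof(cfg));
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;	/* driver copies its config back here */
	if (ioctl(fd, SIOCGHWTSTAMP, &ifr) < 0) {
		perror("SIOCGHWTSTAMP");
		close(fd);
		return 1;
	}
	printf("tx_type=%d rx_filter=%d\n", cfg.tx_type, cfg.rx_filter);
	close(fd);
	return 0;
}
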
diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
index 7fa60d6092ed..7f893069c418 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.c
+++ b/drivers/net/ethernet/ti/cpsw_ale.c
@@ -163,7 +163,7 @@ int cpsw_ale_match_addr(struct cpsw_ale *ale, u8 *addr, u16 vid)
 		if (cpsw_ale_get_vlan_id(ale_entry) != vid)
 			continue;
 		cpsw_ale_get_addr(ale_entry, entry_addr);
-		if (memcmp(entry_addr, addr, 6) == 0)
+		if (ether_addr_equal(entry_addr, addr))
 			return idx;
 	}
 	return -ENOENT;
@@ -477,6 +477,14 @@ static const struct ale_control_info ale_controls[ALE_NUM_CONTROLS] = {
 		.port_shift	= 0,
 		.bits		= 1,
 	},
+	[ALE_P0_UNI_FLOOD]	= {
+		.name		= "port0_unicast_flood",
+		.offset		= ALE_CONTROL,
+		.port_offset	= 0,
+		.shift		= 8,
+		.port_shift	= 0,
+		.bits		= 1,
+	},
 	[ALE_VLAN_NOLEARN]	= {
 		.name		= "vlan_nolearn",
 		.offset		= ALE_CONTROL,
@@ -573,6 +581,14 @@ static const struct ale_control_info ale_controls[ALE_NUM_CONTROLS] = {
 		.port_shift	= 0,
 		.bits		= 1,
 	},
+	[ALE_PORT_NO_SA_UPDATE]	= {
+		.name		= "no_source_update",
+		.offset		= ALE_PORTCTL,
+		.port_offset	= 4,
+		.shift		= 5,
+		.port_shift	= 0,
+		.bits		= 1,
+	},
 	[ALE_PORT_MCAST_LIMIT]	= {
 		.name		= "mcast_limit",
 		.offset		= ALE_PORTCTL,
diff --git a/drivers/net/ethernet/ti/cpsw_ale.h b/drivers/net/ethernet/ti/cpsw_ale.h
index 30daa1265f0c..de409c33b250 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.h
+++ b/drivers/net/ethernet/ti/cpsw_ale.h
@@ -34,6 +34,7 @@ enum cpsw_ale_control {
 	ALE_ENABLE,
 	ALE_CLEAR,
 	ALE_AGEOUT,
+	ALE_P0_UNI_FLOOD,
 	ALE_VLAN_NOLEARN,
 	ALE_NO_PORT_VLAN,
 	ALE_OUI_DENY,
@@ -47,6 +48,7 @@ enum cpsw_ale_control {
 	ALE_PORT_DROP_UNTAGGED,
 	ALE_PORT_DROP_UNKNOWN_VLAN,
 	ALE_PORT_NOLEARN,
+	ALE_PORT_NO_SA_UPDATE,
 	ALE_PORT_UNKNOWN_VLAN_MEMBER,
 	ALE_PORT_UNKNOWN_MCAST_FLOOD,
 	ALE_PORT_UNKNOWN_REG_MCAST_FLOOD,
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index 90a79462c869..364d0c7952c0 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -81,7 +81,7 @@ struct cpdma_desc {
 };
 
 struct cpdma_desc_pool {
-	u32			phys;
+	phys_addr_t		phys;
 	u32			hw_addr;
 	void __iomem		*iomap;		/* ioremap map */
 	void			*cpumap;	/* dma_alloc map */
@@ -219,8 +219,7 @@ static inline dma_addr_t desc_phys(struct cpdma_desc_pool *pool,
 {
 	if (!desc)
 		return 0;
-	return pool->hw_addr + (__force dma_addr_t)desc -
-			    (__force dma_addr_t)pool->iomap;
+	return pool->hw_addr + (__force long)desc - (__force long)pool->iomap;
 }
 
 static inline struct cpdma_desc __iomem *
@@ -972,7 +971,7 @@ struct cpdma_control_info {
 #define ACCESS_RW	(ACCESS_RO | ACCESS_WO)
 };
 
-struct cpdma_control_info controls[] = {
+static struct cpdma_control_info controls[] = {
 	[CPDMA_CMD_IDLE]	  = {CPDMA_DMACONTROL,	3,  1,      ACCESS_WO},
 	[CPDMA_COPY_ERROR_FRAMES] = {CPDMA_DMACONTROL,	4,  1,      ACCESS_RW},
 	[CPDMA_RX_OFF_LEN_UPDATE] = {CPDMA_DMACONTROL,	2,  1,      ACCESS_RW},
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
index 4ec92659a100..0cca9dec5d82 100644
--- a/drivers/net/ethernet/ti/davinci_mdio.c
+++ b/drivers/net/ethernet/ti/davinci_mdio.c
@@ -82,7 +82,7 @@ struct davinci_mdio_regs {
 	}	user[0];
 };
 
-struct mdio_platform_data default_pdata = {
+static const struct mdio_platform_data default_pdata = {
 	.bus_freq = DEF_OUT_FREQ,
 };
 
diff --git a/drivers/net/ethernet/tile/Kconfig b/drivers/net/ethernet/tile/Kconfig
index 4083ba8839e1..f59a6c265331 100644
--- a/drivers/net/ethernet/tile/Kconfig
+++ b/drivers/net/ethernet/tile/Kconfig
@@ -9,20 +9,10 @@ config TILE_NET
 	select CRC32
 	select TILE_GXIO_MPIPE if TILEGX
 	select HIGH_RES_TIMERS if TILEGX
+	select PTP_1588_CLOCK if TILEGX
 	---help---
 	  This is a standard Linux network device driver for the
 	  on-chip Tilera Gigabit Ethernet and XAUI interfaces.
 
 	  To compile this driver as a module, choose M here: the module
 	  will be called tile_net.
-
-config PTP_1588_CLOCK_TILEGX
-        tristate "Tilera TILE-Gx mPIPE as PTP clock"
-        select PTP_1588_CLOCK
-        depends on TILE_NET
-        depends on TILEGX
-        ---help---
-          This driver adds support for using the mPIPE as a PTP
-          clock. This clock is only useful if your PTP programs are
-          getting hardware time stamps on the PTP Ethernet packets
-          using the SO_TIMESTAMPING API.
diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c
index 0e9fb3301b11..023237a65720 100644
--- a/drivers/net/ethernet/tile/tilegx.c
+++ b/drivers/net/ethernet/tile/tilegx.c
@@ -187,10 +187,8 @@ struct tile_net_priv {
 	int echannel;
 	/* mPIPE instance, 0 or 1. */
 	int instance;
-#ifdef CONFIG_PTP_1588_CLOCK_TILEGX
 	/* The timestamp config. */
 	struct hwtstamp_config stamp_cfg;
-#endif
 };
 
 static struct mpipe_data {
@@ -229,14 +227,12 @@ static struct mpipe_data {
 	int first_bucket;
 	int num_buckets;
 
-#ifdef CONFIG_PTP_1588_CLOCK_TILEGX
 	/* PTP-specific data. */
 	struct ptp_clock *ptp_clock;
 	struct ptp_clock_info caps;
 
 	/* Lock for ptp accessors. */
 	struct mutex ptp_lock;
-#endif
 
 } mpipe_data[NR_MPIPE_MAX] = {
 	[0 ... (NR_MPIPE_MAX - 1)] {
@@ -451,20 +447,17 @@ static void tile_net_provide_needed_buffers(void)
 static void tile_rx_timestamp(struct tile_net_priv *priv, struct sk_buff *skb,
 			      gxio_mpipe_idesc_t *idesc)
 {
-#ifdef CONFIG_PTP_1588_CLOCK_TILEGX
 	if (unlikely(priv->stamp_cfg.rx_filter != HWTSTAMP_FILTER_NONE)) {
 		struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
 		memset(shhwtstamps, 0, sizeof(*shhwtstamps));
 		shhwtstamps->hwtstamp = ktime_set(idesc->time_stamp_sec,
 						  idesc->time_stamp_ns);
 	}
-#endif
 }
 
 /* Get TX timestamp, and store it in the skb. */
 static void tile_tx_timestamp(struct sk_buff *skb, int instance)
 {
-#ifdef CONFIG_PTP_1588_CLOCK_TILEGX
 	struct skb_shared_info *shtx = skb_shinfo(skb);
 	if (unlikely((shtx->tx_flags & SKBTX_HW_TSTAMP) != 0)) {
 		struct mpipe_data *md = &mpipe_data[instance];
@@ -477,14 +470,11 @@ static void tile_tx_timestamp(struct sk_buff *skb, int instance)
 		shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
 		skb_tstamp_tx(skb, &shhwtstamps);
 	}
-#endif
 }
 
 /* Use ioctl() to enable or disable TX or RX timestamping. */
-static int tile_hwtstamp_ioctl(struct net_device *dev, struct ifreq *rq,
-			       int cmd)
+static int tile_hwtstamp_set(struct net_device *dev, struct ifreq *rq)
 {
-#ifdef CONFIG_PTP_1588_CLOCK_TILEGX
 	struct hwtstamp_config config;
 	struct tile_net_priv *priv = netdev_priv(dev);
 
@@ -530,9 +520,17 @@ static int tile_hwtstamp_ioctl(struct net_device *dev, struct ifreq *rq,
 
 	priv->stamp_cfg = config;
 	return 0;
-#else
-	return -EOPNOTSUPP;
-#endif
+}
+
+static int tile_hwtstamp_get(struct net_device *dev, struct ifreq *rq)
+{
+	struct tile_net_priv *priv = netdev_priv(dev);
+
+	if (copy_to_user(rq->ifr_data, &priv->stamp_cfg,
+			 sizeof(priv->stamp_cfg)))
+		return -EFAULT;
+
+	return 0;
 }
 
 static inline bool filter_packet(struct net_device *dev, void *buf)
@@ -814,8 +812,6 @@ static enum hrtimer_restart tile_net_handle_egress_timer(struct hrtimer *t)
 	return HRTIMER_NORESTART;
 }
 
-#ifdef CONFIG_PTP_1588_CLOCK_TILEGX
-
 /* PTP clock operations. */
 
 static int ptp_mpipe_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
@@ -882,12 +878,9 @@ static struct ptp_clock_info ptp_mpipe_caps = {
 	.enable		= ptp_mpipe_enable,
 };
 
-#endif /* CONFIG_PTP_1588_CLOCK_TILEGX */
-
 /* Sync mPIPE's timestamp up with Linux system time and register PTP clock. */
 static void register_ptp_clock(struct net_device *dev, struct mpipe_data *md)
 {
-#ifdef CONFIG_PTP_1588_CLOCK_TILEGX
 	struct timespec ts;
 
 	getnstimeofday(&ts);
@@ -899,16 +892,13 @@ static void register_ptp_clock(struct net_device *dev, struct mpipe_data *md)
 	if (IS_ERR(md->ptp_clock))
 		netdev_err(dev, "ptp_clock_register failed %ld\n",
 			   PTR_ERR(md->ptp_clock));
-#endif
 }
 
 /* Initialize PTP fields in a new device. */
 static void init_ptp_dev(struct tile_net_priv *priv)
 {
-#ifdef CONFIG_PTP_1588_CLOCK_TILEGX
 	priv->stamp_cfg.rx_filter = HWTSTAMP_FILTER_NONE;
 	priv->stamp_cfg.tx_type = HWTSTAMP_TX_OFF;
-#endif
 }
 
 /* Helper functions for "tile_net_update()". */
@@ -2099,7 +2089,9 @@ static void tile_net_tx_timeout(struct net_device *dev)
 static int tile_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 {
 	if (cmd == SIOCSHWTSTAMP)
-		return tile_hwtstamp_ioctl(dev, rq, cmd);
+		return tile_hwtstamp_set(dev, rq);
+	if (cmd == SIOCGHWTSTAMP)
+		return tile_hwtstamp_get(dev, rq);
 
 	return -EOPNOTSUPP;
 }
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
index f7f2ef49c0c1..d899d0072ae0 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
@@ -1739,12 +1739,14 @@ static int ps3_gelic_driver_probe(struct ps3_system_bus_device *dev)
 		GELIC_CARD_PORT_STATUS_CHANGED;
 
 
-	if (gelic_card_init_chain(card, &card->tx_chain,
-			card->descr, GELIC_NET_TX_DESCRIPTORS))
+	result = gelic_card_init_chain(card, &card->tx_chain,
+				       card->descr, GELIC_NET_TX_DESCRIPTORS);
+	if (result)
 		goto fail_alloc_tx;
-	if (gelic_card_init_chain(card, &card->rx_chain,
-				 card->descr + GELIC_NET_TX_DESCRIPTORS,
-				 GELIC_NET_RX_DESCRIPTORS))
+	result = gelic_card_init_chain(card, &card->rx_chain,
+				       card->descr + GELIC_NET_TX_DESCRIPTORS,
+				       GELIC_NET_RX_DESCRIPTORS);
+	if (result)
 		goto fail_alloc_rx;
 
 	/* head of chain */
@@ -1754,7 +1756,8 @@ static int ps3_gelic_driver_probe(struct ps3_system_bus_device *dev)
 		card->rx_top, card->tx_top, sizeof(struct gelic_descr),
 		GELIC_NET_RX_DESCRIPTORS);
 	/* allocate rx skbs */
-	if (gelic_card_alloc_rx_skbs(card))
+	result = gelic_card_alloc_rx_skbs(card);
+	if (result)
 		goto fail_alloc_skbs;
 
 	spin_lock_init(&card->tx_lock);
@@ -1772,7 +1775,8 @@ static int ps3_gelic_driver_probe(struct ps3_system_bus_device *dev)
 	}
 
 #ifdef CONFIG_GELIC_WIRELESS
-	if (gelic_wl_driver_probe(card)) {
+	result = gelic_wl_driver_probe(card);
+	if (result) {
 		dev_dbg(&dev->core, "%s: WL init failed\n", __func__);
 		goto fail_setup_netdev;
 	}
diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c
index 1322546d92ac..88e9c73cebc0 100644
--- a/drivers/net/ethernet/toshiba/tc35815.c
+++ b/drivers/net/ethernet/toshiba/tc35815.c
@@ -38,7 +38,6 @@ static const char *version = "tc35815.c:v" DRV_VERSION "\n";
 #include <linux/string.h>
 #include <linux/spinlock.h>
 #include <linux/errno.h>
-#include <linux/init.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/skbuff.h>
@@ -1170,19 +1169,12 @@ static int tc35815_tx_full(struct net_device *dev)
 static void tc35815_restart(struct net_device *dev)
 {
 	struct tc35815_local *lp = netdev_priv(dev);
+	int ret;
 
 	if (lp->phy_dev) {
-		int timeout;
-
-		phy_write(lp->phy_dev, MII_BMCR, BMCR_RESET);
-		timeout = 100;
-		while (--timeout) {
-			if (!(phy_read(lp->phy_dev, MII_BMCR) & BMCR_RESET))
-				break;
-			udelay(1);
-		}
-		if (!timeout)
-			printk(KERN_ERR "%s: BMCR reset failed.\n", dev->name);
+		ret = phy_init_hw(lp->phy_dev);
+		if (ret)
+			printk(KERN_ERR "%s: PHY init failed.\n", dev->name);
 	}
 
 	spin_lock_bh(&lp->rx_lock);
diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c
index c4dbf981804b..47eeb3abf7f7 100644
--- a/drivers/net/ethernet/tundra/tsi108_eth.c
+++ b/drivers/net/ethernet/tundra/tsi108_eth.c
@@ -32,7 +32,6 @@
 
 #include <linux/module.h>
 #include <linux/types.h>
-#include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/net.h>
 #include <linux/netdevice.h>
diff --git a/drivers/net/ethernet/tundra/tsi108_eth.h b/drivers/net/ethernet/tundra/tsi108_eth.h
index 5fee7d78dc6d..4a03c594b2b1 100644
--- a/drivers/net/ethernet/tundra/tsi108_eth.h
+++ b/drivers/net/ethernet/tundra/tsi108_eth.h
@@ -16,9 +16,7 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
- * MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 /*
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 2166e879a096..a4347508031c 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -29,7 +29,6 @@
 
 #include <linux/delay.h>
 #include <linux/etherdevice.h>
-#include <linux/init.h>
 #include <linux/mii.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index f9293da19e26..1ec65feebb9e 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -22,7 +22,6 @@
 
 #include <linux/delay.h>
 #include <linux/etherdevice.h>
-#include <linux/init.h>
 #include <linux/module.h>
 #include <linux/netdevice.h>
 #include <linux/of_mdio.h>
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index fefb8cd5eb65..36052b98b3fc 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -14,7 +14,6 @@
 
 #include <linux/module.h>
 #include <linux/uaccess.h>
-#include <linux/init.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/skbuff.h>
diff --git a/drivers/net/ethernet/xircom/xirc2ps_cs.c b/drivers/net/ethernet/xircom/xirc2ps_cs.c
index bdd20b888cf6..7c81ffb861e8 100644
--- a/drivers/net/ethernet/xircom/xirc2ps_cs.c
+++ b/drivers/net/ethernet/xircom/xirc2ps_cs.c
@@ -27,8 +27,7 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
  *
  *
  * ALTERNATIVELY, this driver may be distributed under the terms of
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index bcc224a83734..25283f17d82f 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -373,7 +373,7 @@ static void ixp_tx_timestamp(struct port *port, struct sk_buff *skb)
 	__raw_writel(TX_SNAPSHOT_LOCKED, &regs->channel[ch].ch_event);
 }
 
-static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+static int hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
 {
 	struct hwtstamp_config cfg;
 	struct ixp46x_ts_regs *regs;
@@ -417,6 +417,32 @@ static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
 	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
 }
 
+static int hwtstamp_get(struct net_device *netdev, struct ifreq *ifr)
+{
+	struct hwtstamp_config cfg;
+	struct port *port = netdev_priv(netdev);
+
+	cfg.flags = 0;
+	cfg.tx_type = port->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+
+	switch (port->hwts_rx_en) {
+	case 0:
+		cfg.rx_filter = HWTSTAMP_FILTER_NONE;
+		break;
+	case PTP_SLAVE_MODE:
+		cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
+		break;
+	case PTP_MASTER_MODE:
+		cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
+		break;
+	default:
+		WARN_ON_ONCE(1);
+		return -ERANGE;
+	}
+
+	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+}
+
 static int ixp4xx_mdio_cmd(struct mii_bus *bus, int phy_id, int location,
 			   int write, u16 cmd)
 {
@@ -959,8 +985,12 @@ static int eth_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
 	if (!netif_running(dev))
 		return -EINVAL;
 
-	if (cpu_is_ixp46x() && cmd == SIOCSHWTSTAMP)
-		return hwtstamp_ioctl(dev, req, cmd);
+	if (cpu_is_ixp46x()) {
+		if (cmd == SIOCSHWTSTAMP)
+			return hwtstamp_set(dev, req);
+		if (cmd == SIOCGHWTSTAMP)
+			return hwtstamp_get(dev, req);
+	}
 
 	return phy_mii_ioctl(port->phydev, req, cmd);
 }
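
The ixp4xx handler above now also answers SIOCGHWTSTAMP, mirroring its existing set path. For completeness, a hedged sketch of the set side from userspace (hypothetical helper name and placeholder interface; the filter must be one the driver supports, such as HWTSTAMP_FILTER_PTP_V1_L4_SYNC here):

#include <linux/net_tstamp.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <string.h>
#include <sys/ioctl.h>

static int enable_ptp_timestamps(int fd, const char *ifname)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;

	memset(&cfg, 0, sizeof(cfg));
	cfg.tx_type = HWTSTAMP_TX_ON;
	cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;

	/* the driver may rewrite cfg with what it actually enabled */
	return ioctl(fd, SIOCSHWTSTAMP, &ifr);
}
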
diff --git a/drivers/net/fddi/defxx.c b/drivers/net/fddi/defxx.c
index 0b40e1c46f07..eb78203cd58e 100644
--- a/drivers/net/fddi/defxx.c
+++ b/drivers/net/fddi/defxx.c
@@ -206,7 +206,6 @@
 #include <linux/eisa.h>
 #include <linux/errno.h>
 #include <linux/fddidevice.h>
-#include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/ioport.h>
 #include <linux/kernel.h>
@@ -241,12 +240,6 @@ static char version[] =
  */
 #define NEW_SKB_SIZE (PI_RCV_DATA_K_SIZE_MAX+128)
 
-#ifdef CONFIG_PCI
-#define DFX_BUS_PCI(dev) (dev->bus == &pci_bus_type)
-#else
-#define DFX_BUS_PCI(dev) 0
-#endif
-
 #ifdef CONFIG_EISA
 #define DFX_BUS_EISA(dev) (dev->bus == &eisa_bus_type)
 #else
@@ -436,7 +429,7 @@ static void dfx_port_read_long(DFX_board_t *bp, int offset, u32 *data)
 static void dfx_get_bars(struct device *bdev,
 			 resource_size_t *bar_start, resource_size_t *bar_len)
 {
-	int dfx_bus_pci = DFX_BUS_PCI(bdev);
+	int dfx_bus_pci = dev_is_pci(bdev);
 	int dfx_bus_eisa = DFX_BUS_EISA(bdev);
 	int dfx_bus_tc = DFX_BUS_TC(bdev);
 	int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
@@ -518,7 +511,7 @@ static const struct net_device_ops dfx_netdev_ops = {
 static int dfx_register(struct device *bdev)
 {
 	static int version_disp;
-	int dfx_bus_pci = DFX_BUS_PCI(bdev);
+	int dfx_bus_pci = dev_is_pci(bdev);
 	int dfx_bus_tc = DFX_BUS_TC(bdev);
 	int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
 	const char *print_name = dev_name(bdev);
@@ -667,7 +660,7 @@ static void dfx_bus_init(struct net_device *dev)
 {
 	DFX_board_t *bp = netdev_priv(dev);
 	struct device *bdev = bp->bus_dev;
-	int dfx_bus_pci = DFX_BUS_PCI(bdev);
+	int dfx_bus_pci = dev_is_pci(bdev);
 	int dfx_bus_eisa = DFX_BUS_EISA(bdev);
 	int dfx_bus_tc = DFX_BUS_TC(bdev);
 	int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
@@ -813,7 +806,7 @@ static void dfx_bus_uninit(struct net_device *dev)
 {
 	DFX_board_t *bp = netdev_priv(dev);
 	struct device *bdev = bp->bus_dev;
-	int dfx_bus_pci = DFX_BUS_PCI(bdev);
+	int dfx_bus_pci = dev_is_pci(bdev);
 	int dfx_bus_eisa = DFX_BUS_EISA(bdev);
 	u8 val;
 
@@ -967,7 +960,7 @@ static int dfx_driver_init(struct net_device *dev, const char *print_name,
 {
 	DFX_board_t *bp = netdev_priv(dev);
 	struct device *bdev = bp->bus_dev;
-	int dfx_bus_pci = DFX_BUS_PCI(bdev);
+	int dfx_bus_pci = dev_is_pci(bdev);
 	int dfx_bus_eisa = DFX_BUS_EISA(bdev);
 	int dfx_bus_tc = DFX_BUS_TC(bdev);
 	int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
@@ -1877,7 +1870,7 @@ static irqreturn_t dfx_interrupt(int irq, void *dev_id)
 	struct net_device *dev = dev_id;
 	DFX_board_t *bp = netdev_priv(dev);
 	struct device *bdev = bp->bus_dev;
-	int dfx_bus_pci = DFX_BUS_PCI(bdev);
+	int dfx_bus_pci = dev_is_pci(bdev);
 	int dfx_bus_eisa = DFX_BUS_EISA(bdev);
 	int dfx_bus_tc = DFX_BUS_TC(bdev);
 
@@ -3579,7 +3572,7 @@ static void dfx_unregister(struct device *bdev)
 {
 	struct net_device *dev = dev_get_drvdata(bdev);
 	DFX_board_t *bp = netdev_priv(dev);
-	int dfx_bus_pci = DFX_BUS_PCI(bdev);
+	int dfx_bus_pci = dev_is_pci(bdev);
 	int dfx_bus_tc = DFX_BUS_TC(bdev);
 	int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
 	resource_size_t bar_start = 0;		/* pointer to port */
diff --git a/drivers/net/fddi/skfp/fplustm.c b/drivers/net/fddi/skfp/fplustm.c
index f83993590174..7d3779ae7377 100644
--- a/drivers/net/fddi/skfp/fplustm.c
+++ b/drivers/net/fddi/skfp/fplustm.c
@@ -23,6 +23,7 @@
 #include "h/smc.h"
 #include "h/supern_2.h"
 #include <linux/bitrev.h>
+#include <linux/etherdevice.h>
 
 #ifndef	lint
 static const char ID_sccs[] = "@(#)fplustm.c	1.32 99/02/23 (C) SK " ;
@@ -55,14 +56,14 @@ static	char cam_warning [] = "E_SMT_004: CAM still busy\n";
 
 #define	DUMMY_READ()	smc->hw.mc_dummy = (u_short) inp(ADDR(B0_RAP))
 
-#define	CHECK_NPP() {	unsigned k = 10000 ;\
+#define	CHECK_NPP() {	unsigned int k = 10000 ;\
 			while ((inpw(FM_A(FM_STMCHN)) & FM_SNPPND) && k) k--;\
 			if (!k) { \
 				SMT_PANIC(smc,SMT_E0130, SMT_E0130_MSG) ; \
 			}	\
 		}
 
-#define	CHECK_CAM() {	unsigned k = 10 ;\
+#define	CHECK_CAM() {	unsigned int k = 10 ;\
 			while (!(inpw(FM_A(FM_AFSTAT)) & FM_DONE) && k) k--;\
 			if (!k) { \
 				SMT_PANIC(smc,SMT_E0131, SMT_E0131_MSG) ; \
@@ -356,25 +357,25 @@ static	void set_formac_addr(struct s_smc *smc)
 	long	t_requ = smc->mib.m[MAC0].fddiMACT_Req ;
 
 	outpw(FM_A(FM_SAID),my_said) ;	/* set short address */
-	outpw(FM_A(FM_LAIL),(unsigned)((smc->hw.fddi_home_addr.a[4]<<8) +
+	outpw(FM_A(FM_LAIL),(unsigned short)((smc->hw.fddi_home_addr.a[4]<<8) +
 					smc->hw.fddi_home_addr.a[5])) ;
-	outpw(FM_A(FM_LAIC),(unsigned)((smc->hw.fddi_home_addr.a[2]<<8) +
+	outpw(FM_A(FM_LAIC),(unsigned short)((smc->hw.fddi_home_addr.a[2]<<8) +
 					smc->hw.fddi_home_addr.a[3])) ;
-	outpw(FM_A(FM_LAIM),(unsigned)((smc->hw.fddi_home_addr.a[0]<<8) +
+	outpw(FM_A(FM_LAIM),(unsigned short)((smc->hw.fddi_home_addr.a[0]<<8) +
 					smc->hw.fddi_home_addr.a[1])) ;
 
 	outpw(FM_A(FM_SAGP),my_sagp) ;	/* set short group address */
 
-	outpw(FM_A(FM_LAGL),(unsigned)((smc->hw.fp.group_addr.a[4]<<8) +
+	outpw(FM_A(FM_LAGL),(unsigned short)((smc->hw.fp.group_addr.a[4]<<8) +
 					smc->hw.fp.group_addr.a[5])) ;
-	outpw(FM_A(FM_LAGC),(unsigned)((smc->hw.fp.group_addr.a[2]<<8) +
+	outpw(FM_A(FM_LAGC),(unsigned short)((smc->hw.fp.group_addr.a[2]<<8) +
 					smc->hw.fp.group_addr.a[3])) ;
-	outpw(FM_A(FM_LAGM),(unsigned)((smc->hw.fp.group_addr.a[0]<<8) +
+	outpw(FM_A(FM_LAGM),(unsigned short)((smc->hw.fp.group_addr.a[0]<<8) +
 					smc->hw.fp.group_addr.a[1])) ;
 
 	/* set r_request regs. (MSW & LSW of TRT ) */
-	outpw(FM_A(FM_TREQ1),(unsigned)(t_requ>>16)) ;
-	outpw(FM_A(FM_TREQ0),(unsigned)t_requ) ;
+	outpw(FM_A(FM_TREQ1),(unsigned short)(t_requ>>16)) ;
+	outpw(FM_A(FM_TREQ0),(unsigned short)t_requ) ;
 }
 
 static void set_int(char *p, int l)
@@ -394,10 +395,10 @@ static void set_int(char *p, int l)
  *	append 'end of chain' pointer
  */
 static void copy_tx_mac(struct s_smc *smc, u_long td, struct fddi_mac *mac,
-			unsigned off, int len)
+			unsigned int off, int len)
 /* u_long td;		 transmit descriptor */
 /* struct fddi_mac *mac; mac frame pointer */
-/* unsigned off;	 start address within buffer memory */
+/* unsigned int off;	 start address within buffer memory */
 /* int len ;		 length of the frame including the FC */
 {
 	int	i ;
@@ -1082,7 +1083,7 @@ static struct s_fpmc* mac_get_mc_table(struct s_smc *smc,
 				slot = tb ;
 			continue ;
 		}
-		if (memcmp((char *)&tb->a,(char *)own,6))
+		if (!ether_addr_equal((char *)&tb->a, (char *)own))
 			continue ;
 		return tb;
 	}
diff --git a/drivers/net/fddi/skfp/h/supern_2.h b/drivers/net/fddi/skfp/h/supern_2.h
index 0b73690280f6..4ee360d2dc62 100644
--- a/drivers/net/fddi/skfp/h/supern_2.h
+++ b/drivers/net/fddi/skfp/h/supern_2.h
@@ -92,33 +92,33 @@
 union rx_descr {
 	struct {
 #ifdef	LITTLE_ENDIAN
-	unsigned	rx_length :16 ;	/* frame length lower/upper byte */
-	unsigned	rx_erfbb  :2 ;	/* received frame byte boundary */
-	unsigned	rx_reserv2:2 ;	/* reserved */	
-	unsigned	rx_sfrmty :3 ;	/* frame type bits */
-	unsigned	rx_sadrrg :1 ;	/* DA == MA or broad-/multicast */
-	unsigned	rx_sfrmerr:1 ;	/* received frame not valid */
-	unsigned	rx_seac0  :1 ;	/* frame-copied  C-indicator */
-	unsigned	rx_seac1  :1 ;	/* address-match A-indicator */
-	unsigned	rx_seac2  :1 ;	/* frame-error   E-indicator */
-	unsigned	rx_ssrcrtg:1 ;	/* == 1 SA has MSB set */
-	unsigned	rx_reserv1:1 ;	/* reserved */	
-	unsigned	rx_msrabt :1 ;	/* memory status receive abort */
-	unsigned	rx_msvalid:1 ;	/* memory status valid */
+	unsigned int	rx_length :16 ;	/* frame length lower/upper byte */
+	unsigned int	rx_erfbb  :2 ;	/* received frame byte boundary */
+	unsigned int	rx_reserv2:2 ;	/* reserved */
+	unsigned int	rx_sfrmty :3 ;	/* frame type bits */
+	unsigned int	rx_sadrrg :1 ;	/* DA == MA or broad-/multicast */
+	unsigned int	rx_sfrmerr:1 ;	/* received frame not valid */
+	unsigned int	rx_seac0  :1 ;	/* frame-copied  C-indicator */
+	unsigned int	rx_seac1  :1 ;	/* address-match A-indicator */
+	unsigned int	rx_seac2  :1 ;	/* frame-error   E-indicator */
+	unsigned int	rx_ssrcrtg:1 ;	/* == 1 SA has MSB set */
+	unsigned int	rx_reserv1:1 ;	/* reserved */
+	unsigned int	rx_msrabt :1 ;	/* memory status receive abort */
+	unsigned int	rx_msvalid:1 ;	/* memory status valid */
 #else
-	unsigned	rx_msvalid:1 ;	/* memory status valid */
-	unsigned	rx_msrabt :1 ;	/* memory status receive abort */
-	unsigned	rx_reserv1:1 ;	/* reserved */	
-	unsigned	rx_ssrcrtg:1 ;	/* == 1 SA has MSB set */
-	unsigned	rx_seac2  :1 ;	/* frame-error   E-indicator */
-	unsigned	rx_seac1  :1 ;	/* address-match A-indicator */
-	unsigned	rx_seac0  :1 ;	/* frame-copied  C-indicator */
-	unsigned	rx_sfrmerr:1 ;	/* received frame not valid */
-	unsigned	rx_sadrrg :1 ;	/* DA == MA or broad-/multicast */
-	unsigned	rx_sfrmty :3 ;	/* frame type bits */
-	unsigned	rx_erfbb  :2 ;	/* received frame byte boundary */
-	unsigned	rx_reserv2:2 ;	/* reserved */	
-	unsigned	rx_length :16 ;	/* frame length lower/upper byte */
+	unsigned int	rx_msvalid:1 ;	/* memory status valid */
+	unsigned int	rx_msrabt :1 ;	/* memory status receive abort */
+	unsigned int	rx_reserv1:1 ;	/* reserved */
+	unsigned int	rx_ssrcrtg:1 ;	/* == 1 SA has MSB set */
+	unsigned int	rx_seac2  :1 ;	/* frame-error   E-indicator */
+	unsigned int	rx_seac1  :1 ;	/* address-match A-indicator */
+	unsigned int	rx_seac0  :1 ;	/* frame-copied  C-indicator */
+	unsigned int	rx_sfrmerr:1 ;	/* received frame not valid */
+	unsigned int	rx_sadrrg :1 ;	/* DA == MA or broad-/multicast */
+	unsigned int	rx_sfrmty :3 ;	/* frame type bits */
+	unsigned int	rx_erfbb  :2 ;	/* received frame byte boundary */
+	unsigned int	rx_reserv2:2 ;	/* reserved */
+	unsigned int	rx_length :16 ;	/* frame length lower/upper byte */
 #endif
 	} r ;
 	long	i ;
@@ -162,23 +162,23 @@ union rx_descr {
 union tx_descr {
 	struct {
 #ifdef	LITTLE_ENDIAN
-	unsigned	tx_length:16 ;	/* frame length lower/upper byte */
-	unsigned	tx_res	 :8 ;	/* reserved 	 (bit 16..23) */
-	unsigned	tx_xmtabt:1 ;	/* transmit abort */
-	unsigned	tx_nfcs  :1 ;	/* no frame check sequence */
-	unsigned	tx_xdone :1 ;	/* give up token */
-	unsigned	tx_rpxm  :2 ;	/* byte offset */
-	unsigned	tx_pat1  :2 ;	/* must be TXP1 */
-	unsigned	tx_more	 :1 ;	/* more frame in chain */
+	unsigned int	tx_length:16 ;	/* frame length lower/upper byte */
+	unsigned int	tx_res	 :8 ;	/* reserved 	 (bit 16..23) */
+	unsigned int	tx_xmtabt:1 ;	/* transmit abort */
+	unsigned int	tx_nfcs  :1 ;	/* no frame check sequence */
+	unsigned int	tx_xdone :1 ;	/* give up token */
+	unsigned int	tx_rpxm  :2 ;	/* byte offset */
+	unsigned int	tx_pat1  :2 ;	/* must be TXP1 */
+	unsigned int	tx_more	 :1 ;	/* more frame in chain */
 #else
-	unsigned	tx_more	 :1 ;	/* more frame in chain */
-	unsigned	tx_pat1  :2 ;	/* must be TXP1 */
-	unsigned	tx_rpxm  :2 ;	/* byte offset */
-	unsigned	tx_xdone :1 ;	/* give up token */
-	unsigned	tx_nfcs  :1 ;	/* no frame check sequence */
-	unsigned	tx_xmtabt:1 ;	/* transmit abort */
-	unsigned	tx_res	 :8 ;	/* reserved 	 (bit 16..23) */
-	unsigned	tx_length:16 ;	/* frame length lower/upper byte */
+	unsigned int	tx_more	 :1 ;	/* more frame in chain */
+	unsigned int	tx_pat1  :2 ;	/* must be TXP1 */
+	unsigned int	tx_rpxm  :2 ;	/* byte offset */
+	unsigned int	tx_xdone :1 ;	/* give up token */
+	unsigned int	tx_nfcs  :1 ;	/* no frame check sequence */
+	unsigned int	tx_xmtabt:1 ;	/* transmit abort */
+	unsigned int	tx_res	 :8 ;	/* reserved 	 (bit 16..23) */
+	unsigned int	tx_length:16 ;	/* frame length lower/upper byte */
 #endif
 	} t ;
 	long	i ;
@@ -202,13 +202,13 @@ union tx_descr {
 union tx_pointer {
 	struct t {
 #ifdef	LITTLE_ENDIAN
-	unsigned	tp_pointer:16 ;	/* pointer to tx_descr (low/high) */
-	unsigned	tp_res	  :8 ;	/* reserved 	 (bit 16..23) */
-	unsigned	tp_pattern:8 ;	/* fixed pattern (bit 24..31) */
+	unsigned int	tp_pointer:16 ;	/* pointer to tx_descr (low/high) */
+	unsigned int	tp_res	  :8 ;	/* reserved 	 (bit 16..23) */
+	unsigned int	tp_pattern:8 ;	/* fixed pattern (bit 24..31) */
 #else
-	unsigned	tp_pattern:8 ;	/* fixed pattern (bit 24..31) */
-	unsigned	tp_res	  :8 ;	/* reserved 	 (bit 16..23) */
-	unsigned	tp_pointer:16 ;	/* pointer to tx_descr (low/high) */
+	unsigned int	tp_pattern:8 ;	/* fixed pattern (bit 24..31) */
+	unsigned int	tp_res	  :8 ;	/* reserved 	 (bit 16..23) */
+	unsigned int	tp_pointer:16 ;	/* pointer to tx_descr (low/high) */
 #endif
 	} t ;
 	long	i ;
diff --git a/drivers/net/fddi/skfp/h/targetos.h b/drivers/net/fddi/skfp/h/targetos.h
index 53bacc107160..355194251ff8 100644
--- a/drivers/net/fddi/skfp/h/targetos.h
+++ b/drivers/net/fddi/skfp/h/targetos.h
@@ -48,7 +48,6 @@
 #include <linux/fddidevice.h>
 #include <linux/skbuff.h>
 #include <linux/pci.h>
-#include <linux/init.h>
 
 // is redefined by linux, but we need our definition
 #undef ADDR
diff --git a/drivers/net/fddi/skfp/skfddi.c b/drivers/net/fddi/skfp/skfddi.c
index 713d303a06a9..d5f58121b2e2 100644
--- a/drivers/net/fddi/skfp/skfddi.c
+++ b/drivers/net/fddi/skfp/skfddi.c
@@ -351,7 +351,6 @@ static void skfp_remove_one(struct pci_dev *pdev)
 	free_netdev(p);
 
 	pci_disable_device(pdev);
-	pci_set_drvdata(pdev, NULL);
 }
 
 /*
diff --git a/drivers/net/fddi/skfp/smt.c b/drivers/net/fddi/skfp/smt.c
index 08d94329c12f..9edada85ed02 100644
--- a/drivers/net/fddi/skfp/smt.c
+++ b/drivers/net/fddi/skfp/smt.c
@@ -900,7 +900,7 @@ static void smt_send_rdf(struct s_smc *smc, SMbuf *rej, int fc, int reason,
 	rdf->version.v_pad2 = 0 ;
 
 	/* set P13 */
-	if ((unsigned) frame_len <= SMT_MAX_INFO_LEN - sizeof(*rdf) +
+	if ((unsigned int) frame_len <= SMT_MAX_INFO_LEN - sizeof(*rdf) +
 		2*sizeof(struct smt_header))
 		len = frame_len ;
 	else
diff --git a/drivers/net/fddi/skfp/srf.c b/drivers/net/fddi/skfp/srf.c
index f6f7baf9f27a..cc27dea3414e 100644
--- a/drivers/net/fddi/skfp/srf.c
+++ b/drivers/net/fddi/skfp/srf.c
@@ -73,7 +73,7 @@ void smt_init_evc(struct s_smc *smc)
 {
 	struct s_srf_evc	*evc ;
 	const struct evc_init 	*init ;
-	int			i ;
+	unsigned int		i ;
 	int			index ;
 	int			offset ;
 
@@ -84,7 +84,7 @@ void smt_init_evc(struct s_smc *smc)
 	evc = smc->evcs ;
 	init = evc_inits ;
 
-	for (i = 0 ; (unsigned) i < MAX_INIT_EVC ; i++) {
+	for (i = 0 ; i < MAX_INIT_EVC ; i++) {
 		for (index = 0 ; index < init->n ; index++) {
 			evc->evc_code = init->code ;
 			evc->evc_para = init->para ;
@@ -98,7 +98,7 @@ void smt_init_evc(struct s_smc *smc)
 		init++ ;
 	}
 
-	if ((unsigned) (evc - smc->evcs) > MAX_EVCS) {
+	if ((unsigned int) (evc - smc->evcs) > MAX_EVCS) {
 		SMT_PANIC(smc,SMT_E0127, SMT_E0127_MSG) ;
 	}
 
@@ -139,7 +139,7 @@ void smt_init_evc(struct s_smc *smc)
 		offset++ ;
 	}
 #ifdef	DEBUG
-	for (i = 0, evc = smc->evcs ; (unsigned) i < MAX_EVCS ; i++, evc++) {
+	for (i = 0, evc = smc->evcs ; i < MAX_EVCS ; i++, evc++) {
 		if (SMT_IS_CONDITION(evc->evc_code)) {
 			if (!evc->evc_cond_state) {
 				SMT_PANIC(smc,SMT_E0128, SMT_E0128_MSG) ;
@@ -160,10 +160,10 @@ void smt_init_evc(struct s_smc *smc)
 
 static struct s_srf_evc *smt_get_evc(struct s_smc *smc, int code, int index)
 {
-	int			i ;
+	unsigned int		i ;
 	struct s_srf_evc	*evc ;
 
-	for (i = 0, evc = smc->evcs ; (unsigned) i < MAX_EVCS ; i++, evc++) {
+	for (i = 0, evc = smc->evcs ; i < MAX_EVCS ; i++, evc++) {
 		if (evc->evc_code == code && evc->evc_index == index)
 			return evc;
 	}
@@ -335,9 +335,9 @@ void smt_srf_event(struct s_smc *smc, int code, int index, int cond)
 static void clear_all_rep(struct s_smc *smc)
 {
 	struct s_srf_evc	*evc ;
-	int			i ;
+	unsigned int		i ;
 
-	for (i = 0, evc = smc->evcs ; (unsigned) i < MAX_EVCS ; i++, evc++) {
+	for (i = 0, evc = smc->evcs ; i < MAX_EVCS ; i++, evc++) {
 		evc->evc_rep_required = FALSE ;
 		if (SMT_IS_CONDITION(evc->evc_code))
 			*evc->evc_cond_state = FALSE ;
@@ -348,10 +348,10 @@ static void clear_all_rep(struct s_smc *smc)
 static void clear_reported(struct s_smc *smc)
 {
 	struct s_srf_evc	*evc ;
-	int			i ;
+	unsigned int		i ;
 
 	smc->srf.any_report = FALSE ;
-	for (i = 0, evc = smc->evcs ; (unsigned) i < MAX_EVCS ; i++, evc++) {
+	for (i = 0, evc = smc->evcs ; i < MAX_EVCS ; i++, evc++) {
 		if (SMT_IS_CONDITION(evc->evc_code)) {
 			if (*evc->evc_cond_state == FALSE)
 				evc->evc_rep_required = FALSE ;
@@ -375,7 +375,7 @@ static void smt_send_srf(struct s_smc *smc)
 	struct s_srf_evc	*evc ;
 	SK_LOC_DECL(struct s_pcon,pcon) ;
 	SMbuf			*mb ;
-	int			i ;
+	unsigned int		i ;
 
 	static const struct fddi_addr SMT_SRF_DA = {
 		{ 0x80, 0x01, 0x43, 0x00, 0x80, 0x08 }
@@ -405,7 +405,7 @@ static void smt_send_srf(struct s_smc *smc)
 	smt_add_para(smc,&pcon,(u_short) SMT_P1033,0,0) ;
 	smt_add_para(smc,&pcon,(u_short) SMT_P1034,0,0) ;
 
-	for (i = 0, evc = smc->evcs ; (unsigned) i < MAX_EVCS ; i++, evc++) {
+	for (i = 0, evc = smc->evcs ; i < MAX_EVCS ; i++, evc++) {
 		if (evc->evc_rep_required) {
 			smt_add_para(smc,&pcon,evc->evc_para,
 				(int)evc->evc_index,0) ;
diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
index 1450e33fc250..66e2b19ef709 100644
--- a/drivers/net/hamradio/6pack.c
+++ b/drivers/net/hamradio/6pack.c
@@ -662,7 +662,8 @@ static int sixpack_open(struct tty_struct *tty)
 	tty->receive_room = 65536;
 
 	/* Now we're ready to register. */
-	if (register_netdev(dev))
+	err = register_netdev(dev);
+	if (err)
 		goto out_free;
 
 	tnc_init(sp);
diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c
index f91bf0ddf031..d50b23cf9ea9 100644
--- a/drivers/net/hamradio/bpqether.c
+++ b/drivers/net/hamradio/bpqether.c
@@ -208,7 +208,7 @@ static int bpq_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_ty
 	eth = eth_hdr(skb);
 
 	if (!(bpq->acpt_addr[0] & 0x01) &&
-	    memcmp(eth->h_source, bpq->acpt_addr, ETH_ALEN))
+	    !ether_addr_equal(eth->h_source, bpq->acpt_addr))
 		goto drop_unlock;
 
 	if (skb_cow(skb, sizeof(struct ethhdr)))
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
index 8e01c457015b..8a6c720a4cc9 100644
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -9,8 +9,7 @@
  *  for more details.
  *
  *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *  with this program; if not, see <http://www.gnu.org/licenses/>.
  *
  * Copyright (C) Hans Alblas PE1AYX <hans@esrac.ele.tue.nl>
  * Copyright (C) 2004, 05 Ralf Baechle DL5RB <ralf@linux-mips.org>
diff --git a/drivers/net/hippi/rrunner.c b/drivers/net/hippi/rrunner.c
index 00ed75155ce8..e580583f196d 100644
--- a/drivers/net/hippi/rrunner.c
+++ b/drivers/net/hippi/rrunner.c
@@ -37,7 +37,6 @@
 #include <linux/netdevice.h>
 #include <linux/hippidevice.h>
 #include <linux/skbuff.h>
-#include <linux/init.h>
 #include <linux/delay.h>
 #include <linux/mm.h>
 #include <linux/slab.h>
@@ -213,10 +212,8 @@ static int rr_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 				    rrpriv->tx_ring_dma);
 	if (rrpriv->regs)
 		pci_iounmap(pdev, rrpriv->regs);
-	if (pdev) {
+	if (pdev)
 		pci_release_regions(pdev);
-		pci_set_drvdata(pdev, NULL);
-	}
  out2:
 	free_netdev(dev);
  out3:
@@ -244,7 +241,6 @@ static void rr_remove_one(struct pci_dev *pdev)
 	pci_iounmap(pdev, rr->regs);
 	pci_release_regions(pdev);
 	pci_disable_device(pdev);
-	pci_set_drvdata(pdev, NULL);
 	free_netdev(dev);
 }
 
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index e6fe0d80d612..a26eecb1212c 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -12,8 +12,7 @@
  * more details.
  *
  * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
+ * this program; if not, see <http://www.gnu.org/licenses/>.
  *
  * Authors:
  *   Haiyang Zhang <haiyangz@microsoft.com>
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 2b0480416b31..93b485b96249 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -11,8 +11,7 @@
  * more details.
  *
  * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
+ * this program; if not, see <http://www.gnu.org/licenses/>.
  *
  * Authors:
  *   Haiyang Zhang <haiyangz@microsoft.com>
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 71baeb3ed905..7756118c2f0a 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -11,8 +11,7 @@
  * more details.
  *
  * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
+ * this program; if not, see <http://www.gnu.org/licenses/>.
  *
  * Authors:
  *   Haiyang Zhang <haiyangz@microsoft.com>
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 0775f0aefd1e..1084e5de3ceb 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -11,8 +11,7 @@
  * more details.
  *
  * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
+ * this program; if not, see <http://www.gnu.org/licenses/>.
  *
  * Authors:
  *   Haiyang Zhang <haiyangz@microsoft.com>
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
index 2cbe1c249996..ab31544bc254 100644
--- a/drivers/net/ieee802154/at86rf230.c
+++ b/drivers/net/ieee802154/at86rf230.c
@@ -987,7 +987,6 @@ err_gpio_dir:
 err_slp_tr:
 	gpio_free(lp->rstn);
 err_rstn:
-	spi_set_drvdata(spi, NULL);
 	mutex_destroy(&lp->bmux);
 	ieee802154_free_device(lp->dev);
 	return rc;
@@ -1006,7 +1005,6 @@ static int at86rf230_remove(struct spi_device *spi)
 		gpio_free(lp->slp_tr);
 	gpio_free(lp->rstn);
 
-	spi_set_drvdata(spi, NULL);
 	mutex_destroy(&lp->bmux);
 	ieee802154_free_device(lp->dev);
 
diff --git a/drivers/net/ieee802154/mrf24j40.c b/drivers/net/ieee802154/mrf24j40.c
index c6e46d6e9f75..246befa4ba05 100644
--- a/drivers/net/ieee802154/mrf24j40.c
+++ b/drivers/net/ieee802154/mrf24j40.c
@@ -715,7 +715,6 @@ static int mrf24j40_remove(struct spi_device *spi)
 	 * complete? */
 
 	/* Clean up the SPI stuff. */
-	spi_set_drvdata(spi, NULL);
 	kfree(devrec->buf);
 	kfree(devrec);
 	return 0;
diff --git a/drivers/net/irda/Kconfig b/drivers/net/irda/Kconfig
index 2a30193d0d50..2dc82f1d2e70 100644
--- a/drivers/net/irda/Kconfig
+++ b/drivers/net/irda/Kconfig
@@ -62,8 +62,6 @@ config SIR_BFIN_PIO
 	bool "PIO mode"
 endchoice
 
-comment "Dongle support"
-
 config SH_SIR
 	tristate "SuperH SIR on UART"
 	depends on IRDA && SUPERH && \
@@ -74,6 +72,8 @@ config SH_SIR
 	  Say Y here if your want to enable SIR function on SuperH UART
 	  devices.
 
+comment "Dongle support"
+
 config DONGLE
 	bool "Serial dongle support"
 	depends on IRTTY_SIR
diff --git a/drivers/net/irda/au1k_ir.c b/drivers/net/irda/au1k_ir.c
index 7a1f684edcb5..5f91e3e01c04 100644
--- a/drivers/net/irda/au1k_ir.c
+++ b/drivers/net/irda/au1k_ir.c
@@ -15,11 +15,9 @@
  *  for more details.
  *
  *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *  with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
-#include <linux/init.h>
 #include <linux/module.h>
 #include <linux/netdevice.h>
 #include <linux/interrupt.h>
diff --git a/drivers/net/irda/esi-sir.c b/drivers/net/irda/esi-sir.c
index a908df7c4b9d..019a3e848bcb 100644
--- a/drivers/net/irda/esi-sir.c
+++ b/drivers/net/irda/esi-sir.c
@@ -25,9 +25,7 @@
  *     GNU General Public License for more details.
  * 
  *     You should have received a copy of the GNU General Public License 
- *     along with this program; if not, write to the Free Software 
- *     Foundation, Inc., 59 Temple Place, Suite 330, Boston, 
- *     MA 02111-1307 USA
+ *     along with this program; if not, see <http://www.gnu.org/licenses/>.
  *     
  ********************************************************************/
 
diff --git a/drivers/net/irda/irda-usb.c b/drivers/net/irda/irda-usb.c
index f9a86bdb12fa..925b78cc9797 100644
--- a/drivers/net/irda/irda-usb.c
+++ b/drivers/net/irda/irda-usb.c
@@ -58,7 +58,6 @@
 #include <linux/moduleparam.h>
 #include <linux/kernel.h>
 #include <linux/types.h>
-#include <linux/init.h>
 #include <linux/skbuff.h>
 #include <linux/netdevice.h>
 #include <linux/slab.h>
diff --git a/drivers/net/irda/kingsun-sir.c b/drivers/net/irda/kingsun-sir.c
index 7b4833874ef5..96fe3659012d 100644
--- a/drivers/net/irda/kingsun-sir.c
+++ b/drivers/net/irda/kingsun-sir.c
@@ -64,7 +64,6 @@
 #include <linux/kernel.h>
 #include <linux/types.h>
 #include <linux/errno.h>
-#include <linux/init.h>
 #include <linux/slab.h>
 #include <linux/usb.h>
 #include <linux/device.h>
diff --git a/drivers/net/irda/ks959-sir.c b/drivers/net/irda/ks959-sir.c
index 5f3aeac3f86d..e6b3804edacd 100644
--- a/drivers/net/irda/ks959-sir.c
+++ b/drivers/net/irda/ks959-sir.c
@@ -116,7 +116,6 @@
 #include <linux/kernel.h>
 #include <linux/types.h>
 #include <linux/errno.h>
-#include <linux/init.h>
 #include <linux/slab.h>
 #include <linux/usb.h>
 #include <linux/device.h>
diff --git a/drivers/net/irda/ksdazzle-sir.c b/drivers/net/irda/ksdazzle-sir.c
index 2d4b6a1ab202..37f23a189b35 100644
--- a/drivers/net/irda/ksdazzle-sir.c
+++ b/drivers/net/irda/ksdazzle-sir.c
@@ -80,7 +80,6 @@
 #include <linux/kernel.h>
 #include <linux/types.h>
 #include <linux/errno.h>
-#include <linux/init.h>
 #include <linux/slab.h>
 #include <linux/usb.h>
 #include <linux/device.h>
diff --git a/drivers/net/irda/litelink-sir.c b/drivers/net/irda/litelink-sir.c
index d6d9d2e5ad49..6827777cbeea 100644
--- a/drivers/net/irda/litelink-sir.c
+++ b/drivers/net/irda/litelink-sir.c
@@ -22,9 +22,7 @@
  *     GNU General Public License for more details.
  * 
  *     You should have received a copy of the GNU General Public License 
- *     along with this program; if not, write to the Free Software 
- *     Foundation, Inc., 59 Temple Place, Suite 330, Boston, 
- *     MA 02111-1307 USA
+ *     along with this program; if not, see <http://www.gnu.org/licenses/>.
  *     
  ********************************************************************/
 
diff --git a/drivers/net/irda/ma600-sir.c b/drivers/net/irda/ma600-sir.c
index e91216452379..a9a81358477b 100644
--- a/drivers/net/irda/ma600-sir.c
+++ b/drivers/net/irda/ma600-sir.c
@@ -25,9 +25,7 @@
  *     GNU General Public License for more details.
  * 
  *     You should have received a copy of the GNU General Public License 
- *     along with this program; if not, write to the Free Software 
- *     Foundation, Inc., 59 Temple Place, Suite 330, Boston, 
- *     MA 02111-1307 USA
+ *     along with this program; if not, see <http://www.gnu.org/licenses/>.
  *     
  ********************************************************************/
 
diff --git a/drivers/net/irda/mcs7780.c b/drivers/net/irda/mcs7780.c
index 3f138ca88670..16f8ffb50e04 100644
--- a/drivers/net/irda/mcs7780.c
+++ b/drivers/net/irda/mcs7780.c
@@ -48,7 +48,6 @@
 #include <linux/kernel.h>
 #include <linux/types.h>
 #include <linux/errno.h>
-#include <linux/init.h>
 #include <linux/slab.h>
 #include <linux/usb.h>
 #include <linux/device.h>
diff --git a/drivers/net/irda/old_belkin-sir.c b/drivers/net/irda/old_belkin-sir.c
index 75714bc71030..f237136f3827 100644
--- a/drivers/net/irda/old_belkin-sir.c
+++ b/drivers/net/irda/old_belkin-sir.c
@@ -22,9 +22,7 @@
  *     GNU General Public License for more details.
  * 
  *     You should have received a copy of the GNU General Public License 
- *     along with this program; if not, write to the Free Software 
- *     Foundation, Inc., 59 Temple Place, Suite 330, Boston, 
- *     MA 02111-1307 USA
+ *     along with this program; if not, see <http://www.gnu.org/licenses/>.
  *     
  ********************************************************************/
 
diff --git a/drivers/net/irda/sh_irda.c b/drivers/net/irda/sh_irda.c
index ff45cd0d60e8..c96b46b2c3a8 100644
--- a/drivers/net/irda/sh_irda.c
+++ b/drivers/net/irda/sh_irda.c
@@ -804,7 +804,7 @@ static int sh_irda_probe(struct platform_device *pdev)
 		goto err_mem_4;
 
 	platform_set_drvdata(pdev, ndev);
-	err = request_irq(irq, sh_irda_irq, 0, "sh_irda", self);
+	err = devm_request_irq(&pdev->dev, irq, sh_irda_irq, 0, "sh_irda", self);
 	if (err) {
 		dev_warn(&pdev->dev, "Unable to attach sh_irda interrupt\n");
 		goto err_mem_4;
diff --git a/drivers/net/irda/sh_sir.c b/drivers/net/irda/sh_sir.c
index 8d9ae5a086d5..cadf52e22464 100644
--- a/drivers/net/irda/sh_sir.c
+++ b/drivers/net/irda/sh_sir.c
@@ -761,7 +761,7 @@ static int sh_sir_probe(struct platform_device *pdev)
 		goto err_mem_4;
 
 	platform_set_drvdata(pdev, ndev);
-	err = request_irq(irq, sh_sir_irq, 0, "sh_sir", self);
+	err = devm_request_irq(&pdev->dev, irq, sh_sir_irq, 0, "sh_sir", self);
 	if (err) {
 		dev_warn(&pdev->dev, "Unable to attach sh_sir interrupt\n");
 		goto err_mem_4;
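
The two hunks above convert sh_irda and sh_sir from request_irq() to the managed devm_request_irq() variant, so the IRQ line is released automatically when the device is unbound and the error/remove paths no longer need a matching free_irq(). A minimal sketch of the pattern in a probe routine; the driver name, handler and use of pdev as dev_id are illustrative, not taken from this patch:

#include <linux/interrupt.h>
#include <linux/platform_device.h>

static irqreturn_t example_irq(int irq, void *dev_id)
{
	/* acknowledge the hardware here and report whether it was ours */
	return IRQ_HANDLED;
}

static int example_probe(struct platform_device *pdev)
{
	int irq = platform_get_irq(pdev, 0);
	int err;

	if (irq < 0)
		return irq;

	/* Managed variant: freed automatically on driver detach, so no
	 * free_irq() in remove() or in later error paths.
	 */
	err = devm_request_irq(&pdev->dev, irq, example_irq, 0, "example", pdev);
	if (err)
		dev_warn(&pdev->dev, "unable to request IRQ %d\n", irq);

	return err;
}
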
diff --git a/drivers/net/irda/sir_dongle.c b/drivers/net/irda/sir_dongle.c
index 2a9930e6e2af..cfbabb63f5cc 100644
--- a/drivers/net/irda/sir_dongle.c
+++ b/drivers/net/irda/sir_dongle.c
@@ -13,7 +13,6 @@
 
 #include <linux/module.h>
 #include <linux/kernel.h>
-#include <linux/init.h>
 #include <linux/kmod.h>
 #include <linux/mutex.h>
 
diff --git a/drivers/net/irda/smsc-ircc2.c b/drivers/net/irda/smsc-ircc2.c
index 0dcdf1592f6b..282120430f12 100644
--- a/drivers/net/irda/smsc-ircc2.c
+++ b/drivers/net/irda/smsc-ircc2.c
@@ -34,9 +34,7 @@
  *     GNU General Public License for more details.
  *
  *     You should have received a copy of the GNU General Public License
- *     along with this program; if not, write to the Free Software
- *     Foundation, Inc., 59 Temple Place, Suite 330, Boston,
- *     MA 02111-1307 USA
+ *     along with this program; if not, see <http://www.gnu.org/licenses/>.
  *
  ********************************************************************/
 
diff --git a/drivers/net/irda/smsc-ircc2.h b/drivers/net/irda/smsc-ircc2.h
index 317b7fd69bb3..4829fa22cb29 100644
--- a/drivers/net/irda/smsc-ircc2.h
+++ b/drivers/net/irda/smsc-ircc2.h
@@ -25,9 +25,7 @@
  *     GNU General Public License for more details.
  *
  *     You should have received a copy of the GNU General Public License
- *     along with this program; if not, write to the Free Software
- *     Foundation, Inc., 59 Temple Place, Suite 330, Boston,
- *     MA 02111-1307 USA
+ *     along with this program; if not, see <http://www.gnu.org/licenses/>.
  *
  ********************************************************************/
 
diff --git a/drivers/net/irda/stir4200.c b/drivers/net/irda/stir4200.c
index 876e709b65ba..dd1bd1060ec9 100644
--- a/drivers/net/irda/stir4200.c
+++ b/drivers/net/irda/stir4200.c
@@ -41,7 +41,6 @@
 
 #include <linux/kernel.h>
 #include <linux/types.h>
-#include <linux/init.h>
 #include <linux/time.h>
 #include <linux/skbuff.h>
 #include <linux/netdevice.h>
diff --git a/drivers/net/irda/via-ircc.c b/drivers/net/irda/via-ircc.c
index 9abaec27f962..2900af091c2d 100644
--- a/drivers/net/irda/via-ircc.c
+++ b/drivers/net/irda/via-ircc.c
@@ -17,8 +17,7 @@ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 See the GNU General Public License for more details.
 
 You should have received a copy of the GNU General Public License along with
-this program; if not, write to the Free Software Foundation, Inc.,
-59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+this program; if not, see <http://www.gnu.org/licenses/>.
 
 F01 Oct/02/02: Modify code for V0.11(move out back to back transfer)
 F02 Oct/28/02: Add SB device ID for 3147 and 3177.
@@ -408,7 +407,6 @@ static int via_ircc_open(struct pci_dev *pdev, chipio_t *info, unsigned int id)
  err_out2:
 	release_region(self->io.fir_base, self->io.fir_ext);
  err_out1:
-	pci_set_drvdata(pdev, NULL);
 	free_netdev(dev);
 	return err;
 }
@@ -442,7 +440,6 @@ static void via_remove_one(struct pci_dev *pdev)
 	if (self->rx_buff.head)
 		dma_free_coherent(&pdev->dev, self->rx_buff.truesize,
 				  self->rx_buff.head, self->rx_buff_dma);
-	pci_set_drvdata(pdev, NULL);
 
 	free_netdev(self->netdev);
 
diff --git a/drivers/net/irda/via-ircc.h b/drivers/net/irda/via-ircc.h
index f903a6a2dcb7..7ce820ecc361 100644
--- a/drivers/net/irda/via-ircc.h
+++ b/drivers/net/irda/via-ircc.h
@@ -18,8 +18,7 @@ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 See the GNU General Public License for more details.
 
 You should have received a copy of the GNU General Public License along with
-this program; if not, write to the Free Software Foundation, Inc.,
-59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+this program; if not, see <http://www.gnu.org/licenses/>.
 
  * Comment:
  * jul/08/2002 : Rx buffer length should use Rx ring ptr.	
diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/net/irda/vlsi_ir.c
index c5bd58b4d8a8..485006604bbc 100644
--- a/drivers/net/irda/vlsi_ir.c
+++ b/drivers/net/irda/vlsi_ir.c
@@ -15,9 +15,7 @@
  *	GNU General Public License for more details.
  *
  *	You should have received a copy of the GNU General Public License 
- *	along with this program; if not, write to the Free Software 
- *	Foundation, Inc., 59 Temple Place, Suite 330, Boston, 
- *	MA 02111-1307 USA
+ *	along with this program; if not, see <http://www.gnu.org/licenses/>.
  *
  ********************************************************************/
 
@@ -1695,7 +1693,6 @@ out_freedev:
 out_disable:
 	pci_disable_device(pdev);
 out:
-	pci_set_drvdata(pdev, NULL);
 	return -ENODEV;
 }
 
@@ -1721,8 +1718,6 @@ static void vlsi_irda_remove(struct pci_dev *pdev)
 
 	free_netdev(ndev);
 
-	pci_set_drvdata(pdev, NULL);
-
 	IRDA_MESSAGE("%s: %s removed\n", drivername, pci_name(pdev));
 }
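
The via-ircc and vlsi_ir hunks drop the explicit pci_set_drvdata(pdev, NULL) calls on the error and remove paths. The driver core clears the drvdata pointer itself once a device is unbound, so resetting it by hand is redundant. A hedged sketch of the resulting shape of a PCI remove routine (names are illustrative only):

#include <linux/pci.h>
#include <linux/netdevice.h>

static void example_remove(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);

	unregister_netdev(ndev);
	free_netdev(ndev);
	/* No pci_set_drvdata(pdev, NULL) here: the driver core clears
	 * drvdata after ->remove() returns, so the explicit reset adds
	 * nothing.
	 */
}
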
 
diff --git a/drivers/net/irda/vlsi_ir.h b/drivers/net/irda/vlsi_ir.h
index a076eb125349..56399204e68c 100644
--- a/drivers/net/irda/vlsi_ir.h
+++ b/drivers/net/irda/vlsi_ir.h
@@ -18,9 +18,7 @@
  *	GNU General Public License for more details.
  *
  *	You should have received a copy of the GNU General Public License 
- *	along with this program; if not, write to the Free Software 
- *	Foundation, Inc., 59 Temple Place, Suite 330, Boston, 
- *	MA 02111-1307 USA
+ *	along with this program; if not, see <http://www.gnu.org/licenses/>.
  *
  ********************************************************************/
 
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index ac24c27b4b2d..c5011e078e1b 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -39,7 +39,6 @@
 #include <linux/errno.h>
 #include <linux/fcntl.h>
 #include <linux/in.h>
-#include <linux/init.h>
 
 #include <asm/uaccess.h>
 #include <asm/io.h>
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index bc8faaec33f5..8433de4509c7 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -120,7 +120,7 @@ static int macvlan_broadcast_one(struct sk_buff *skb,
 	struct net_device *dev = vlan->dev;
 
 	if (local)
-		return vlan->forward(dev, skb);
+		return dev_forward_skb(dev, skb);
 
 	skb->dev = dev;
 	if (ether_addr_equal_64bits(eth->h_dest, dev->broadcast))
@@ -128,7 +128,7 @@ static int macvlan_broadcast_one(struct sk_buff *skb,
 	else
 		skb->pkt_type = PACKET_MULTICAST;
 
-	return vlan->receive(skb);
+	return netif_rx(skb);
 }
 
 static u32 macvlan_hash_mix(const struct macvlan_dev *vlan)
@@ -251,7 +251,7 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
 	skb->dev = dev;
 	skb->pkt_type = PACKET_HOST;
 
-	ret = vlan->receive(skb);
+	ret = netif_rx(skb);
 
 out:
 	macvlan_count_rx(vlan, len, ret == NET_RX_SUCCESS, 0);
@@ -290,8 +290,8 @@ xmit_world:
 	return dev_queue_xmit(skb);
 }
 
-netdev_tx_t macvlan_start_xmit(struct sk_buff *skb,
-			       struct net_device *dev)
+static netdev_tx_t macvlan_start_xmit(struct sk_buff *skb,
+				      struct net_device *dev)
 {
 	unsigned int len = skb->len;
 	int ret;
@@ -305,7 +305,7 @@ netdev_tx_t macvlan_start_xmit(struct sk_buff *skb,
 	}
 
 	if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
-		struct macvlan_pcpu_stats *pcpu_stats;
+		struct vlan_pcpu_stats *pcpu_stats;
 
 		pcpu_stats = this_cpu_ptr(vlan->pcpu_stats);
 		u64_stats_update_begin(&pcpu_stats->syncp);
@@ -317,7 +317,6 @@ netdev_tx_t macvlan_start_xmit(struct sk_buff *skb,
 	}
 	return ret;
 }
-EXPORT_SYMBOL_GPL(macvlan_start_xmit);
 
 static int macvlan_hard_header(struct sk_buff *skb, struct net_device *dev,
 			       unsigned short type, const void *daddr,
@@ -547,12 +546,12 @@ static int macvlan_init(struct net_device *dev)
 
 	macvlan_set_lockdep_class(dev);
 
-	vlan->pcpu_stats = alloc_percpu(struct macvlan_pcpu_stats);
+	vlan->pcpu_stats = alloc_percpu(struct vlan_pcpu_stats);
 	if (!vlan->pcpu_stats)
 		return -ENOMEM;
 
 	for_each_possible_cpu(i) {
-		struct macvlan_pcpu_stats *mvlstats;
+		struct vlan_pcpu_stats *mvlstats;
 		mvlstats = per_cpu_ptr(vlan->pcpu_stats, i);
 		u64_stats_init(&mvlstats->syncp);
 	}
@@ -578,7 +577,7 @@ static struct rtnl_link_stats64 *macvlan_dev_get_stats64(struct net_device *dev,
 	struct macvlan_dev *vlan = netdev_priv(dev);
 
 	if (vlan->pcpu_stats) {
-		struct macvlan_pcpu_stats *p;
+		struct vlan_pcpu_stats *p;
 		u64 rx_packets, rx_bytes, rx_multicast, tx_packets, tx_bytes;
 		u32 rx_errors = 0, tx_dropped = 0;
 		unsigned int start;
@@ -814,10 +813,7 @@ static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[])
 }
 
 int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
-			   struct nlattr *tb[], struct nlattr *data[],
-			   int (*receive)(struct sk_buff *skb),
-			   int (*forward)(struct net_device *dev,
-					  struct sk_buff *skb))
+			   struct nlattr *tb[], struct nlattr *data[])
 {
 	struct macvlan_dev *vlan = netdev_priv(dev);
 	struct macvlan_port *port;
@@ -831,13 +827,11 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
 	if (lowerdev == NULL)
 		return -ENODEV;
 
-	/* When creating macvlans on top of other macvlans - use
+	/* When creating macvlans or macvtaps on top of other macvlans - use
 	 * the real device as the lowerdev.
 	 */
-	if (lowerdev->rtnl_link_ops == dev->rtnl_link_ops) {
-		struct macvlan_dev *lowervlan = netdev_priv(lowerdev);
-		lowerdev = lowervlan->lowerdev;
-	}
+	if (netif_is_macvlan(lowerdev))
+		lowerdev = macvlan_dev_real_dev(lowerdev);
 
 	if (!tb[IFLA_MTU])
 		dev->mtu = lowerdev->mtu;
@@ -861,8 +855,6 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
 	vlan->lowerdev = lowerdev;
 	vlan->dev      = dev;
 	vlan->port     = port;
-	vlan->receive  = receive;
-	vlan->forward  = forward;
 	vlan->set_features = MACVLAN_FEATURES;
 
 	vlan->mode     = MACVLAN_MODE_VEPA;
@@ -907,9 +899,7 @@ EXPORT_SYMBOL_GPL(macvlan_common_newlink);
 static int macvlan_newlink(struct net *src_net, struct net_device *dev,
 			   struct nlattr *tb[], struct nlattr *data[])
 {
-	return macvlan_common_newlink(src_net, dev, tb, data,
-				      netif_rx,
-				      dev_forward_skb);
+	return macvlan_common_newlink(src_net, dev, tb, data);
 }
 
 void macvlan_dellink(struct net_device *dev, struct list_head *head)
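
With the receive/forward function pointers removed from struct macvlan_dev, macvlan_broadcast_one() and macvlan_handle_frame() call the ordinary stack entry points directly, and macvlan_common_newlink() loses its two callback parameters. A condensed sketch of the delivery choice the driver now makes, assuming a boolean that distinguishes locally bridged traffic (the helper name and signature are hypothetical):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Locally bridged traffic is re-injected with dev_forward_skb();
 * everything else goes up the stack via netif_rx(), with no per-device
 * indirection in between.
 */
static int example_deliver(struct net_device *vlan_dev, struct sk_buff *skb,
			   bool local)
{
	if (local)
		return dev_forward_skb(vlan_dev, skb);	/* was vlan->forward() */

	skb->dev = vlan_dev;
	return netif_rx(skb);				/* was vlan->receive() */
}
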
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 2a89da080317..ff111a89e17f 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -11,7 +11,6 @@
 #include <linux/sched.h>
 #include <linux/types.h>
 #include <linux/slab.h>
-#include <linux/init.h>
 #include <linux/wait.h>
 #include <linux/cdev.h>
 #include <linux/idr.h>
@@ -70,6 +69,11 @@ static const struct proto_ops macvtap_socket_ops;
 #define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO)
 #define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG)
 
+static struct macvlan_dev *macvtap_get_vlan_rcu(const struct net_device *dev)
+{
+	return rcu_dereference(dev->rx_handler_data);
+}
+
 /*
  * RCU usage:
  * The macvtap_queue and the macvlan_dev are loosely coupled, the
@@ -219,7 +223,7 @@ static struct macvtap_queue *macvtap_get_queue(struct net_device *dev,
 		goto out;
 
 	/* Check if we can use flow to select a queue */
-	rxq = skb_get_rxhash(skb);
+	rxq = skb_get_hash(skb);
 	if (rxq) {
 		tap = rcu_dereference(vlan->taps[rxq % numvtaps]);
 		goto out;
@@ -271,24 +275,27 @@ static void macvtap_del_queues(struct net_device *dev)
 		sock_put(&qlist[j]->sk);
 }
 
-/*
- * Forward happens for data that gets sent from one macvlan
- * endpoint to another one in bridge mode. We just take
- * the skb and put it into the receive queue.
- */
-static int macvtap_forward(struct net_device *dev, struct sk_buff *skb)
+static rx_handler_result_t macvtap_handle_frame(struct sk_buff **pskb)
 {
-	struct macvlan_dev *vlan = netdev_priv(dev);
-	struct macvtap_queue *q = macvtap_get_queue(dev, skb);
+	struct sk_buff *skb = *pskb;
+	struct net_device *dev = skb->dev;
+	struct macvlan_dev *vlan;
+	struct macvtap_queue *q;
 	netdev_features_t features = TAP_FEATURES;
 
+	vlan = macvtap_get_vlan_rcu(dev);
+	if (!vlan)
+		return RX_HANDLER_PASS;
+
+	q = macvtap_get_queue(dev, skb);
 	if (!q)
-		goto drop;
+		return RX_HANDLER_PASS;
 
 	if (skb_queue_len(&q->sk.sk_receive_queue) >= dev->tx_queue_len)
 		goto drop;
 
-	skb->dev = dev;
+	skb_push(skb, ETH_HLEN);
+
 	/* Apply the forward feature mask so that we perform segmentation
 	 * according to users wishes.  This only works if VNET_HDR is
 	 * enabled.
@@ -320,22 +327,13 @@ static int macvtap_forward(struct net_device *dev, struct sk_buff *skb)
 
 wake_up:
 	wake_up_interruptible_poll(sk_sleep(&q->sk), POLLIN | POLLRDNORM | POLLRDBAND);
-	return NET_RX_SUCCESS;
+	return RX_HANDLER_CONSUMED;
 
 drop:
+	/* Count errors/drops only here, so the length/success args don't matter. */
+	macvlan_count_rx(vlan, 0, 0, 0);
 	kfree_skb(skb);
-	return NET_RX_DROP;
-}
-
-/*
- * Receive is for data from the external interface (lowerdev),
- * in case of macvtap, we can treat that the same way as
- * forward, which macvlan cannot.
- */
-static int macvtap_receive(struct sk_buff *skb)
-{
-	skb_push(skb, ETH_HLEN);
-	return macvtap_forward(skb->dev, skb);
+	return RX_HANDLER_CONSUMED;
 }
 
 static int macvtap_get_minor(struct macvlan_dev *vlan)
@@ -385,6 +383,8 @@ static int macvtap_newlink(struct net *src_net,
 			   struct nlattr *data[])
 {
 	struct macvlan_dev *vlan = netdev_priv(dev);
+	int err;
+
 	INIT_LIST_HEAD(&vlan->queue_list);
 
 	/* Since macvlan supports all offloads by default, make
@@ -392,16 +392,20 @@ static int macvtap_newlink(struct net *src_net,
 	 */
 	vlan->tap_features = TUN_OFFLOADS;
 
+	err = netdev_rx_handler_register(dev, macvtap_handle_frame, vlan);
+	if (err)
+		return err;
+
 	/* Don't put anything that may fail after macvlan_common_newlink
 	 * because we can't undo what it does.
 	 */
-	return macvlan_common_newlink(src_net, dev, tb, data,
-				      macvtap_receive, macvtap_forward);
+	return macvlan_common_newlink(src_net, dev, tb, data);
 }
 
 static void macvtap_dellink(struct net_device *dev,
 			    struct list_head *head)
 {
+	netdev_rx_handler_unregister(dev);
 	macvtap_del_queues(dev);
 	macvlan_dellink(dev, head);
 }
@@ -588,7 +592,7 @@ static int macvtap_skb_from_vnet_hdr(struct sk_buff *skb,
 	return 0;
 }
 
-static int macvtap_skb_to_vnet_hdr(const struct sk_buff *skb,
+static void macvtap_skb_to_vnet_hdr(const struct sk_buff *skb,
 				   struct virtio_net_hdr *vnet_hdr)
 {
 	memset(vnet_hdr, 0, sizeof(*vnet_hdr));
@@ -619,8 +623,6 @@ static int macvtap_skb_to_vnet_hdr(const struct sk_buff *skb,
 	} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
 		vnet_hdr->flags = VIRTIO_NET_HDR_F_DATA_VALID;
 	} /* else everything is zero */
-
-	return 0;
 }
 
 /* Get packet from user space buffer */
@@ -727,9 +729,8 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
 		skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
 	}
 	if (vlan) {
-		local_bh_disable();
-		macvlan_start_xmit(skb, vlan->dev);
-		local_bh_enable();
+		skb->dev = vlan->dev;
+		dev_queue_xmit(skb);
 	} else {
 		kfree_skb(skb);
 	}
@@ -778,9 +779,7 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
 		if ((len -= vnet_hdr_len) < 0)
 			return -EINVAL;
 
-		ret = macvtap_skb_to_vnet_hdr(skb, &vnet_hdr);
-		if (ret)
-			return ret;
+		macvtap_skb_to_vnet_hdr(skb, &vnet_hdr);
 
 		if (memcpy_toiovecend(iv, (void *)&vnet_hdr, 0, sizeof(vnet_hdr)))
 			return -EFAULT;
@@ -824,7 +823,7 @@ done:
 	return ret ? ret : total;
 }
 
-static ssize_t macvtap_do_read(struct macvtap_queue *q, struct kiocb *iocb,
+static ssize_t macvtap_do_read(struct macvtap_queue *q,
 			       const struct iovec *iv, unsigned long len,
 			       int noblock)
 {
@@ -875,7 +874,7 @@ static ssize_t macvtap_aio_read(struct kiocb *iocb, const struct iovec *iv,
 		goto out;
 	}
 
-	ret = macvtap_do_read(q, iocb, iv, len, file->f_flags & O_NONBLOCK);
+	ret = macvtap_do_read(q, iv, len, file->f_flags & O_NONBLOCK);
 	ret = min_t(ssize_t, ret, len);
 	if (ret > 0)
 		iocb->ki_pos = ret;
@@ -1109,7 +1108,7 @@ static int macvtap_recvmsg(struct kiocb *iocb, struct socket *sock,
 	int ret;
 	if (flags & ~(MSG_DONTWAIT|MSG_TRUNC))
 		return -EINVAL;
-	ret = macvtap_do_read(q, iocb, m->msg_iov, total_len,
+	ret = macvtap_do_read(q, m->msg_iov, total_len,
 			  flags & MSG_DONTWAIT);
 	if (ret > total_len) {
 		m->msg_flags |= MSG_TRUNC;
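
macvtap now taps its traffic by registering an rx_handler on its own net_device instead of feeding receive/forward callbacks into macvlan; the real handler additionally selects a queue (via skb_get_hash()) and applies the TAP feature mask before queueing. The general shape of an rx_handler user looks roughly like this (illustrative names; error handling trimmed):

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/rcupdate.h>

static rx_handler_result_t example_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	void *priv = rcu_dereference(skb->dev->rx_handler_data);

	if (!priv)
		return RX_HANDLER_PASS;		/* not ours, let the stack continue */

	/* hand the skb to whatever consumes it; dropped here as a placeholder */
	kfree_skb(skb);
	return RX_HANDLER_CONSUMED;
}

static int example_attach(struct net_device *dev, void *priv)
{
	/* must be called under rtnl_lock(); fails if a handler is already set */
	return netdev_rx_handler_register(dev, example_handle_frame, priv);
}

static void example_detach(struct net_device *dev)
{
	netdev_rx_handler_unregister(dev);
}
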
diff --git a/drivers/net/mdio.c b/drivers/net/mdio.c
index 8403316eb02b..3e027ed0b3bb 100644
--- a/drivers/net/mdio.c
+++ b/drivers/net/mdio.c
@@ -342,34 +342,6 @@ void mdio45_ethtool_gset_npage(const struct mdio_if_info *mdio,
 EXPORT_SYMBOL(mdio45_ethtool_gset_npage);
 
 /**
- * mdio45_ethtool_spauseparam_an - set auto-negotiated pause parameters
- * @mdio: MDIO interface
- * @ecmd: Ethtool request structure
- *
- * This function assumes that the PHY has an auto-negotiation MMD.  It
- * will enable and disable advertising of flow control as appropriate.
- */
-void mdio45_ethtool_spauseparam_an(const struct mdio_if_info *mdio,
-				   const struct ethtool_pauseparam *ecmd)
-{
-	int adv, old_adv;
-
-	WARN_ON(!(mdio->mmds & MDIO_DEVS_AN));
-
-	old_adv = mdio->mdio_read(mdio->dev, mdio->prtad, MDIO_MMD_AN,
-				  MDIO_AN_ADVERTISE);
-	adv = ((old_adv & ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) |
-	       mii_advertise_flowctrl((ecmd->rx_pause ? FLOW_CTRL_RX : 0) |
-				      (ecmd->tx_pause ? FLOW_CTRL_TX : 0)));
-	if (adv != old_adv) {
-		mdio->mdio_write(mdio->dev, mdio->prtad, MDIO_MMD_AN,
-				 MDIO_AN_ADVERTISE, adv);
-		mdio45_nway_restart(mdio);
-	}
-}
-EXPORT_SYMBOL(mdio45_ethtool_spauseparam_an);
-
-/**
  * mdio_mii_ioctl - MII ioctl interface for MDIO (clause 22 or 45) PHYs
  * @mdio: MDIO interface
  * @mii_data: MII ioctl data structure
diff --git a/drivers/net/phy/cicada.c b/drivers/net/phy/cicada.c
index 313a0377f68f..b57ce0cc9657 100644
--- a/drivers/net/phy/cicada.c
+++ b/drivers/net/phy/cicada.c
@@ -92,8 +92,8 @@ static int cis820x_config_intr(struct phy_device *phydev)
 {
 	int err;
 
-	if(phydev->interrupts == PHY_INTERRUPT_ENABLED)
-		err = phy_write(phydev, MII_CIS8201_IMASK, 
+	if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
+		err = phy_write(phydev, MII_CIS8201_IMASK,
 				MII_CIS8201_IMASK_MASK);
 	else
 		err = phy_write(phydev, MII_CIS8201_IMASK, 0);
diff --git a/drivers/net/phy/davicom.c b/drivers/net/phy/davicom.c
index 383e8338ad86..d2c08f625a41 100644
--- a/drivers/net/phy/davicom.c
+++ b/drivers/net/phy/davicom.c
@@ -72,7 +72,7 @@ static int dm9161_config_intr(struct phy_device *phydev)
 	if (temp < 0)
 		return temp;
 
-	if(PHY_INTERRUPT_ENABLED == phydev->interrupts )
+	if (PHY_INTERRUPT_ENABLED == phydev->interrupts)
 		temp &= ~(MII_DM9161_INTR_STOP);
 	else
 		temp |= MII_DM9161_INTR_STOP;
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index 7490b6c866e6..547725fa8671 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -851,8 +851,8 @@ static int match(struct sk_buff *skb, unsigned int type, struct rxts *rxts)
 
 	seqid = (u16 *)(data + offset + OFF_PTP_SEQUENCE_ID);
 
-	return (rxts->msgtype == (*msgtype & 0xf) &&
-		rxts->seqid   == ntohs(*seqid));
+	return rxts->msgtype == (*msgtype & 0xf) &&
+		rxts->seqid   == ntohs(*seqid);
 }
 
 static void dp83640_free_clocks(void)
diff --git a/drivers/net/phy/icplus.c b/drivers/net/phy/icplus.c
index b5ddd5077a80..97bf58bf4939 100644
--- a/drivers/net/phy/icplus.c
+++ b/drivers/net/phy/icplus.c
@@ -48,7 +48,7 @@ MODULE_LICENSE("GPL");
 static int ip175c_config_init(struct phy_device *phydev)
 {
 	int err, i;
-	static int full_reset_performed = 0;
+	static int full_reset_performed;
 
 	if (full_reset_performed == 0) {
 
diff --git a/drivers/net/phy/lxt.c b/drivers/net/phy/lxt.c
index ff2e45e9cb54..9108f3191701 100644
--- a/drivers/net/phy/lxt.c
+++ b/drivers/net/phy/lxt.c
@@ -82,7 +82,7 @@ static int lxt970_config_intr(struct phy_device *phydev)
 {
 	int err;
 
-	if(phydev->interrupts == PHY_INTERRUPT_ENABLED)
+	if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
 		err = phy_write(phydev, MII_LXT970_IER, MII_LXT970_IER_IEN);
 	else
 		err = phy_write(phydev, MII_LXT970_IER, 0);
@@ -114,7 +114,7 @@ static int lxt971_config_intr(struct phy_device *phydev)
 {
 	int err;
 
-	if(phydev->interrupts == PHY_INTERRUPT_ENABLED)
+	if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
 		err = phy_write(phydev, MII_LXT971_IER, MII_LXT971_IER_IEN);
 	else
 		err = phy_write(phydev, MII_LXT971_IER, 0);
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 2e3c778ea9bf..bd37e45c89c0 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -894,6 +894,8 @@ static struct phy_driver marvell_drivers[] = {
 		.read_status = &genphy_read_status,
 		.ack_interrupt = &marvell_ack_interrupt,
 		.config_intr = &marvell_config_intr,
+		.resume = &genphy_resume,
+		.suspend = &genphy_suspend,
 		.driver = { .owner = THIS_MODULE },
 	},
 	{
@@ -907,6 +909,8 @@ static struct phy_driver marvell_drivers[] = {
 		.read_status = &genphy_read_status,
 		.ack_interrupt = &marvell_ack_interrupt,
 		.config_intr = &marvell_config_intr,
+		.resume = &genphy_resume,
+		.suspend = &genphy_suspend,
 		.driver = { .owner = THIS_MODULE },
 	},
 	{
@@ -920,6 +924,8 @@ static struct phy_driver marvell_drivers[] = {
 		.read_status = &marvell_read_status,
 		.ack_interrupt = &marvell_ack_interrupt,
 		.config_intr = &marvell_config_intr,
+		.resume = &genphy_resume,
+		.suspend = &genphy_suspend,
 		.driver = { .owner = THIS_MODULE },
 	},
 	{
@@ -933,6 +939,8 @@ static struct phy_driver marvell_drivers[] = {
 		.read_status = &genphy_read_status,
 		.ack_interrupt = &marvell_ack_interrupt,
 		.config_intr = &marvell_config_intr,
+		.resume = &genphy_resume,
+		.suspend = &genphy_suspend,
 		.driver = {.owner = THIS_MODULE,},
 	},
 	{
@@ -946,6 +954,8 @@ static struct phy_driver marvell_drivers[] = {
 		.ack_interrupt = &marvell_ack_interrupt,
 		.config_intr = &marvell_config_intr,
 		.did_interrupt = &m88e1121_did_interrupt,
+		.resume = &genphy_resume,
+		.suspend = &genphy_suspend,
 		.driver = { .owner = THIS_MODULE },
 	},
 	{
@@ -961,6 +971,8 @@ static struct phy_driver marvell_drivers[] = {
 		.did_interrupt = &m88e1121_did_interrupt,
 		.get_wol = &m88e1318_get_wol,
 		.set_wol = &m88e1318_set_wol,
+		.resume = &genphy_resume,
+		.suspend = &genphy_suspend,
 		.driver = { .owner = THIS_MODULE },
 	},
 	{
@@ -974,6 +986,8 @@ static struct phy_driver marvell_drivers[] = {
 		.read_status = &genphy_read_status,
 		.ack_interrupt = &marvell_ack_interrupt,
 		.config_intr = &marvell_config_intr,
+		.resume = &genphy_resume,
+		.suspend = &genphy_suspend,
 		.driver = { .owner = THIS_MODULE },
 	},
 	{
@@ -987,6 +1001,8 @@ static struct phy_driver marvell_drivers[] = {
 		.read_status = &genphy_read_status,
 		.ack_interrupt = &marvell_ack_interrupt,
 		.config_intr = &marvell_config_intr,
+		.resume = &genphy_resume,
+		.suspend = &genphy_suspend,
 		.driver = { .owner = THIS_MODULE },
 	},
 	{
@@ -1000,6 +1016,8 @@ static struct phy_driver marvell_drivers[] = {
 		.read_status = &genphy_read_status,
 		.ack_interrupt = &marvell_ack_interrupt,
 		.config_intr = &marvell_config_intr,
+		.resume = &genphy_resume,
+		.suspend = &genphy_suspend,
 		.driver = { .owner = THIS_MODULE },
 	},
 	{
@@ -1013,6 +1031,8 @@ static struct phy_driver marvell_drivers[] = {
 		.read_status = &genphy_read_status,
 		.ack_interrupt = &marvell_ack_interrupt,
 		.config_intr = &marvell_config_intr,
+		.resume = &genphy_resume,
+		.suspend = &genphy_suspend,
 		.driver = { .owner = THIS_MODULE },
 	},
 	{
@@ -1026,6 +1046,8 @@ static struct phy_driver marvell_drivers[] = {
 		.ack_interrupt = &marvell_ack_interrupt,
 		.config_intr = &marvell_config_intr,
 		.did_interrupt = &m88e1121_did_interrupt,
+		.resume = &genphy_resume,
+		.suspend = &genphy_suspend,
 		.driver = { .owner = THIS_MODULE },
 	},
 };
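
Every Marvell entry gains the generic genphy_suspend()/genphy_resume() helpers, which toggle the BMCR power-down bit when the attached MAC driver suspends or resumes the PHY. A trimmed, hypothetical phy_driver entry showing just the relevant fields (the phy_id is a placeholder and registration via phy_drivers_register() is omitted):

#include <linux/module.h>
#include <linux/phy.h>

static struct phy_driver example_phy_driver = {
	.phy_id		= 0x12345678,		/* placeholder ID */
	.phy_id_mask	= 0xfffffff0,
	.name		= "Example PHY",
	.features	= PHY_GBIT_FEATURES,
	.config_aneg	= genphy_config_aneg,
	.read_status	= genphy_read_status,
	.suspend	= genphy_suspend,	/* sets BMCR_PDOWN */
	.resume		= genphy_resume,	/* clears BMCR_PDOWN */
	.driver		= { .owner = THIS_MODULE },
};
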
diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c
index 8004acbef2c9..e701433bf52f 100644
--- a/drivers/net/phy/mdio-gpio.c
+++ b/drivers/net/phy/mdio-gpio.c
@@ -22,7 +22,6 @@
 
 #include <linux/module.h>
 #include <linux/slab.h>
-#include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/platform_device.h>
 #include <linux/gpio.h>
diff --git a/drivers/net/phy/mdio-moxart.c b/drivers/net/phy/mdio-moxart.c
index a5741cb0304e..f1fc51f655d9 100644
--- a/drivers/net/phy/mdio-moxart.c
+++ b/drivers/net/phy/mdio-moxart.c
@@ -8,7 +8,6 @@
  */
 
 #include <linux/delay.h>
-#include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
diff --git a/drivers/net/phy/mdio-mux-gpio.c b/drivers/net/phy/mdio-mux-gpio.c
index d2dd9e473e2c..096695163491 100644
--- a/drivers/net/phy/mdio-mux-gpio.c
+++ b/drivers/net/phy/mdio-mux-gpio.c
@@ -10,7 +10,6 @@
 #include <linux/device.h>
 #include <linux/of_mdio.h>
 #include <linux/module.h>
-#include <linux/init.h>
 #include <linux/phy.h>
 #include <linux/mdio-mux.h>
 #include <linux/of_gpio.h>
diff --git a/drivers/net/phy/mdio-mux-mmioreg.c b/drivers/net/phy/mdio-mux-mmioreg.c
index f8e305d8da76..1656785ff339 100644
--- a/drivers/net/phy/mdio-mux-mmioreg.c
+++ b/drivers/net/phy/mdio-mux-mmioreg.c
@@ -15,7 +15,6 @@
 #include <linux/of_address.h>
 #include <linux/of_mdio.h>
 #include <linux/module.h>
-#include <linux/init.h>
 #include <linux/phy.h>
 #include <linux/mdio-mux.h>
 
diff --git a/drivers/net/phy/mdio-octeon.c b/drivers/net/phy/mdio-octeon.c
index 6aee02ed97ac..a51ed92fbada 100644
--- a/drivers/net/phy/mdio-octeon.c
+++ b/drivers/net/phy/mdio-octeon.c
@@ -10,7 +10,6 @@
 #include <linux/of_mdio.h>
 #include <linux/delay.h>
 #include <linux/module.h>
-#include <linux/init.h>
 #include <linux/gfp.h>
 #include <linux/phy.h>
 #include <linux/io.h>
diff --git a/drivers/net/phy/mdio-sun4i.c b/drivers/net/phy/mdio-sun4i.c
index 18969b3ad8bb..bb88bc7d81fb 100644
--- a/drivers/net/phy/mdio-sun4i.c
+++ b/drivers/net/phy/mdio-sun4i.c
@@ -13,7 +13,6 @@
  */
 
 #include <linux/delay.h>
-#include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 56178761ce93..930694d3a13f 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -1,7 +1,4 @@
-/*
- * drivers/net/phy/mdio_bus.c
- *
- * MDIO Bus interface
+/* MDIO Bus interface
  *
  * Author: Andy Fleming
  *
@@ -36,10 +33,10 @@
 #include <linux/mii.h>
 #include <linux/ethtool.h>
 #include <linux/phy.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
 
-#include <asm/io.h>
 #include <asm/irq.h>
-#include <asm/uaccess.h>
 
 /**
  * mdiobus_alloc_size - allocate a mii_bus structure
@@ -139,8 +136,7 @@ int mdiobus_register(struct mii_bus *bus)
 	int i, err;
 
 	if (NULL == bus || NULL == bus->name ||
-			NULL == bus->read ||
-			NULL == bus->write)
+	    NULL == bus->read || NULL == bus->write)
 		return -EINVAL;
 
 	BUG_ON(bus->state != MDIOBUS_ALLOCATED &&
@@ -214,9 +210,7 @@ EXPORT_SYMBOL(mdiobus_unregister);
  */
 void mdiobus_free(struct mii_bus *bus)
 {
-	/*
-	 * For compatibility with error handling in drivers.
-	 */
+	/* For compatibility with error handling in drivers. */
 	if (bus->state == MDIOBUS_ALLOCATED) {
 		kfree(bus);
 		return;
@@ -316,8 +310,8 @@ static int mdio_bus_match(struct device *dev, struct device_driver *drv)
 	if (phydrv->match_phy_device)
 		return phydrv->match_phy_device(phydev);
 
-	return ((phydrv->phy_id & phydrv->phy_id_mask) ==
-		(phydev->phy_id & phydrv->phy_id_mask));
+	return (phydrv->phy_id & phydrv->phy_id_mask) ==
+		(phydev->phy_id & phydrv->phy_id_mask);
 }
 
 #ifdef CONFIG_PM
@@ -335,15 +329,13 @@ static bool mdio_bus_phy_may_suspend(struct phy_device *phydev)
 	if (!netdev)
 		return true;
 
-	/*
-	 * Don't suspend PHY if the attched netdev parent may wakeup.
+	/* Don't suspend PHY if the attached netdev parent may wake up.
 	 * The parent may point to a PCI device, as in tg3 driver.
 	 */
 	if (netdev->dev.parent && device_may_wakeup(netdev->dev.parent))
 		return false;
 
-	/*
-	 * Also don't suspend PHY if the netdev itself may wakeup. This
+	/* Also don't suspend PHY if the netdev itself may wakeup. This
 	 * is the case for devices w/o underlying pwr. mgmt. aware bus,
 	 * e.g. SoC devices.
 	 */
@@ -358,8 +350,7 @@ static int mdio_bus_suspend(struct device *dev)
 	struct phy_driver *phydrv = to_phy_driver(dev->driver);
 	struct phy_device *phydev = to_phy_device(dev);
 
-	/*
-	 * We must stop the state machine manually, otherwise it stops out of
+	/* We must stop the state machine manually, otherwise it stops out of
 	 * control, possibly with the phydev->lock held. Upon resume, netdev
 	 * may call phy routines that try to grab the same lock, and that may
 	 * lead to a deadlock.
@@ -388,7 +379,7 @@ static int mdio_bus_resume(struct device *dev)
 
 no_resume:
 	if (phydev->attached_dev && phydev->adjust_link)
-		phy_start_machine(phydev, NULL);
+		phy_start_machine(phydev);
 
 	return 0;
 }
@@ -410,12 +401,12 @@ static int mdio_bus_restore(struct device *dev)
 	phydev->link = 0;
 	phydev->state = PHY_UP;
 
-	phy_start_machine(phydev, NULL);
+	phy_start_machine(phydev);
 
 	return 0;
 }
 
-static struct dev_pm_ops mdio_bus_pm_ops = {
+static const struct dev_pm_ops mdio_bus_pm_ops = {
 	.suspend = mdio_bus_suspend,
 	.resume = mdio_bus_resume,
 	.freeze = mdio_bus_suspend,
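
Marking mdio_bus_pm_ops as const lets the compiler place the PM callback table in read-only data; nothing writes to a dev_pm_ops instance after it is wired into a bus or driver. The same checkpatch-driven pattern in isolation, with hypothetical callbacks:

#include <linux/pm.h>

static int example_suspend(struct device *dev) { return 0; }
static int example_resume(struct device *dev)  { return 0; }

/* referenced from a bus_type or device_driver via its .pm pointer */
static const struct dev_pm_ops example_pm_ops = {
	.suspend	= example_suspend,
	.resume		= example_resume,
	.freeze		= example_suspend,
	.thaw		= example_resume,
	.restore	= example_resume,
};
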
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 26fa05a472b4..5a8993b0cafc 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -81,14 +81,14 @@ static int ksz_config_flags(struct phy_device *phydev)
 }
 
 static int kszphy_extended_write(struct phy_device *phydev,
-                                 u32 regnum, u16 val)
+				u32 regnum, u16 val)
 {
 	phy_write(phydev, MII_KSZPHY_EXTREG, KSZPHY_EXTREG_WRITE | regnum);
 	return phy_write(phydev, MII_KSZPHY_EXTREG_WRITE, val);
 }
 
 static int kszphy_extended_read(struct phy_device *phydev,
-                                 u32 regnum)
+				u32 regnum)
 {
 	phy_write(phydev, MII_KSZPHY_EXTREG, regnum);
 	return phy_read(phydev, MII_KSZPHY_EXTREG_READ);
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 98434b84f041..19c9eca0ef26 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -1,7 +1,4 @@
-/*
- * drivers/net/phy/phy.c
- *
- * Framework for configuring and reading PHY devices
+/* Framework for configuring and reading PHY devices
  * Based on code in sungem_phy.c and gianfar_phy.c
  *
  * Author: Andy Fleming
@@ -23,7 +20,6 @@
 #include <linux/errno.h>
 #include <linux/unistd.h>
 #include <linux/interrupt.h>
-#include <linux/init.h>
 #include <linux/delay.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
@@ -36,11 +32,11 @@
 #include <linux/timer.h>
 #include <linux/workqueue.h>
 #include <linux/mdio.h>
-
+#include <linux/io.h>
+#include <linux/uaccess.h>
 #include <linux/atomic.h>
-#include <asm/io.h>
+
 #include <asm/irq.h>
-#include <asm/uaccess.h>
 
 /**
  * phy_print_status - Convenience function to print out the current phy status
@@ -48,13 +44,14 @@
  */
 void phy_print_status(struct phy_device *phydev)
 {
-	if (phydev->link)
+	if (phydev->link) {
 		pr_info("%s - Link is Up - %d/%s\n",
 			dev_name(&phydev->dev),
 			phydev->speed,
 			DUPLEX_FULL == phydev->duplex ? "Full" : "Half");
-	else
+	} else {
 		pr_info("%s - Link is Down\n", dev_name(&phydev->dev));
+	}
 }
 EXPORT_SYMBOL(phy_print_status);
 
@@ -69,12 +66,10 @@ EXPORT_SYMBOL(phy_print_status);
  */
 static int phy_clear_interrupt(struct phy_device *phydev)
 {
-	int err = 0;
-
 	if (phydev->drv->ack_interrupt)
-		err = phydev->drv->ack_interrupt(phydev);
+		return phydev->drv->ack_interrupt(phydev);
 
-	return err;
+	return 0;
 }
 
 /**
@@ -86,13 +81,11 @@ static int phy_clear_interrupt(struct phy_device *phydev)
  */
 static int phy_config_interrupt(struct phy_device *phydev, u32 interrupts)
 {
-	int err = 0;
-
 	phydev->interrupts = interrupts;
 	if (phydev->drv->config_intr)
-		err = phydev->drv->config_intr(phydev);
+		return phydev->drv->config_intr(phydev);
 
-	return err;
+	return 0;
 }
 
 
@@ -106,15 +99,14 @@ static int phy_config_interrupt(struct phy_device *phydev, u32 interrupts)
  */
 static inline int phy_aneg_done(struct phy_device *phydev)
 {
-	int retval;
-
-	retval = phy_read(phydev, MII_BMSR);
+	int retval = phy_read(phydev, MII_BMSR);
 
 	return (retval < 0) ? retval : (retval & BMSR_ANEGCOMPLETE);
 }
 
 /* A structure for mapping a particular speed and duplex
- * combination to a particular SUPPORTED and ADVERTISED value */
+ * combination to a particular SUPPORTED and ADVERTISED value
+ */
 struct phy_setting {
 	int speed;
 	int duplex;
@@ -177,8 +169,7 @@ static inline int phy_find_setting(int speed, int duplex)
 	int idx = 0;
 
 	while (idx < ARRAY_SIZE(settings) &&
-			(settings[idx].speed != speed ||
-			settings[idx].duplex != duplex))
+	       (settings[idx].speed != speed || settings[idx].duplex != duplex))
 		idx++;
 
 	return idx < MAX_NUM_SETTINGS ? idx : MAX_NUM_SETTINGS - 1;
@@ -245,8 +236,7 @@ int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd)
 	if (cmd->phy_address != phydev->addr)
 		return -EINVAL;
 
-	/* We make sure that we don't pass unsupported
-	 * values in to the PHY */
+	/* We make sure that we don't pass unsupported values into the PHY */
 	cmd->advertising &= phydev->supported;
 
 	/* Verify the settings we care about. */
@@ -289,6 +279,7 @@ int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd)
 	cmd->supported = phydev->supported;
 
 	cmd->advertising = phydev->advertising;
+	cmd->lp_advertising = phydev->lp_advertising;
 
 	ethtool_cmd_speed_set(cmd, phydev->speed);
 	cmd->duplex = phydev->duplex;
@@ -312,8 +303,7 @@ EXPORT_SYMBOL(phy_ethtool_gset);
  * PHYCONTROL layer.  It changes registers without regard to
  * current state.  Use at own risk.
  */
-int phy_mii_ioctl(struct phy_device *phydev,
-		struct ifreq *ifr, int cmd)
+int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
 {
 	struct mii_ioctl_data *mii_data = if_mii(ifr);
 	u16 val = mii_data->val_in;
@@ -326,25 +316,24 @@ int phy_mii_ioctl(struct phy_device *phydev,
 	case SIOCGMIIREG:
 		mii_data->val_out = mdiobus_read(phydev->bus, mii_data->phy_id,
 						 mii_data->reg_num);
-		break;
+		return 0;
 
 	case SIOCSMIIREG:
 		if (mii_data->phy_id == phydev->addr) {
-			switch(mii_data->reg_num) {
+			switch (mii_data->reg_num) {
 			case MII_BMCR:
-				if ((val & (BMCR_RESET|BMCR_ANENABLE)) == 0)
+				if ((val & (BMCR_RESET | BMCR_ANENABLE)) == 0)
 					phydev->autoneg = AUTONEG_DISABLE;
 				else
 					phydev->autoneg = AUTONEG_ENABLE;
-				if ((!phydev->autoneg) && (val & BMCR_FULLDPLX))
+				if (!phydev->autoneg && (val & BMCR_FULLDPLX))
 					phydev->duplex = DUPLEX_FULL;
 				else
 					phydev->duplex = DUPLEX_HALF;
-				if ((!phydev->autoneg) &&
-						(val & BMCR_SPEED1000))
+				if (!phydev->autoneg && (val & BMCR_SPEED1000))
 					phydev->speed = SPEED_1000;
-				else if ((!phydev->autoneg) &&
-						(val & BMCR_SPEED100))
+				else if (!phydev->autoneg &&
+					 (val & BMCR_SPEED100))
 					phydev->speed = SPEED_100;
 				break;
 			case MII_ADVERTISE:
@@ -360,12 +349,9 @@ int phy_mii_ioctl(struct phy_device *phydev,
 			      mii_data->reg_num, val);
 
 		if (mii_data->reg_num == MII_BMCR &&
-		    val & BMCR_RESET &&
-		    phydev->drv->config_init) {
-			phy_scan_fixups(phydev);
-			phydev->drv->config_init(phydev);
-		}
-		break;
+		    val & BMCR_RESET)
+			return phy_init_hw(phydev);
+		return 0;
 
 	case SIOCSHWTSTAMP:
 		if (phydev->drv->hwtstamp)
@@ -375,8 +361,6 @@ int phy_mii_ioctl(struct phy_device *phydev,
 	default:
 		return -EOPNOTSUPP;
 	}
-
-	return 0;
 }
 EXPORT_SYMBOL(phy_mii_ioctl);
 
@@ -399,7 +383,6 @@ int phy_start_aneg(struct phy_device *phydev)
 		phy_sanitize_settings(phydev);
 
 	err = phydev->drv->config_aneg(phydev);
-
 	if (err < 0)
 		goto out_unlock;
 
@@ -419,25 +402,18 @@ out_unlock:
 }
 EXPORT_SYMBOL(phy_start_aneg);
 
-
 /**
  * phy_start_machine - start PHY state machine tracking
  * @phydev: the phy_device struct
- * @handler: callback function for state change notifications
  *
  * Description: The PHY infrastructure can run a state machine
  *   which tracks whether the PHY is starting up, negotiating,
  *   etc.  This function starts the timer which tracks the state
- *   of the PHY.  If you want to be notified when the state changes,
- *   pass in the callback @handler, otherwise, pass NULL.  If you
- *   want to maintain your own state machine, do not call this
- *   function.
+ *   of the PHY.  If you want to maintain your own state machine,
+ *   do not call this function.
  */
-void phy_start_machine(struct phy_device *phydev,
-		void (*handler)(struct net_device *))
+void phy_start_machine(struct phy_device *phydev)
 {
-	phydev->adjust_state = handler;
-
 	queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, HZ);
 }
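
phy_start_machine() drops its notification-callback parameter: link-change notification already reaches the MAC driver through the adjust_link handler supplied at attach time, so the extra hook (and phydev->adjust_state) was dead weight. Callers simply change from phy_start_machine(phydev, NULL) to phy_start_machine(phydev); a sketch of a typical MAC driver call site under that model (names and bus id are illustrative):

#include <linux/err.h>
#include <linux/netdevice.h>
#include <linux/phy.h>

static void example_adjust_link(struct net_device *ndev)
{
	/* react to speed/duplex/link changes reported by the PHY library */
}

static int example_open(struct net_device *ndev)
{
	struct phy_device *phydev;

	/* adjust_link is registered here, not through phy_start_machine() */
	phydev = phy_connect(ndev, "mdio-bus:01" /* placeholder bus id */,
			     example_adjust_link, PHY_INTERFACE_MODE_MII);
	if (IS_ERR(phydev))
		return PTR_ERR(phydev);

	phy_start(phydev);	/* state machine runs with no extra handler */
	return 0;
}
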
 
@@ -457,8 +433,6 @@ void phy_stop_machine(struct phy_device *phydev)
 	if (phydev->state > PHY_UP)
 		phydev->state = PHY_UP;
 	mutex_unlock(&phydev->lock);
-
-	phydev->adjust_state = NULL;
 }
 
 /**
@@ -495,7 +469,8 @@ static irqreturn_t phy_interrupt(int irq, void *phy_dat)
 	/* The MDIO bus is not allowed to be written in interrupt
 	 * context, so we need to disable the irq here.  A work
 	 * queue will write the PHY to disable and clear the
-	 * interrupt, and then reenable the irq line. */
+	 * interrupt, and then reenable the irq line.
+	 */
 	disable_irq_nosync(irq);
 	atomic_inc(&phydev->irq_disable);
 
@@ -510,16 +485,12 @@ static irqreturn_t phy_interrupt(int irq, void *phy_dat)
  */
 static int phy_enable_interrupts(struct phy_device *phydev)
 {
-	int err;
-
-	err = phy_clear_interrupt(phydev);
+	int err = phy_clear_interrupt(phydev);
 
 	if (err < 0)
 		return err;
 
-	err = phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED);
-
-	return err;
+	return phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED);
 }
 
 /**
@@ -532,13 +503,11 @@ static int phy_disable_interrupts(struct phy_device *phydev)
 
 	/* Disable PHY interrupts */
 	err = phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED);
-
 	if (err)
 		goto phy_err;
 
 	/* Clear the interrupt */
 	err = phy_clear_interrupt(phydev);
-
 	if (err)
 		goto phy_err;
 
@@ -562,8 +531,6 @@ phy_err:
  */
 int phy_start_interrupts(struct phy_device *phydev)
 {
-	int err = 0;
-
 	atomic_set(&phydev->irq_disable, 0);
 	if (request_irq(phydev->irq, phy_interrupt, 0, "phy_interrupt",
 			phydev) < 0) {
@@ -573,9 +540,7 @@ int phy_start_interrupts(struct phy_device *phydev)
 		return 0;
 	}
 
-	err = phy_enable_interrupts(phydev);
-
-	return err;
+	return phy_enable_interrupts(phydev);
 }
 EXPORT_SYMBOL(phy_start_interrupts);
 
@@ -585,24 +550,20 @@ EXPORT_SYMBOL(phy_start_interrupts);
  */
 int phy_stop_interrupts(struct phy_device *phydev)
 {
-	int err;
-
-	err = phy_disable_interrupts(phydev);
+	int err = phy_disable_interrupts(phydev);
 
 	if (err)
 		phy_error(phydev);
 
 	free_irq(phydev->irq, phydev);
 
-	/*
-	 * Cannot call flush_scheduled_work() here as desired because
+	/* Cannot call flush_scheduled_work() here as desired because
 	 * of rtnl_lock(), but we do not really care about what would
 	 * be done, except from enable_irq(), so cancel any work
 	 * possibly pending and take care of the matter below.
 	 */
 	cancel_work_sync(&phydev->phy_queue);
-	/*
-	 * If work indeed has been cancelled, disable_irq() will have
+	/* If work indeed has been cancelled, disable_irq() will have
 	 * been left unbalanced from phy_interrupt() and enable_irq()
 	 * has to be called so that other devices on the line work.
 	 */
@@ -613,14 +574,12 @@ int phy_stop_interrupts(struct phy_device *phydev)
 }
 EXPORT_SYMBOL(phy_stop_interrupts);
 
-
 /**
  * phy_change - Scheduled by the phy_interrupt/timer to handle PHY changes
  * @work: work_struct that describes the work to be done
  */
 void phy_change(struct work_struct *work)
 {
-	int err;
 	struct phy_device *phydev =
 		container_of(work, struct phy_device, phy_queue);
 
@@ -628,9 +587,7 @@ void phy_change(struct work_struct *work)
 	    !phydev->drv->did_interrupt(phydev))
 		goto ignore;
 
-	err = phy_disable_interrupts(phydev);
-
-	if (err)
+	if (phy_disable_interrupts(phydev))
 		goto phy_err;
 
 	mutex_lock(&phydev->lock);
@@ -642,16 +599,13 @@ void phy_change(struct work_struct *work)
 	enable_irq(phydev->irq);
 
 	/* Reenable interrupts */
-	if (PHY_HALTED != phydev->state)
-		err = phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED);
-
-	if (err)
+	if (PHY_HALTED != phydev->state &&
+	    phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED))
 		goto irq_enable_err;
 
 	/* reschedule state queue work to run as soon as possible */
 	cancel_delayed_work_sync(&phydev->state_queue);
 	queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, 0);
-
 	return;
 
 ignore:
@@ -690,13 +644,12 @@ void phy_stop(struct phy_device *phydev)
 out_unlock:
 	mutex_unlock(&phydev->lock);
 
-	/*
-	 * Cannot call flush_scheduled_work() here as desired because
+	/* Cannot call flush_scheduled_work() here as desired because
 	 * of rtnl_lock(), but PHY_HALTED shall guarantee phy_change()
 	 * will not reenable interrupts.
 	 */
 }
-
+EXPORT_SYMBOL(phy_stop);
 
 /**
  * phy_start - start or restart a PHY device
@@ -713,20 +666,19 @@ void phy_start(struct phy_device *phydev)
 	mutex_lock(&phydev->lock);
 
 	switch (phydev->state) {
-		case PHY_STARTING:
-			phydev->state = PHY_PENDING;
-			break;
-		case PHY_READY:
-			phydev->state = PHY_UP;
-			break;
-		case PHY_HALTED:
-			phydev->state = PHY_RESUMING;
-		default:
-			break;
+	case PHY_STARTING:
+		phydev->state = PHY_PENDING;
+		break;
+	case PHY_READY:
+		phydev->state = PHY_UP;
+		break;
+	case PHY_HALTED:
+		phydev->state = PHY_RESUMING;
+	default:
+		break;
 	}
 	mutex_unlock(&phydev->lock);
 }
-EXPORT_SYMBOL(phy_stop);
 EXPORT_SYMBOL(phy_start);
 
 /**
@@ -738,160 +690,132 @@ void phy_state_machine(struct work_struct *work)
 	struct delayed_work *dwork = to_delayed_work(work);
 	struct phy_device *phydev =
 			container_of(dwork, struct phy_device, state_queue);
-	int needs_aneg = 0;
+	int needs_aneg = 0, do_suspend = 0;
 	int err = 0;
 
 	mutex_lock(&phydev->lock);
 
-	if (phydev->adjust_state)
-		phydev->adjust_state(phydev->attached_dev);
+	switch (phydev->state) {
+	case PHY_DOWN:
+	case PHY_STARTING:
+	case PHY_READY:
+	case PHY_PENDING:
+		break;
+	case PHY_UP:
+		needs_aneg = 1;
 
-	switch(phydev->state) {
-		case PHY_DOWN:
-		case PHY_STARTING:
-		case PHY_READY:
-		case PHY_PENDING:
-			break;
-		case PHY_UP:
-			needs_aneg = 1;
+		phydev->link_timeout = PHY_AN_TIMEOUT;
 
-			phydev->link_timeout = PHY_AN_TIMEOUT;
+		break;
+	case PHY_AN:
+		err = phy_read_status(phydev);
+		if (err < 0)
+			break;
 
+		/* If the link is down, give up on negotiation for now */
+		if (!phydev->link) {
+			phydev->state = PHY_NOLINK;
+			netif_carrier_off(phydev->attached_dev);
+			phydev->adjust_link(phydev->attached_dev);
 			break;
-		case PHY_AN:
-			err = phy_read_status(phydev);
+		}
 
-			if (err < 0)
-				break;
+		/* Check if negotiation is done.  Break if there's an error */
+		err = phy_aneg_done(phydev);
+		if (err < 0)
+			break;
 
-			/* If the link is down, give up on
-			 * negotiation for now */
-			if (!phydev->link) {
-				phydev->state = PHY_NOLINK;
-				netif_carrier_off(phydev->attached_dev);
-				phydev->adjust_link(phydev->attached_dev);
-				break;
-			}
+		/* If AN is done, we're running */
+		if (err > 0) {
+			phydev->state = PHY_RUNNING;
+			netif_carrier_on(phydev->attached_dev);
+			phydev->adjust_link(phydev->attached_dev);
 
-			/* Check if negotiation is done.  Break
-			 * if there's an error */
-			err = phy_aneg_done(phydev);
-			if (err < 0)
+		} else if (0 == phydev->link_timeout--) {
+			needs_aneg = 1;
+			/* If we have the magic_aneg bit, we try again */
+			if (phydev->drv->flags & PHY_HAS_MAGICANEG)
 				break;
-
-			/* If AN is done, we're running */
-			if (err > 0) {
-				phydev->state = PHY_RUNNING;
-				netif_carrier_on(phydev->attached_dev);
-				phydev->adjust_link(phydev->attached_dev);
-
-			} else if (0 == phydev->link_timeout--) {
-				needs_aneg = 1;
-				/* If we have the magic_aneg bit,
-				 * we try again */
-				if (phydev->drv->flags & PHY_HAS_MAGICANEG)
-					break;
-			}
+		}
+		break;
+	case PHY_NOLINK:
+		err = phy_read_status(phydev);
+		if (err)
 			break;
-		case PHY_NOLINK:
-			err = phy_read_status(phydev);
-
-			if (err)
-				break;
 
-			if (phydev->link) {
-				phydev->state = PHY_RUNNING;
-				netif_carrier_on(phydev->attached_dev);
-				phydev->adjust_link(phydev->attached_dev);
-			}
+		if (phydev->link) {
+			phydev->state = PHY_RUNNING;
+			netif_carrier_on(phydev->attached_dev);
+			phydev->adjust_link(phydev->attached_dev);
+		}
+		break;
+	case PHY_FORCING:
+		err = genphy_update_link(phydev);
+		if (err)
 			break;
-		case PHY_FORCING:
-			err = genphy_update_link(phydev);
-
-			if (err)
-				break;
 
-			if (phydev->link) {
-				phydev->state = PHY_RUNNING;
-				netif_carrier_on(phydev->attached_dev);
-			} else {
-				if (0 == phydev->link_timeout--)
-					needs_aneg = 1;
-			}
+		if (phydev->link) {
+			phydev->state = PHY_RUNNING;
+			netif_carrier_on(phydev->attached_dev);
+		} else {
+			if (0 == phydev->link_timeout--)
+				needs_aneg = 1;
+		}
 
-			phydev->adjust_link(phydev->attached_dev);
-			break;
-		case PHY_RUNNING:
-			/* Only register a CHANGE if we are
-			 * polling or ignoring interrupts
-			 */
-			if (!phy_interrupt_is_valid(phydev))
-				phydev->state = PHY_CHANGELINK;
+		phydev->adjust_link(phydev->attached_dev);
+		break;
+	case PHY_RUNNING:
+		/* Only register a CHANGE if we are
+		 * polling or ignoring interrupts
+		 */
+		if (!phy_interrupt_is_valid(phydev))
+			phydev->state = PHY_CHANGELINK;
+		break;
+	case PHY_CHANGELINK:
+		err = phy_read_status(phydev);
+		if (err)
 			break;
-		case PHY_CHANGELINK:
-			err = phy_read_status(phydev);
 
-			if (err)
-				break;
+		if (phydev->link) {
+			phydev->state = PHY_RUNNING;
+			netif_carrier_on(phydev->attached_dev);
+		} else {
+			phydev->state = PHY_NOLINK;
+			netif_carrier_off(phydev->attached_dev);
+		}
 
-			if (phydev->link) {
-				phydev->state = PHY_RUNNING;
-				netif_carrier_on(phydev->attached_dev);
-			} else {
-				phydev->state = PHY_NOLINK;
-				netif_carrier_off(phydev->attached_dev);
-			}
+		phydev->adjust_link(phydev->attached_dev);
 
+		if (phy_interrupt_is_valid(phydev))
+			err = phy_config_interrupt(phydev,
+						   PHY_INTERRUPT_ENABLED);
+		break;
+	case PHY_HALTED:
+		if (phydev->link) {
+			phydev->link = 0;
+			netif_carrier_off(phydev->attached_dev);
 			phydev->adjust_link(phydev->attached_dev);
-
-			if (phy_interrupt_is_valid(phydev))
-				err = phy_config_interrupt(phydev,
-						PHY_INTERRUPT_ENABLED);
-			break;
-		case PHY_HALTED:
-			if (phydev->link) {
-				phydev->link = 0;
-				netif_carrier_off(phydev->attached_dev);
-				phydev->adjust_link(phydev->attached_dev);
-			}
+			do_suspend = 1;
+		}
+		break;
+	case PHY_RESUMING:
+		err = phy_clear_interrupt(phydev);
+		if (err)
 			break;
-		case PHY_RESUMING:
-
-			err = phy_clear_interrupt(phydev);
 
-			if (err)
-				break;
-
-			err = phy_config_interrupt(phydev,
-					PHY_INTERRUPT_ENABLED);
+		err = phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED);
+		if (err)
+			break;
 
-			if (err)
+		if (AUTONEG_ENABLE == phydev->autoneg) {
+			err = phy_aneg_done(phydev);
+			if (err < 0)
 				break;
 
-			if (AUTONEG_ENABLE == phydev->autoneg) {
-				err = phy_aneg_done(phydev);
-				if (err < 0)
-					break;
-
-				/* err > 0 if AN is done.
-				 * Otherwise, it's 0, and we're
-				 * still waiting for AN */
-				if (err > 0) {
-					err = phy_read_status(phydev);
-					if (err)
-						break;
-
-					if (phydev->link) {
-						phydev->state = PHY_RUNNING;
-						netif_carrier_on(phydev->attached_dev);
-					} else
-						phydev->state = PHY_NOLINK;
-					phydev->adjust_link(phydev->attached_dev);
-				} else {
-					phydev->state = PHY_AN;
-					phydev->link_timeout = PHY_AN_TIMEOUT;
-				}
-			} else {
+			/* err > 0 if AN is done.
+			 * Otherwise, it's 0, and we're still waiting for AN
+			 */
+			if (err > 0) {
 				err = phy_read_status(phydev);
 				if (err)
 					break;
@@ -899,11 +823,28 @@ void phy_state_machine(struct work_struct *work)
 				if (phydev->link) {
 					phydev->state = PHY_RUNNING;
 					netif_carrier_on(phydev->attached_dev);
-				} else
+				} else {
 					phydev->state = PHY_NOLINK;
+				}
 				phydev->adjust_link(phydev->attached_dev);
+			} else {
+				phydev->state = PHY_AN;
+				phydev->link_timeout = PHY_AN_TIMEOUT;
 			}
-			break;
+		} else {
+			err = phy_read_status(phydev);
+			if (err)
+				break;
+
+			if (phydev->link) {
+				phydev->state = PHY_RUNNING;
+				netif_carrier_on(phydev->attached_dev);
+			} else {
+				phydev->state = PHY_NOLINK;
+			}
+			phydev->adjust_link(phydev->attached_dev);
+		}
+		break;
 	}
 
 	mutex_unlock(&phydev->lock);
@@ -911,11 +852,14 @@ void phy_state_machine(struct work_struct *work)
 	if (needs_aneg)
 		err = phy_start_aneg(phydev);
 
+	if (do_suspend)
+		phy_suspend(phydev);
+
 	if (err < 0)
 		phy_error(phydev);
 
 	queue_delayed_work(system_power_efficient_wq, &phydev->state_queue,
-			PHY_STATE_TIME * HZ);
+			   PHY_STATE_TIME * HZ);
 }
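
The reworked state machine also records a do_suspend flag in the PHY_HALTED case and only calls phy_suspend() after phydev->lock is released, mirroring how needs_aneg already defers phy_start_aneg(). The control shape, reduced to its essentials (a sketch, not the full function; phy_suspend() exists in this tree, as the hunk above shows):

#include <linux/mutex.h>
#include <linux/phy.h>

static void example_state_machine(struct phy_device *phydev)
{
	int needs_aneg = 0, do_suspend = 0;

	mutex_lock(&phydev->lock);
	switch (phydev->state) {
	case PHY_UP:
		needs_aneg = 1;
		break;
	case PHY_HALTED:
		if (phydev->link) {
			phydev->link = 0;
			do_suspend = 1;	/* remember it, don't act under the lock */
		}
		break;
	default:
		break;
	}
	mutex_unlock(&phydev->lock);

	/* heavier work runs lock-free, as in the real phy_state_machine() */
	if (needs_aneg)
		phy_start_aneg(phydev);
	if (do_suspend)
		phy_suspend(phydev);
}
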
 
 void phy_mac_interrupt(struct phy_device *phydev, int new_link)
@@ -957,14 +901,10 @@ static inline void mmd_phy_indirect(struct mii_bus *bus, int prtad, int devad,
 static int phy_read_mmd_indirect(struct mii_bus *bus, int prtad, int devad,
 				 int addr)
 {
-	u32 ret;
-
 	mmd_phy_indirect(bus, prtad, devad, addr);
 
 	/* Read the content of the MMD's selected register */
-	ret = bus->read(bus, addr, MII_MMD_DATA);
-
-	return ret;
+	return bus->read(bus, addr, MII_MMD_DATA);
 }
 
 /**
@@ -1004,8 +944,6 @@ static void phy_write_mmd_indirect(struct mii_bus *bus, int prtad, int devad,
  */
 int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
 {
-	int ret = -EPROTONOSUPPORT;
-
 	/* According to 802.3az, the EEE is supported only in full-duplex mode.
 	 * Also EEE feature is active when core is operating with MII, GMII
 	 * or RGMII.
@@ -1031,7 +969,7 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
 
 		cap = mmd_eee_cap_to_ethtool_sup_t(eee_cap);
 		if (!cap)
-			goto eee_exit;
+			return -EPROTONOSUPPORT;
 
 		/* Check which link settings negotiated and verify it in
 		 * the EEE advertising registers.
@@ -1050,7 +988,7 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
 		lp = mmd_eee_adv_to_ethtool_adv_t(eee_lp);
 		idx = phy_find_setting(phydev->speed, phydev->duplex);
 		if (!(lp & adv & settings[idx].setting))
-			goto eee_exit;
+			return -EPROTONOSUPPORT;
 
 		if (clk_stop_enable) {
 			/* Configure the PHY to stop receiving xMII
@@ -1067,11 +1005,10 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
 					       MDIO_MMD_PCS, phydev->addr, val);
 		}
 
-		ret = 0; /* EEE supported */
+		return 0; /* EEE supported */
 	}
 
-eee_exit:
-	return ret;
+	return -EPROTONOSUPPORT;
 }
 EXPORT_SYMBOL(phy_init_eee);
 
@@ -1086,7 +1023,6 @@ int phy_get_eee_err(struct phy_device *phydev)
 {
 	return phy_read_mmd_indirect(phydev->bus, MDIO_PCS_EEE_WK_ERR,
 				     MDIO_MMD_PCS, phydev->addr);
-
 }
 EXPORT_SYMBOL(phy_get_eee_err);
 
@@ -1136,9 +1072,8 @@ EXPORT_SYMBOL(phy_ethtool_get_eee);
  */
 int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data)
 {
-	int val;
+	int val = ethtool_adv_to_mmd_eee_adv_t(data->advertised);
 
-	val = ethtool_adv_to_mmd_eee_adv_t(data->advertised);
 	phy_write_mmd_indirect(phydev->bus, MDIO_AN_EEE_ADV, MDIO_MMD_AN,
 			       phydev->addr, val);
 
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index d6447b3f7409..4b03e63639b7 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -1,7 +1,4 @@
-/*
- * drivers/net/phy/phy_device.c
- *
- * Framework for finding and configuring PHYs.
+/* Framework for finding and configuring PHYs.
  * Also contains generic PHY driver
  *
  * Author: Andy Fleming
@@ -33,10 +30,11 @@
 #include <linux/mii.h>
 #include <linux/ethtool.h>
 #include <linux/phy.h>
+#include <linux/mdio.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
 
-#include <asm/io.h>
 #include <asm/irq.h>
-#include <asm/uaccess.h>
 
 MODULE_DESCRIPTION("PHY library");
 MODULE_AUTHOR("Andy Fleming");
@@ -53,31 +51,31 @@ static void phy_device_release(struct device *dev)
 	kfree(to_phy_device(dev));
 }
 
-static struct phy_driver genphy_driver;
-extern int mdio_bus_init(void);
-extern void mdio_bus_exit(void);
+enum genphy_driver {
+	GENPHY_DRV_1G,
+	GENPHY_DRV_10G,
+	GENPHY_DRV_MAX
+};
+
+static struct phy_driver genphy_driver[GENPHY_DRV_MAX];
 
 static LIST_HEAD(phy_fixup_list);
 static DEFINE_MUTEX(phy_fixup_lock);
 
-static int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
-			     u32 flags, phy_interface_t interface);
-
-/*
- * Creates a new phy_fixup and adds it to the list
+/**
+ * phy_register_fixup - creates a new phy_fixup and adds it to the list
  * @bus_id: A string which matches phydev->dev.bus_id (or PHY_ANY_ID)
  * @phy_uid: Used to match against phydev->phy_id (the UID of the PHY)
- * 	It can also be PHY_ANY_UID
+ *	It can also be PHY_ANY_UID
  * @phy_uid_mask: Applied to phydev->phy_id and fixup->phy_uid before
- * 	comparison
+ *	comparison
  * @run: The actual code to be run when a matching PHY is found
  */
 int phy_register_fixup(const char *bus_id, u32 phy_uid, u32 phy_uid_mask,
-		int (*run)(struct phy_device *))
+		       int (*run)(struct phy_device *))
 {
-	struct phy_fixup *fixup;
+	struct phy_fixup *fixup = kzalloc(sizeof(*fixup), GFP_KERNEL);
 
-	fixup = kzalloc(sizeof(struct phy_fixup), GFP_KERNEL);
 	if (!fixup)
 		return -ENOMEM;
 
@@ -96,7 +94,7 @@ EXPORT_SYMBOL(phy_register_fixup);
 
 /* Registers a fixup to be run on any PHY with the UID in phy_uid */
 int phy_register_fixup_for_uid(u32 phy_uid, u32 phy_uid_mask,
-		int (*run)(struct phy_device *))
+			       int (*run)(struct phy_device *))
 {
 	return phy_register_fixup(PHY_ANY_ID, phy_uid, phy_uid_mask, run);
 }
@@ -104,14 +102,13 @@ EXPORT_SYMBOL(phy_register_fixup_for_uid);
 
 /* Registers a fixup to be run on the PHY with id string bus_id */
 int phy_register_fixup_for_id(const char *bus_id,
-		int (*run)(struct phy_device *))
+			      int (*run)(struct phy_device *))
 {
 	return phy_register_fixup(bus_id, PHY_ANY_UID, 0xffffffff, run);
 }
 EXPORT_SYMBOL(phy_register_fixup_for_id);
 
-/*
- * Returns 1 if fixup matches phydev in bus_id and phy_uid.
+/* Returns 1 if fixup matches phydev in bus_id and phy_uid.
  * Fixups can be set to match any in one or more fields.
  */
 static int phy_needs_fixup(struct phy_device *phydev, struct phy_fixup *fixup)
@@ -121,7 +118,7 @@ static int phy_needs_fixup(struct phy_device *phydev, struct phy_fixup *fixup)
 			return 0;
 
 	if ((fixup->phy_uid & fixup->phy_uid_mask) !=
-			(phydev->phy_id & fixup->phy_uid_mask))
+	    (phydev->phy_id & fixup->phy_uid_mask))
 		if (fixup->phy_uid != PHY_ANY_UID)
 			return 0;
 
@@ -129,16 +126,14 @@ static int phy_needs_fixup(struct phy_device *phydev, struct phy_fixup *fixup)
 }
 
 /* Runs any matching fixups for this phydev */
-int phy_scan_fixups(struct phy_device *phydev)
+static int phy_scan_fixups(struct phy_device *phydev)
 {
 	struct phy_fixup *fixup;
 
 	mutex_lock(&phy_fixup_lock);
 	list_for_each_entry(fixup, &phy_fixup_list, list) {
 		if (phy_needs_fixup(phydev, fixup)) {
-			int err;
-
-			err = fixup->run(phydev);
+			int err = fixup->run(phydev);
 
 			if (err < 0) {
 				mutex_unlock(&phy_fixup_lock);
@@ -150,25 +145,24 @@ int phy_scan_fixups(struct phy_device *phydev)
 
 	return 0;
 }
-EXPORT_SYMBOL(phy_scan_fixups);
 
 struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id,
-			bool is_c45, struct phy_c45_device_ids *c45_ids)
+				     bool is_c45,
+				     struct phy_c45_device_ids *c45_ids)
 {
 	struct phy_device *dev;
 
-	/* We allocate the device, and initialize the
-	 * default values */
+	/* We allocate the device, and initialize the default values */
 	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
-
 	if (NULL == dev)
-		return (struct phy_device*) PTR_ERR((void*)-ENOMEM);
+		return (struct phy_device *)PTR_ERR((void *)-ENOMEM);
 
 	dev->dev.release = phy_device_release;
 
 	dev->speed = 0;
 	dev->duplex = -1;
-	dev->pause = dev->asym_pause = 0;
+	dev->pause = 0;
+	dev->asym_pause = 0;
 	dev->link = 1;
 	dev->interface = PHY_INTERFACE_MODE_GMII;
 
@@ -192,14 +186,15 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id,
 	INIT_WORK(&dev->phy_queue, phy_change);
 
 	/* Request the appropriate module unconditionally; don't
-	   bother trying to do so only if it isn't already loaded,
-	   because that gets complicated. A hotplug event would have
-	   done an unconditional modprobe anyway.
-	   We don't do normal hotplug because it won't work for MDIO
-	   -- because it relies on the device staying around for long
-	   enough for the driver to get loaded. With MDIO, the NIC
-	   driver will get bored and give up as soon as it finds that
-	   there's no driver _already_ loaded. */
+	 * bother trying to do so only if it isn't already loaded,
+	 * because that gets complicated. A hotplug event would have
+	 * done an unconditional modprobe anyway.
+	 * We don't do normal hotplug because it won't work for MDIO
+	 * -- because it relies on the device staying around for long
+	 * enough for the driver to get loaded. With MDIO, the NIC
+	 * driver will get bored and give up as soon as it finds that
+	 * there's no driver _already_ loaded.
+	 */
 	request_module(MDIO_MODULE_PREFIX MDIO_ID_FMT, MDIO_ID_ARGS(phy_id));
 
 	device_initialize(&dev->dev);
@@ -299,10 +294,8 @@ static int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id,
 	if (is_c45)
 		return get_phy_c45_ids(bus, addr, phy_id, c45_ids);
 
-	/* Grab the bits from PHYIR1, and put them
-	 * in the upper half */
+	/* Grab the bits from PHYIR1, and put them in the upper half */
 	phy_reg = mdiobus_read(bus, addr, MII_PHYSID1);
-
 	if (phy_reg < 0)
 		return -EIO;
 
@@ -310,7 +303,6 @@ static int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id,
 
 	/* Grab the bits from PHYIR2, and put them in the lower half */
 	phy_reg = mdiobus_read(bus, addr, MII_PHYSID2);
-
 	if (phy_reg < 0)
 		return -EIO;
 
@@ -320,7 +312,8 @@ static int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id,
 }
 
 /**
- * get_phy_device - reads the specified PHY device and returns its @phy_device struct
+ * get_phy_device - reads the specified PHY device and returns its @phy_device
+ *		    struct
  * @bus: the target MII bus
  * @addr: PHY address on the MII bus
  * @is_c45: If true the PHY uses the 802.3 clause 45 protocol
@@ -331,7 +324,6 @@ static int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id,
 struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45)
 {
 	struct phy_c45_device_ids c45_ids = {0};
-	struct phy_device *dev = NULL;
 	u32 phy_id = 0;
 	int r;
 
@@ -343,9 +335,7 @@ struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45)
 	if ((phy_id & 0x1fffffff) == 0x1fffffff)
 		return NULL;
 
-	dev = phy_device_create(bus, addr, phy_id, is_c45, &c45_ids);
-
-	return dev;
+	return phy_device_create(bus, addr, phy_id, is_c45, &c45_ids);
 }
 EXPORT_SYMBOL(get_phy_device);
 
@@ -357,14 +347,17 @@ int phy_device_register(struct phy_device *phydev)
 {
 	int err;
 
-	/* Don't register a phy if one is already registered at this
-	 * address */
+	/* Don't register a phy if one is already registered at this address */
 	if (phydev->bus->phy_map[phydev->addr])
 		return -EINVAL;
 	phydev->bus->phy_map[phydev->addr] = phydev;
 
 	/* Run all of the fixups for this PHY */
-	phy_scan_fixups(phydev);
+	err = phy_init_hw(phydev);
+	if (err) {
+		pr_err("PHY %d failed to initialize\n", phydev->addr);
+		goto out;
+	}
 
 	err = device_add(&phydev->dev);
 	if (err) {
@@ -409,7 +402,7 @@ EXPORT_SYMBOL(phy_find_first);
  *   this function.
  */
 static void phy_prepare_link(struct phy_device *phydev,
-		void (*handler)(struct net_device *))
+			     void (*handler)(struct net_device *))
 {
 	phydev->adjust_link = handler;
 }
@@ -432,7 +425,7 @@ int phy_connect_direct(struct net_device *dev, struct phy_device *phydev,
 		return rc;
 
 	phy_prepare_link(phydev, handler);
-	phy_start_machine(phydev, NULL);
+	phy_start_machine(phydev);
 	if (phydev->irq > 0)
 		phy_start_interrupts(phydev);
 
@@ -455,16 +448,17 @@ EXPORT_SYMBOL(phy_connect_direct);
  *   choose to call only the subset of functions which provide
  *   the desired functionality.
  */
-struct phy_device * phy_connect(struct net_device *dev, const char *bus_id,
-		void (*handler)(struct net_device *),
-		phy_interface_t interface)
+struct phy_device *phy_connect(struct net_device *dev, const char *bus_id,
+			       void (*handler)(struct net_device *),
+			       phy_interface_t interface)
 {
 	struct phy_device *phydev;
 	struct device *d;
 	int rc;
 
 	/* Search the list of PHY devices on the mdio bus for the
-	 * PHY with the requested name */
+	 * PHY with the requested name
+	 */
 	d = bus_find_device_by_name(&mdio_bus_type, NULL, bus_id);
 	if (!d) {
 		pr_err("PHY %s not found\n", bus_id);
@@ -481,7 +475,8 @@ struct phy_device * phy_connect(struct net_device *dev, const char *bus_id,
 EXPORT_SYMBOL(phy_connect);
 
 /**
- * phy_disconnect - disable interrupts, stop state machine, and detach a PHY device
+ * phy_disconnect - disable interrupts, stop state machine, and detach a PHY
+ *		    device
  * @phydev: target phy_device struct
  */
 void phy_disconnect(struct phy_device *phydev)
@@ -490,13 +485,53 @@ void phy_disconnect(struct phy_device *phydev)
 		phy_stop_interrupts(phydev);
 
 	phy_stop_machine(phydev);
-	
+
 	phydev->adjust_link = NULL;
 
 	phy_detach(phydev);
 }
 EXPORT_SYMBOL(phy_disconnect);
 
+/**
+ * phy_poll_reset - Safely wait until a PHY reset has properly completed
+ * @phydev: The PHY device to poll
+ *
+ * Description: According to IEEE 802.3, Section 2, Subsection 22.2.4.1.1, as
+ *   published in 2008, a PHY reset may take up to 0.5 seconds.  The MII BMCR
+ *   register must be polled until the BMCR_RESET bit clears.
+ *
+ *   Furthermore, any attempts to write to PHY registers may have no effect
+ *   or even generate MDIO bus errors until this is complete.
+ *
+ *   Some PHYs (such as the Marvell 88E1111) don't entirely conform to the
+ *   standard and do not fully reset after the BMCR_RESET bit is set, and may
+ *   even *REQUIRE* a soft-reset to properly restart autonegotiation.  In an
+ *   effort to support such broken PHYs, this function is separate from the
+ *   standard phy_init_hw() which will zero all the other bits in the BMCR
+ *   and reapply all driver-specific and board-specific fixups.
+ */
+static int phy_poll_reset(struct phy_device *phydev)
+{
+	/* Poll until the reset bit clears (50ms per retry == 0.6 sec) */
+	unsigned int retries = 12;
+	int ret;
+
+	do {
+		msleep(50);
+		ret = phy_read(phydev, MII_BMCR);
+		if (ret < 0)
+			return ret;
+	} while (ret & BMCR_RESET && --retries);
+	if (ret & BMCR_RESET)
+		return -ETIMEDOUT;
+
+	/* Some chips (smsc911x) may still need up to another 1ms after the
+	 * BMCR_RESET bit is cleared before they are usable.
+	 */
+	msleep(1);
+	return 0;
+}
+
 int phy_init_hw(struct phy_device *phydev)
 {
 	int ret;
@@ -504,12 +539,21 @@ int phy_init_hw(struct phy_device *phydev)
 	if (!phydev->drv || !phydev->drv->config_init)
 		return 0;
 
+	ret = phy_write(phydev, MII_BMCR, BMCR_RESET);
+	if (ret < 0)
+		return ret;
+
+	ret = phy_poll_reset(phydev);
+	if (ret < 0)
+		return ret;
+
 	ret = phy_scan_fixups(phydev);
 	if (ret < 0)
 		return ret;
 
 	return phydev->drv->config_init(phydev);
 }
+EXPORT_SYMBOL(phy_init_hw);
 
 /**
  * phy_attach_direct - attach a network device to a given PHY device pointer
@@ -520,26 +564,25 @@ int phy_init_hw(struct phy_device *phydev)
  *
  * Description: Called by drivers to attach to a particular PHY
  *     device. The phy_device is found, and properly hooked up
- *     to the phy_driver.  If no driver is attached, then the
- *     genphy_driver is used.  The phy_device is given a ptr to
+ *     to the phy_driver.  If no driver is attached, then a
+ *     generic driver is used.  The phy_device is given a ptr to
  *     the attaching device, and given a callback for link status
  *     change.  The phy_device is returned to the attaching driver.
  */
-static int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
-			     u32 flags, phy_interface_t interface)
+int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
+		      u32 flags, phy_interface_t interface)
 {
 	struct device *d = &phydev->dev;
 	int err;
 
 	/* Assume that if there is no driver, that it doesn't
-	 * exist, and we should use the genphy driver. */
+	 * exist, and we should use the genphy driver.
+	 */
 	if (NULL == d->driver) {
-		if (phydev->is_c45) {
-			pr_err("No driver for phy %x\n", phydev->phy_id);
-			return -ENODEV;
-		}
-
-		d->driver = &genphy_driver.driver;
+		if (phydev->is_c45)
+			d->driver = &genphy_driver[GENPHY_DRV_10G].driver;
+		else
+			d->driver = &genphy_driver[GENPHY_DRV_1G].driver;
 
 		err = d->driver->probe(d);
 		if (err >= 0)
@@ -565,13 +608,17 @@ static int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
 
 	/* Do initial configuration here, now that
 	 * we have certain key parameters
-	 * (dev_flags and interface) */
+	 * (dev_flags and interface)
+	 */
 	err = phy_init_hw(phydev);
 	if (err)
 		phy_detach(phydev);
 
+	phy_resume(phydev);
+
 	return err;
 }
+EXPORT_SYMBOL(phy_attach_direct);
 
 /**
  * phy_attach - attach a network device to a particular PHY device
@@ -582,8 +629,8 @@ static int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
  * Description: Same as phy_attach_direct() except that a PHY bus_id
  *     string is passed instead of a pointer to a struct phy_device.
  */
-struct phy_device *phy_attach(struct net_device *dev,
-		const char *bus_id, phy_interface_t interface)
+struct phy_device *phy_attach(struct net_device *dev, const char *bus_id,
+			      phy_interface_t interface)
 {
 	struct bus_type *bus = &mdio_bus_type;
 	struct phy_device *phydev;
@@ -591,7 +638,8 @@ struct phy_device *phy_attach(struct net_device *dev,
 	int rc;
 
 	/* Search the list of PHY devices on the mdio bus for the
-	 * PHY with the requested name */
+	 * PHY with the requested name
+	 */
 	d = bus_find_device_by_name(bus, NULL, bus_id);
 	if (!d) {
 		pr_err("PHY %s not found\n", bus_id);
@@ -613,18 +661,49 @@ EXPORT_SYMBOL(phy_attach);
  */
 void phy_detach(struct phy_device *phydev)
 {
+	int i;
 	phydev->attached_dev->phydev = NULL;
 	phydev->attached_dev = NULL;
+	phy_suspend(phydev);
 
 	/* If the device had no specific driver before (i.e. - it
 	 * was using the generic driver), we unbind the device
 	 * from the generic driver so that there's a chance a
-	 * real driver could be loaded */
-	if (phydev->dev.driver == &genphy_driver.driver)
-		device_release_driver(&phydev->dev);
+	 * real driver could be loaded
+	 */
+	for (i = 0; i < ARRAY_SIZE(genphy_driver); i++) {
+		if (phydev->dev.driver == &genphy_driver[i].driver) {
+			device_release_driver(&phydev->dev);
+			break;
+		}
+	}
 }
 EXPORT_SYMBOL(phy_detach);
 
+int phy_suspend(struct phy_device *phydev)
+{
+	struct phy_driver *phydrv = to_phy_driver(phydev->dev.driver);
+	struct ethtool_wolinfo wol;
+
+	/* If the device has WOL enabled, we cannot suspend the PHY */
+	wol.cmd = ETHTOOL_GWOL;
+	phy_ethtool_get_wol(phydev, &wol);
+	if (wol.wolopts)
+		return -EBUSY;
+
+	if (phydrv->suspend)
+		return phydrv->suspend(phydev);
+	return 0;
+}
+
+int phy_resume(struct phy_device *phydev)
+{
+	struct phy_driver *phydrv = to_phy_driver(phydev->dev.driver);
+
+	if (phydrv->resume)
+		return phydrv->resume(phydev);
+	return 0;
+}
 
 /* Generic PHY support and helper functions */
 
@@ -643,17 +722,16 @@ static int genphy_config_advert(struct phy_device *phydev)
 	int oldadv, adv;
 	int err, changed = 0;
 
-	/* Only allow advertising what
-	 * this PHY supports */
+	/* Only allow advertising what this PHY supports */
 	phydev->advertising &= phydev->supported;
 	advertise = phydev->advertising;
 
 	/* Setup standard advertisement */
-	oldadv = adv = phy_read(phydev, MII_ADVERTISE);
-
+	adv = phy_read(phydev, MII_ADVERTISE);
 	if (adv < 0)
 		return adv;
 
+	oldadv = adv;
 	adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP |
 		 ADVERTISE_PAUSE_ASYM);
 	adv |= ethtool_adv_to_mii_adv_t(advertise);
@@ -668,12 +746,12 @@ static int genphy_config_advert(struct phy_device *phydev)
 
 	/* Configure gigabit if it's supported */
 	if (phydev->supported & (SUPPORTED_1000baseT_Half |
-				SUPPORTED_1000baseT_Full)) {
-		oldadv = adv = phy_read(phydev, MII_CTRL1000);
-
+				 SUPPORTED_1000baseT_Full)) {
+		adv = phy_read(phydev, MII_CTRL1000);
 		if (adv < 0)
 			return adv;
 
+		oldadv = adv;
 		adv &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
 		adv |= ethtool_adv_to_mii_ctrl1000_t(advertise);
 
@@ -699,10 +777,10 @@ static int genphy_config_advert(struct phy_device *phydev)
  */
 int genphy_setup_forced(struct phy_device *phydev)
 {
-	int err;
 	int ctl = 0;
 
-	phydev->pause = phydev->asym_pause = 0;
+	phydev->pause = 0;
+	phydev->asym_pause = 0;
 
 	if (SPEED_1000 == phydev->speed)
 		ctl |= BMCR_SPEED1000;
@@ -711,10 +789,8 @@ int genphy_setup_forced(struct phy_device *phydev)
 
 	if (DUPLEX_FULL == phydev->duplex)
 		ctl |= BMCR_FULLDPLX;
-	
-	err = phy_write(phydev, MII_BMCR, ctl);
 
-	return err;
+	return phy_write(phydev, MII_BMCR, ctl);
 }
 EXPORT_SYMBOL(genphy_setup_forced);
 
@@ -724,25 +800,20 @@ EXPORT_SYMBOL(genphy_setup_forced);
  */
 int genphy_restart_aneg(struct phy_device *phydev)
 {
-	int ctl;
-
-	ctl = phy_read(phydev, MII_BMCR);
+	int ctl = phy_read(phydev, MII_BMCR);
 
 	if (ctl < 0)
 		return ctl;
 
-	ctl |= (BMCR_ANENABLE | BMCR_ANRESTART);
+	ctl |= BMCR_ANENABLE | BMCR_ANRESTART;
 
 	/* Don't isolate the PHY if we're negotiating */
-	ctl &= ~(BMCR_ISOLATE);
+	ctl &= ~BMCR_ISOLATE;
 
-	ctl = phy_write(phydev, MII_BMCR, ctl);
-
-	return ctl;
+	return phy_write(phydev, MII_BMCR, ctl);
 }
 EXPORT_SYMBOL(genphy_restart_aneg);
 
-
 /**
  * genphy_config_aneg - restart auto-negotiation or write BMCR
  * @phydev: target phy_device struct
@@ -759,13 +830,12 @@ int genphy_config_aneg(struct phy_device *phydev)
 		return genphy_setup_forced(phydev);
 
 	result = genphy_config_advert(phydev);
-
 	if (result < 0) /* error */
 		return result;
-
 	if (result == 0) {
 		/* Advertisement hasn't changed, but maybe aneg was never on to
-		 * begin with?  Or maybe phy was isolated? */
+		 * begin with?  Or maybe phy was isolated?
+		 */
 		int ctl = phy_read(phydev, MII_BMCR);
 
 		if (ctl < 0)
@@ -776,7 +846,8 @@ int genphy_config_aneg(struct phy_device *phydev)
 	}
 
 	/* Only restart aneg if we are advertising something different
-	 * than we were before.	 */
+	 * than we were before.
+	 */
 	if (result > 0)
 		result = genphy_restart_aneg(phydev);
 
@@ -784,6 +855,11 @@ int genphy_config_aneg(struct phy_device *phydev)
 }
 EXPORT_SYMBOL(genphy_config_aneg);
 
+static int gen10g_config_aneg(struct phy_device *phydev)
+{
+	return 0;
+}
+
 /**
  * genphy_update_link - update link status in @phydev
  * @phydev: target phy_device struct
@@ -798,13 +874,11 @@ int genphy_update_link(struct phy_device *phydev)
 
 	/* Do a fake read */
 	status = phy_read(phydev, MII_BMSR);
-
 	if (status < 0)
 		return status;
 
 	/* Read link and autonegotiation status */
 	status = phy_read(phydev, MII_BMSR);
-
 	if (status < 0)
 		return status;
 
@@ -833,35 +907,36 @@ int genphy_read_status(struct phy_device *phydev)
 	int lpa;
 	int lpagb = 0;
 
-	/* Update the link, but return if there
-	 * was an error */
+	/* Update the link, but return if there was an error */
 	err = genphy_update_link(phydev);
 	if (err)
 		return err;
 
+	phydev->lp_advertising = 0;
+
 	if (AUTONEG_ENABLE == phydev->autoneg) {
 		if (phydev->supported & (SUPPORTED_1000baseT_Half
 					| SUPPORTED_1000baseT_Full)) {
 			lpagb = phy_read(phydev, MII_STAT1000);
-
 			if (lpagb < 0)
 				return lpagb;
 
 			adv = phy_read(phydev, MII_CTRL1000);
-
 			if (adv < 0)
 				return adv;
 
+			phydev->lp_advertising =
+				mii_stat1000_to_ethtool_lpa_t(lpagb);
 			lpagb &= adv << 2;
 		}
 
 		lpa = phy_read(phydev, MII_LPA);
-
 		if (lpa < 0)
 			return lpa;
 
-		adv = phy_read(phydev, MII_ADVERTISE);
+		phydev->lp_advertising |= mii_lpa_to_ethtool_lpa_t(lpa);
 
+		adv = phy_read(phydev, MII_ADVERTISE);
 		if (adv < 0)
 			return adv;
 
@@ -869,7 +944,8 @@ int genphy_read_status(struct phy_device *phydev)
 
 		phydev->speed = SPEED_10;
 		phydev->duplex = DUPLEX_HALF;
-		phydev->pause = phydev->asym_pause = 0;
+		phydev->pause = 0;
+		phydev->asym_pause = 0;
 
 		if (lpagb & (LPA_1000FULL | LPA_1000HALF)) {
 			phydev->speed = SPEED_1000;
@@ -878,19 +954,20 @@ int genphy_read_status(struct phy_device *phydev)
 				phydev->duplex = DUPLEX_FULL;
 		} else if (lpa & (LPA_100FULL | LPA_100HALF)) {
 			phydev->speed = SPEED_100;
-			
+
 			if (lpa & LPA_100FULL)
 				phydev->duplex = DUPLEX_FULL;
 		} else
 			if (lpa & LPA_10FULL)
 				phydev->duplex = DUPLEX_FULL;
 
-		if (phydev->duplex == DUPLEX_FULL){
+		if (phydev->duplex == DUPLEX_FULL) {
 			phydev->pause = lpa & LPA_PAUSE_CAP ? 1 : 0;
 			phydev->asym_pause = lpa & LPA_PAUSE_ASYM ? 1 : 0;
 		}
 	} else {
 		int bmcr = phy_read(phydev, MII_BMCR);
+
 		if (bmcr < 0)
 			return bmcr;
 
@@ -906,27 +983,55 @@ int genphy_read_status(struct phy_device *phydev)
 		else
 			phydev->speed = SPEED_10;
 
-		phydev->pause = phydev->asym_pause = 0;
+		phydev->pause = 0;
+		phydev->asym_pause = 0;
 	}
 
 	return 0;
 }
 EXPORT_SYMBOL(genphy_read_status);
 
+static int gen10g_read_status(struct phy_device *phydev)
+{
+	int devad, reg;
+	u32 mmd_mask = phydev->c45_ids.devices_in_package;
+
+	phydev->link = 1;
+
+	/* For now just lie and say it's 10G all the time */
+	phydev->speed = SPEED_10000;
+	phydev->duplex = DUPLEX_FULL;
+
+	for (devad = 0; mmd_mask; devad++, mmd_mask = mmd_mask >> 1) {
+		if (!(mmd_mask & 1))
+			continue;
+
+		/* Read twice because link state is latched and a
+		 * read moves the current state into the register
+		 */
+		phy_read_mmd(phydev, devad, MDIO_STAT1);
+		reg = phy_read_mmd(phydev, devad, MDIO_STAT1);
+		if (reg < 0 || !(reg & MDIO_STAT1_LSTATUS))
+			phydev->link = 0;
+	}
+
+	return 0;
+}
+
 static int genphy_config_init(struct phy_device *phydev)
 {
 	int val;
 	u32 features;
 
 	/* For now, I'll claim that the generic driver supports
-	 * all possible port types */
+	 * all possible port types
+	 */
 	features = (SUPPORTED_TP | SUPPORTED_MII
 			| SUPPORTED_AUI | SUPPORTED_FIBRE |
 			SUPPORTED_BNC);
 
 	/* Do we support autonegotiation? */
 	val = phy_read(phydev, MII_BMSR);
-
 	if (val < 0)
 		return val;
 
@@ -944,7 +1049,6 @@ static int genphy_config_init(struct phy_device *phydev)
 
 	if (val & BMSR_ESTATEN) {
 		val = phy_read(phydev, MII_ESTATUS);
-
 		if (val < 0)
 			return val;
 
@@ -959,6 +1063,16 @@ static int genphy_config_init(struct phy_device *phydev)
 
 	return 0;
 }
+
+static int gen10g_config_init(struct phy_device *phydev)
+{
+	/* Temporarily just say we support everything */
+	phydev->supported = SUPPORTED_10000baseT_Full;
+	phydev->advertising = SUPPORTED_10000baseT_Full;
+
+	return 0;
+}
+
 int genphy_suspend(struct phy_device *phydev)
 {
 	int value;
@@ -966,7 +1080,7 @@ int genphy_suspend(struct phy_device *phydev)
 	mutex_lock(&phydev->lock);
 
 	value = phy_read(phydev, MII_BMCR);
-	phy_write(phydev, MII_BMCR, (value | BMCR_PDOWN));
+	phy_write(phydev, MII_BMCR, value | BMCR_PDOWN);
 
 	mutex_unlock(&phydev->lock);
 
@@ -974,6 +1088,11 @@ int genphy_suspend(struct phy_device *phydev)
 }
 EXPORT_SYMBOL(genphy_suspend);
 
+static int gen10g_suspend(struct phy_device *phydev)
+{
+	return 0;
+}
+
 int genphy_resume(struct phy_device *phydev)
 {
 	int value;
@@ -981,7 +1100,7 @@ int genphy_resume(struct phy_device *phydev)
 	mutex_lock(&phydev->lock);
 
 	value = phy_read(phydev, MII_BMCR);
-	phy_write(phydev, MII_BMCR, (value & ~BMCR_PDOWN));
+	phy_write(phydev, MII_BMCR, value & ~BMCR_PDOWN);
 
 	mutex_unlock(&phydev->lock);
 
@@ -989,6 +1108,11 @@ int genphy_resume(struct phy_device *phydev)
 }
 EXPORT_SYMBOL(genphy_resume);
 
+static int gen10g_resume(struct phy_device *phydev)
+{
+	return 0;
+}
+
 /**
  * phy_probe - probe and init a PHY device
  * @dev: device to probe and init
@@ -999,22 +1123,18 @@ EXPORT_SYMBOL(genphy_resume);
  */
 static int phy_probe(struct device *dev)
 {
-	struct phy_device *phydev;
-	struct phy_driver *phydrv;
-	struct device_driver *drv;
+	struct phy_device *phydev = to_phy_device(dev);
+	struct device_driver *drv = phydev->dev.driver;
+	struct phy_driver *phydrv = to_phy_driver(drv);
 	int err = 0;
 
-	phydev = to_phy_device(dev);
-
-	drv = phydev->dev.driver;
-	phydrv = to_phy_driver(drv);
 	phydev->drv = phydrv;
 
 	/* Disable the interrupt if the PHY doesn't support it
 	 * but the interrupt is still a valid one
 	 */
 	if (!(phydrv->flags & PHY_HAS_INTERRUPT) &&
-			phy_interrupt_is_valid(phydev))
+	    phy_interrupt_is_valid(phydev))
 		phydev->irq = PHY_POLL;
 
 	if (phydrv->flags & PHY_IS_INTERNAL)
@@ -1024,7 +1144,8 @@ static int phy_probe(struct device *dev)
 
 	/* Start out supporting everything. Eventually,
 	 * a controller will attach, and may modify one
-	 * or both of these values */
+	 * or both of these values
+	 */
 	phydev->supported = phydrv->features;
 	phydev->advertising = phydrv->features;
 
@@ -1037,14 +1158,11 @@ static int phy_probe(struct device *dev)
 	mutex_unlock(&phydev->lock);
 
 	return err;
-
 }
 
 static int phy_remove(struct device *dev)
 {
-	struct phy_device *phydev;
-
-	phydev = to_phy_device(dev);
+	struct phy_device *phydev = to_phy_device(dev);
 
 	mutex_lock(&phydev->lock);
 	phydev->state = PHY_DOWN;
@@ -1071,7 +1189,6 @@ int phy_driver_register(struct phy_driver *new_driver)
 	new_driver->driver.remove = phy_remove;
 
 	retval = driver_register(&new_driver->driver);
-
 	if (retval) {
 		pr_err("%s: Error %d in registering driver\n",
 		       new_driver->name, retval);
@@ -1110,13 +1227,14 @@ EXPORT_SYMBOL(phy_driver_unregister);
 void phy_drivers_unregister(struct phy_driver *drv, int n)
 {
 	int i;
-	for (i = 0; i < n; i++) {
+
+	for (i = 0; i < n; i++)
 		phy_driver_unregister(drv + i);
-	}
 }
 EXPORT_SYMBOL(phy_drivers_unregister);
 
-static struct phy_driver genphy_driver = {
+static struct phy_driver genphy_driver[] = {
+{
 	.phy_id		= 0xffffffff,
 	.phy_id_mask	= 0xffffffff,
 	.name		= "Generic PHY",
@@ -1126,8 +1244,19 @@ static struct phy_driver genphy_driver = {
 	.read_status	= genphy_read_status,
 	.suspend	= genphy_suspend,
 	.resume		= genphy_resume,
-	.driver		= {.owner= THIS_MODULE, },
-};
+	.driver		= { .owner = THIS_MODULE, },
+}, {
+	.phy_id         = 0xffffffff,
+	.phy_id_mask    = 0xffffffff,
+	.name           = "Generic 10G PHY",
+	.config_init    = gen10g_config_init,
+	.features       = 0,
+	.config_aneg    = gen10g_config_aneg,
+	.read_status    = gen10g_read_status,
+	.suspend        = gen10g_suspend,
+	.resume         = gen10g_resume,
+	.driver         = {.owner = THIS_MODULE, },
+} };
 
 static int __init phy_init(void)
 {
@@ -1137,7 +1266,8 @@ static int __init phy_init(void)
 	if (rc)
 		return rc;
 
-	rc = phy_driver_register(&genphy_driver);
+	rc = phy_drivers_register(genphy_driver,
+				  ARRAY_SIZE(genphy_driver));
 	if (rc)
 		mdio_bus_exit();
 
@@ -1146,7 +1276,8 @@ static int __init phy_init(void)
 
 static void __exit phy_exit(void)
 {
-	phy_driver_unregister(&genphy_driver);
+	phy_drivers_unregister(genphy_driver,
+			       ARRAY_SIZE(genphy_driver));
 	mdio_bus_exit();
 }
 
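
Among the phy_device.c changes, phy_poll_reset() introduces a bounded poll: sleep 50 ms per attempt, re-read BMCR, and give up with -ETIMEDOUT after 12 tries if the self-clearing BMCR_RESET bit is still set. The standalone sketch below (not part of the patch) shows the same retry pattern with a stubbed register read in place of the MDIO access; the stub is purely illustrative.

#include <stdio.h>
#include <errno.h>
#include <unistd.h>

#define BMCR_RESET	0x8000

/* Stub: pretend the PHY clears its reset bit on the third poll. */
static int read_bmcr(void)
{
	static int polls;
	static int reg = BMCR_RESET;

	if (++polls >= 3)
		reg &= ~BMCR_RESET;
	return reg;
}

static int poll_reset(void)
{
	unsigned int retries = 12;	/* 12 * 50 ms ~= 0.6 s, as in the patch */
	int ret;

	do {
		usleep(50 * 1000);
		ret = read_bmcr();
		if (ret < 0)
			return ret;
	} while ((ret & BMCR_RESET) && --retries);

	return (ret & BMCR_RESET) ? -ETIMEDOUT : 0;
}

int main(void)
{
	printf("poll_reset() = %d\n", poll_reset());
	return 0;
}
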
diff --git a/drivers/net/phy/spi_ks8995.c b/drivers/net/phy/spi_ks8995.c
index f3bea1346021..4cf5fb922e59 100644
--- a/drivers/net/phy/spi_ks8995.c
+++ b/drivers/net/phy/spi_ks8995.c
@@ -15,7 +15,6 @@
 
 #include <linux/types.h>
 #include <linux/kernel.h>
-#include <linux/init.h>
 #include <linux/module.h>
 #include <linux/delay.h>
 #include <linux/device.h>
@@ -171,14 +170,14 @@ static int ks8995_write(struct ks8995_switch *ks, char *buf,
 
 static inline int ks8995_read_reg(struct ks8995_switch *ks, u8 addr, u8 *buf)
 {
-	return (ks8995_read(ks, buf, addr, 1) != 1);
+	return ks8995_read(ks, buf, addr, 1) != 1;
 }
 
 static inline int ks8995_write_reg(struct ks8995_switch *ks, u8 addr, u8 val)
 {
 	char buf = val;
 
-	return (ks8995_write(ks, &buf, addr, 1) != 1);
+	return ks8995_write(ks, &buf, addr, 1) != 1;
 }
 
 /* ------------------------------------------------------------------------ */
@@ -325,7 +324,6 @@ static int ks8995_probe(struct spi_device *spi)
 	return 0;
 
 err_drvdata:
-	spi_set_drvdata(spi, NULL);
 	kfree(ks);
 	return err;
 }
@@ -337,7 +335,6 @@ static int ks8995_remove(struct spi_device *spi)
 	ks8995 = spi_get_drvdata(spi);
 	sysfs_remove_bin_file(&spi->dev.kobj, &ks8995_registers_attr);
 
-	spi_set_drvdata(spi, NULL);
 	kfree(ks8995);
 
 	return 0;
diff --git a/drivers/net/plip/plip.c b/drivers/net/plip/plip.c
index 7b4ff35c8bf7..040b8978d6ca 100644
--- a/drivers/net/plip/plip.c
+++ b/drivers/net/plip/plip.c
@@ -547,9 +547,9 @@ static __be16 plip_type_trans(struct sk_buff *skb, struct net_device *dev)
 	skb_pull(skb,dev->hard_header_len);
 	eth = eth_hdr(skb);
 
-	if(*eth->h_dest&1)
+	if(is_multicast_ether_addr(eth->h_dest))
 	{
-		if(memcmp(eth->h_dest,dev->broadcast, ETH_ALEN)==0)
+		if(ether_addr_equal_64bits(eth->h_dest, dev->broadcast))
 			skb->pkt_type=PACKET_BROADCAST;
 		else
 			skb->pkt_type=PACKET_MULTICAST;
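
The plip hunk swaps the open-coded destination checks for is_multicast_ether_addr() and ether_addr_equal_64bits(). The underlying rule is unchanged: the I/G bit (least-significant bit of the first octet) marks a group address, and the all-ones address within that group is broadcast. A small userspace sketch of the same classification, using plain memcmp() instead of the kernel helpers:

#include <stdio.h>
#include <string.h>

enum pkt_type { PKT_HOST, PKT_MULTICAST, PKT_BROADCAST };

static enum pkt_type classify(const unsigned char dest[6],
			      const unsigned char bcast[6])
{
	if (dest[0] & 1)	/* I/G bit set: group address */
		return memcmp(dest, bcast, 6) == 0 ? PKT_BROADCAST
						   : PKT_MULTICAST;
	return PKT_HOST;
}

int main(void)
{
	const unsigned char bcast[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	const unsigned char mcast[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };

	printf("broadcast -> %d\n", classify(bcast, bcast));
	printf("multicast -> %d\n", classify(mcast, bcast));
	return 0;
}
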
diff --git a/drivers/net/ppp/ppp_mppe.c b/drivers/net/ppp/ppp_mppe.c
index 9a1849a83e2a..911b21602ff2 100644
--- a/drivers/net/ppp/ppp_mppe.c
+++ b/drivers/net/ppp/ppp_mppe.c
@@ -27,8 +27,7 @@
  *   GNU General Public License for more details.
  *
  *   You should have received a copy of the GNU General Public License
- *   along with this program; if not, write to the Free Software
- *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *   along with this program; if not, see <http://www.gnu.org/licenses/>.
  *
  *
  * Changelog:
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index 82ee6ed954cb..2ea7efd11857 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -131,12 +131,12 @@ static inline struct pppoe_net *pppoe_pernet(struct net *net)
 
 static inline int cmp_2_addr(struct pppoe_addr *a, struct pppoe_addr *b)
 {
-	return a->sid == b->sid && !memcmp(a->remote, b->remote, ETH_ALEN);
+	return a->sid == b->sid && ether_addr_equal(a->remote, b->remote);
 }
 
 static inline int cmp_addr(struct pppoe_addr *a, __be16 sid, char *addr)
 {
-	return a->sid == sid && !memcmp(a->remote, addr, ETH_ALEN);
+	return a->sid == sid && ether_addr_equal(a->remote, addr);
 }
 
 #if 8 % PPPOE_HASH_BITS
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index b75ae5bde673..28407426fd6f 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -2034,6 +2034,10 @@ static void team_setup(struct net_device *dev)
 
 	dev->features |= NETIF_F_LLTX;
 	dev->features |= NETIF_F_GRO;
+
+	/* Don't allow team devices to change network namespaces. */
+	dev->features |= NETIF_F_NETNS_LOCAL;
+
 	dev->hw_features = TEAM_VLAN_FEATURES |
 			   NETIF_F_HW_VLAN_CTAG_TX |
 			   NETIF_F_HW_VLAN_CTAG_RX |
@@ -2851,7 +2855,7 @@ static int team_device_event(struct notifier_block *unused,
 	case NETDEV_FEAT_CHANGE:
 		team_compute_features(port->team);
 		break;
-	case NETDEV_CHANGEMTU:
+	case NETDEV_PRECHANGEMTU:
 		/* Forbid to change mtu of underlaying device */
 		return NOTIFY_BAD;
 	case NETDEV_PRE_TYPE_CHANGE:
diff --git a/drivers/net/team/team_mode_random.c b/drivers/net/team/team_mode_random.c
index 7f032e211343..cd2f692b8074 100644
--- a/drivers/net/team/team_mode_random.c
+++ b/drivers/net/team/team_mode_random.c
@@ -13,20 +13,14 @@
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/skbuff.h>
-#include <linux/reciprocal_div.h>
 #include <linux/if_team.h>
 
-static u32 random_N(unsigned int N)
-{
-	return reciprocal_divide(prandom_u32(), N);
-}
-
 static bool rnd_transmit(struct team *team, struct sk_buff *skb)
 {
 	struct team_port *port;
 	int port_index;
 
-	port_index = random_N(team->en_port_count);
+	port_index = prandom_u32_max(team->en_port_count);
 	port = team_get_port_by_index_rcu(team, port_index);
 	if (unlikely(!port))
 		goto drop;
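
team_mode_random.c drops its local random_N() wrapper in favour of prandom_u32_max(), which maps a 32-bit random value into [0, N) with a multiply-and-shift: scale the value by N and keep the upper 32 bits, avoiding a division on the transmit path. A deterministic userspace sketch of that scaling (sample inputs and port count are made up):

#include <stdio.h>
#include <stdint.h>

/* Map a 32-bit value into [0, n) without a divide. */
static uint32_t scale_u32(uint32_t rnd, uint32_t n)
{
	return (uint32_t)(((uint64_t)rnd * n) >> 32);
}

int main(void)
{
	const uint32_t n = 5;	/* e.g. number of enabled team ports */
	const uint32_t samples[] = { 0x00000000, 0x40000000, 0x80000000,
				     0xc0000000, 0xffffffff };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("0x%08x -> port %u\n",
		       (unsigned int)samples[i],
		       (unsigned int)scale_u32(samples[i], n));
	return 0;
}
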
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index ecec8029c5e8..bcf01af4b879 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -110,7 +110,7 @@ struct tap_filter {
 	unsigned char	addr[FLT_EXACT_COUNT][ETH_ALEN];
 };
 
-/* DEFAULT_MAX_NUM_RSS_QUEUES were choosed to let the rx/tx queues allocated for
+/* DEFAULT_MAX_NUM_RSS_QUEUES were chosen to let the rx/tx queues allocated for
  * the netdevice to be fit in one page. So we can make sure the success of
  * memory allocation. TODO: increase the limit. */
 #define MAX_TAP_QUEUES DEFAULT_MAX_NUM_RSS_QUEUES
@@ -119,7 +119,7 @@ struct tap_filter {
 #define TUN_FLOW_EXPIRE (3 * HZ)
 
 /* A tun_file connects an open character device to a tuntap netdevice. It
- * also contains all socket related strctures (except sock_fprog and tap_filter)
+ * also contains all socket related structures (except sock_fprog and tap_filter)
  * to serve as one transmit queue for tuntap device. The sock_fprog and
  * tap_filter were kept in tun_struct since they were used for filtering for the
  * netdevice not for a specific queue (at least I didn't see the requirement for
@@ -152,6 +152,7 @@ struct tun_flow_entry {
 	struct tun_struct *tun;
 
 	u32 rxhash;
+	u32 rps_rxhash;
 	int queue_index;
 	unsigned long updated;
 };
@@ -220,6 +221,7 @@ static struct tun_flow_entry *tun_flow_create(struct tun_struct *tun,
 			  rxhash, queue_index);
 		e->updated = jiffies;
 		e->rxhash = rxhash;
+		e->rps_rxhash = 0;
 		e->queue_index = queue_index;
 		e->tun = tun;
 		hlist_add_head_rcu(&e->hash_link, head);
@@ -232,6 +234,7 @@ static void tun_flow_delete(struct tun_struct *tun, struct tun_flow_entry *e)
 {
 	tun_debug(KERN_INFO, tun, "delete flow: hash %u index %u\n",
 		  e->rxhash, e->queue_index);
+	sock_rps_reset_flow_hash(e->rps_rxhash);
 	hlist_del_rcu(&e->hash_link);
 	kfree_rcu(e, rcu);
 	--tun->flow_count;
@@ -325,6 +328,7 @@ static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
 		/* TODO: keep queueing to old queue until it's empty? */
 		e->queue_index = queue_index;
 		e->updated = jiffies;
+		sock_rps_record_flow_hash(e->rps_rxhash);
 	} else {
 		spin_lock_bh(&tun->lock);
 		if (!tun_flow_find(head, rxhash) &&
@@ -341,8 +345,20 @@ unlock:
 	rcu_read_unlock();
 }
 
+/*
+ * Save the hash received in the stack receive path and update the
+ * flow_hash table accordingly.
+ */
+static inline void tun_flow_save_rps_rxhash(struct tun_flow_entry *e, u32 hash)
+{
+	if (unlikely(e->rps_rxhash != hash)) {
+		sock_rps_reset_flow_hash(e->rps_rxhash);
+		e->rps_rxhash = hash;
+	}
+}
+
 /* We try to identify a flow through its rxhash first. The reason that
- * we do not check rxq no. is becuase some cards(e.g 82599), chooses
+ * we do not check rxq no. is because some cards(e.g 82599), chooses
  * the rxq based on the txq where the last packet of the flow comes. As
  * the userspace application move between processors, we may get a
  * different rxq no. here. If we could not get rxhash, then we would
@@ -359,12 +375,13 @@ static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
 	rcu_read_lock();
 	numqueues = ACCESS_ONCE(tun->numqueues);
 
-	txq = skb_get_rxhash(skb);
+	txq = skb_get_hash(skb);
 	if (txq) {
 		e = tun_flow_find(&tun->flows[tun_hashfn(txq)], txq);
-		if (e)
+		if (e) {
+			tun_flow_save_rps_rxhash(e, txq);
 			txq = e->queue_index;
-		else
+		} else
 			/* use multiply and shift instead of expensive divide */
 			txq = ((u64)txq * numqueues) >> 32;
 	} else if (likely(skb_rx_queue_recorded(skb))) {
@@ -532,7 +549,7 @@ static int tun_attach(struct tun_struct *tun, struct file *file, bool skip_filte
 
 	err = 0;
 
-	/* Re-attach the filter to presist device */
+	/* Re-attach the filter to persist device */
 	if (!skip_filter && (tun->filter_attached == true)) {
 		err = sk_attach_filter(&tun->fprog, tfile->socket.sk);
 		if (!err)
@@ -721,14 +738,32 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct tun_struct *tun = netdev_priv(dev);
 	int txq = skb->queue_mapping;
 	struct tun_file *tfile;
+	u32 numqueues = 0;
 
 	rcu_read_lock();
 	tfile = rcu_dereference(tun->tfiles[txq]);
+	numqueues = ACCESS_ONCE(tun->numqueues);
 
 	/* Drop packet if interface is not attached */
-	if (txq >= tun->numqueues)
+	if (txq >= numqueues)
 		goto drop;
 
+	if (numqueues == 1) {
+		/* Select queue was not called for the skbuff, so we extract the
+		 * RPS hash and save it into the flow_table here.
+		 */
+		__u32 rxhash;
+
+		rxhash = skb_get_hash(skb);
+		if (rxhash) {
+			struct tun_flow_entry *e;
+			e = tun_flow_find(&tun->flows[tun_hashfn(rxhash)],
+					rxhash);
+			if (e)
+				tun_flow_save_rps_rxhash(e, rxhash);
+		}
+	}
+
 	tun_debug(KERN_INFO, tun, "tun_net_xmit %d\n", skb->len);
 
 	BUG_ON(!tfile);
@@ -746,8 +781,8 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
 	/* Limit the number of packets queued by dividing txq length with the
 	 * number of queues.
 	 */
-	if (skb_queue_len(&tfile->socket.sk->sk_receive_queue)
-			  >= dev->tx_queue_len / tun->numqueues)
+	if (skb_queue_len(&tfile->socket.sk->sk_receive_queue) * numqueues
+			  >= dev->tx_queue_len)
 		goto drop;
 
 	if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
@@ -820,9 +855,9 @@ static void tun_poll_controller(struct net_device *dev)
 	 * Tun only receives frames when:
 	 * 1) the char device endpoint gets data from user space
 	 * 2) the tun socket gets a sendmsg call from user space
-	 * Since both of those are syncronous operations, we are guaranteed
+	 * Since both of those are synchronous operations, we are guaranteed
 	 * never to have pending data when we poll for it
-	 * so theres nothing to do here but return.
+	 * so there is nothing to do here but return.
 	 * We need this though so netpoll recognizes us as an interface that
 	 * supports polling, which enables bridge devices in virt setups to
 	 * still use netconsole
@@ -1147,7 +1182,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
 	skb_reset_network_header(skb);
 	skb_probe_transport_header(skb, 0);
 
-	rxhash = skb_get_rxhash(skb);
+	rxhash = skb_get_hash(skb);
 	netif_rx_ni(skb);
 
 	tun->dev->stats.rx_packets++;
@@ -1292,8 +1327,7 @@ done:
 }
 
 static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
-			   struct kiocb *iocb, const struct iovec *iv,
-			   ssize_t len, int noblock)
+			   const struct iovec *iv, ssize_t len, int noblock)
 {
 	DECLARE_WAITQUEUE(wait, current);
 	struct sk_buff *skb;
@@ -1356,7 +1390,7 @@ static ssize_t tun_chr_aio_read(struct kiocb *iocb, const struct iovec *iv,
 		goto out;
 	}
 
-	ret = tun_do_read(tun, tfile, iocb, iv, len,
+	ret = tun_do_read(tun, tfile, iv, len,
 			  file->f_flags & O_NONBLOCK);
 	ret = min_t(ssize_t, ret, len);
 	if (ret > 0)
@@ -1457,7 +1491,7 @@ static int tun_recvmsg(struct kiocb *iocb, struct socket *sock,
 					 SOL_PACKET, TUN_TX_TIMESTAMP);
 		goto out;
 	}
-	ret = tun_do_read(tun, tfile, iocb, m->msg_iov, total_len,
+	ret = tun_do_read(tun, tfile, m->msg_iov, total_len,
 			  flags & MSG_DONTWAIT);
 	if (ret > total_len) {
 		m->msg_flags |= MSG_TRUNC;
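
The tun_net_xmit() hunk also rewrites the per-queue backlog check: instead of dividing dev->tx_queue_len by the live queue count, it compares qlen * numqueues against tx_queue_len using the snapshotted numqueues. Up to integer rounding the two forms are equivalent; the sketch below (with made-up numbers, not part of the patch) shows where the threshold lands when the queue count does not divide the device limit evenly.

#include <stdio.h>

/* New form: drop once this queue's share of the device limit is exceeded. */
static int should_drop(unsigned int qlen, unsigned int numqueues,
		       unsigned int tx_queue_len)
{
	return qlen * numqueues >= tx_queue_len;
}

int main(void)
{
	const unsigned int tx_queue_len = 500;	/* hypothetical device limit */
	const unsigned int numqueues = 3;
	unsigned int qlen;

	for (qlen = 165; qlen <= 168; qlen++)
		printf("qlen=%u -> drop=%d (old form: %d)\n", qlen,
		       should_drop(qlen, numqueues, tx_queue_len),
		       qlen >= tx_queue_len / numqueues);
	return 0;
}
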
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 47b0f732b0b1..6b638a066c1d 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -92,11 +92,12 @@ config USB_RTL8150
 	  module will be called rtl8150.
 
 config USB_RTL8152
-	tristate "Realtek RTL8152 Based USB 2.0 Ethernet Adapters"
+	tristate "Realtek RTL8152/RTL8153 Based USB Ethernet Adapters"
 	select MII
 	help
 	  This option adds support for Realtek RTL8152 based USB 2.0
-	  10/100 Ethernet adapters.
+	  10/100 Ethernet adapters and RTL8153 based USB 3.0 10/100/1000
+	  Ethernet adapters.
 
 	  To compile this driver as a module, choose M here: the
 	  module will be called r8152.
diff --git a/drivers/net/usb/asix.h b/drivers/net/usb/asix.h
index bdaa12d07a12..5d049d00c2d7 100644
--- a/drivers/net/usb/asix.h
+++ b/drivers/net/usb/asix.h
@@ -16,8 +16,7 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 #ifndef _ASIX_H
@@ -28,7 +27,6 @@
 
 #include <linux/module.h>
 #include <linux/kmod.h>
-#include <linux/init.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/ethtool.h>
diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c
index 577c72d5f369..5c55f11572ba 100644
--- a/drivers/net/usb/asix_common.c
+++ b/drivers/net/usb/asix_common.c
@@ -16,8 +16,7 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 #include "asix.h"
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index 386a3df53678..9765a7d4766d 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -16,8 +16,7 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 #include "asix.h"
diff --git a/drivers/net/usb/ax88172a.c b/drivers/net/usb/ax88172a.c
index 723b3879ecc2..5f18fcb8dcc7 100644
--- a/drivers/net/usb/ax88172a.c
+++ b/drivers/net/usb/ax88172a.c
@@ -21,8 +21,7 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 #include "asix.h"
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
index 8e8d0fcd4979..d6f64dad05bc 100644
--- a/drivers/net/usb/ax88179_178a.c
+++ b/drivers/net/usb/ax88179_178a.c
@@ -14,8 +14,7 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 #include <linux/module.h>
diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c
index df507e6dbb9c..630caf48f63a 100644
--- a/drivers/net/usb/catc.c
+++ b/drivers/net/usb/catc.c
@@ -24,15 +24,13 @@
  * GNU General Public License for more details.
  * 
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
  * 
  * Should you need to contact me, the author, you can do so either by
  * e-mail - mail your message to <vojtech@suse.cz>, or by paper mail:
  * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
  */
 
-#include <linux/init.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/string.h>
diff --git a/drivers/net/usb/cdc_eem.c b/drivers/net/usb/cdc_eem.c
index 08d55b6bf272..f7180f8db39e 100644
--- a/drivers/net/usb/cdc_eem.c
+++ b/drivers/net/usb/cdc_eem.c
@@ -14,12 +14,10 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 #include <linux/module.h>
-#include <linux/init.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/ctype.h>
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 2023f3ea891e..42e176912c8e 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -14,15 +14,13 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 // #define	DEBUG			// error path messages, extra info
 // #define	VERBOSE			// more; success messages
 
 #include <linux/module.h>
-#include <linux/init.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/ethtool.h>
@@ -487,6 +485,7 @@ static const struct driver_info wwan_info = {
 #define ZTE_VENDOR_ID		0x19D2
 #define DELL_VENDOR_ID		0x413C
 #define REALTEK_VENDOR_ID	0x0bda
+#define SAMSUNG_VENDOR_ID	0x04e8
 
 static const struct usb_device_id	products[] = {
 /* BLACKLIST !!
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index e15ec2b12035..dbff290ed0e4 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -39,7 +39,6 @@
  */
 
 #include <linux/module.h>
-#include <linux/init.h>
 #include <linux/netdevice.h>
 #include <linux/ctype.h>
 #include <linux/ethtool.h>
diff --git a/drivers/net/usb/cdc_subset.c b/drivers/net/usb/cdc_subset.c
index 0d1fe89ae0bd..91f0919fe278 100644
--- a/drivers/net/usb/cdc_subset.c
+++ b/drivers/net/usb/cdc_subset.c
@@ -13,13 +13,11 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 #include <linux/module.h>
 #include <linux/kmod.h>
-#include <linux/init.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/ethtool.h>
diff --git a/drivers/net/usb/cx82310_eth.c b/drivers/net/usb/cx82310_eth.c
index 1e207f086b75..3eed708a6182 100644
--- a/drivers/net/usb/cx82310_eth.c
+++ b/drivers/net/usb/cx82310_eth.c
@@ -14,12 +14,10 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 #include <linux/module.h>
-#include <linux/init.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/ethtool.h>
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index e80219877730..6e9c344c7a20 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -13,7 +13,6 @@
 #include <linux/module.h>
 #include <linux/sched.h>
 #include <linux/stddef.h>
-#include <linux/init.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/ethtool.h>
diff --git a/drivers/net/usb/gl620a.c b/drivers/net/usb/gl620a.c
index a7e3f4e55bf3..e4a8a93fbaf7 100644
--- a/drivers/net/usb/gl620a.c
+++ b/drivers/net/usb/gl620a.c
@@ -14,15 +14,13 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 // #define	DEBUG			// error path messages, extra info
 // #define	VERBOSE			// more; success messages
 
 #include <linux/module.h>
-#include <linux/init.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/ethtool.h>
diff --git a/drivers/net/usb/int51x1.c b/drivers/net/usb/int51x1.c
index ace9e74ffbdd..4ff70b22c6ee 100644
--- a/drivers/net/usb/int51x1.c
+++ b/drivers/net/usb/int51x1.c
@@ -20,8 +20,7 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 #include <linux/module.h>
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
index ff8594d8dd2d..421934c83f1c 100644
--- a/drivers/net/usb/ipheth.c
+++ b/drivers/net/usb/ipheth.c
@@ -45,7 +45,6 @@
 
 #include <linux/kernel.h>
 #include <linux/errno.h>
-#include <linux/init.h>
 #include <linux/slab.h>
 #include <linux/module.h>
 #include <linux/netdevice.h>
diff --git a/drivers/net/usb/kalmia.c b/drivers/net/usb/kalmia.c
index 6866eae3e388..5662babf0583 100644
--- a/drivers/net/usb/kalmia.c
+++ b/drivers/net/usb/kalmia.c
@@ -15,7 +15,6 @@
  */
 
 #include <linux/module.h>
-#include <linux/init.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/ctype.h>
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index afb117c16d2d..a359d3bb7c5b 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -25,8 +25,7 @@
  *     GNU General Public License for more details.
  *
  *     You should have received a copy of the GNU General Public License
- *     along with this program; if not, write to the Free Software Foundation,
- *     Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *     along with this program; if not, see <http://www.gnu.org/licenses/>.
  *
  ****************************************************************/
 
@@ -46,7 +45,6 @@
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/string.h>
-#include <linux/init.h>
 #include <linux/delay.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
diff --git a/drivers/net/usb/lg-vl600.c b/drivers/net/usb/lg-vl600.c
index 808d6506da41..acfcc32b323d 100644
--- a/drivers/net/usb/lg-vl600.c
+++ b/drivers/net/usb/lg-vl600.c
@@ -15,8 +15,7 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 #include <linux/etherdevice.h>
 #include <linux/ethtool.h>
diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c
index f54637828574..a305a7b2dae6 100644
--- a/drivers/net/usb/mcs7830.c
+++ b/drivers/net/usb/mcs7830.c
@@ -36,14 +36,12 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 #include <linux/crc32.h>
 #include <linux/etherdevice.h>
 #include <linux/ethtool.h>
-#include <linux/init.h>
 #include <linux/mii.h>
 #include <linux/module.h>
 #include <linux/netdevice.h>
diff --git a/drivers/net/usb/net1080.c b/drivers/net/usb/net1080.c
index 93e0716a118c..0a85d9227775 100644
--- a/drivers/net/usb/net1080.c
+++ b/drivers/net/usb/net1080.c
@@ -13,15 +13,13 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 // #define	DEBUG			// error path messages, extra info
 // #define	VERBOSE			// more; success messages
 
 #include <linux/module.h>
-#include <linux/init.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/ethtool.h>
diff --git a/drivers/net/usb/plusb.c b/drivers/net/usb/plusb.c
index 0fcc8e65a068..3d18bb0eee85 100644
--- a/drivers/net/usb/plusb.c
+++ b/drivers/net/usb/plusb.c
@@ -13,15 +13,13 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 // #define	DEBUG			// error path messages, extra info
 // #define	VERBOSE			// more; success messages
 
 #include <linux/module.h>
-#include <linux/init.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/ethtool.h>
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 51073721e224..e8fac732c6f1 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -1,5 +1,5 @@
 /*
- *  Copyright (c) 2013 Realtek Semiconductor Corp. All rights reserved.
+ *  Copyright (c) 2014 Realtek Semiconductor Corp. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
@@ -7,7 +7,6 @@
  *
  */
 
-#include <linux/init.h>
 #include <linux/signal.h>
 #include <linux/slab.h>
 #include <linux/module.h>
@@ -24,9 +23,9 @@
 #include <linux/ipv6.h>
 
 /* Version Information */
-#define DRIVER_VERSION "v1.02.0 (2013/10/28)"
+#define DRIVER_VERSION "v1.04.0 (2014/01/15)"
 #define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
-#define DRIVER_DESC "Realtek RTL8152 Based USB 2.0 Ethernet Adapters"
+#define DRIVER_DESC "Realtek RTL8152/RTL8153 Based USB Ethernet Adapters"
 #define MODULENAME "r8152"
 
 #define R8152_PHY_ID		32
@@ -39,15 +38,24 @@
 #define PLA_RXFIFO_CTRL2	0xc0a8
 #define PLA_FMC			0xc0b4
 #define PLA_CFG_WOL		0xc0b6
+#define PLA_TEREDO_CFG		0xc0bc
 #define PLA_MAR			0xcd00
+#define PLA_BACKUP		0xd000
 #define PAL_BDC_CR		0xd1a0
+#define PLA_TEREDO_TIMER	0xd2cc
+#define PLA_REALWOW_TIMER	0xd2e8
 #define PLA_LEDSEL		0xdd90
 #define PLA_LED_FEATURE		0xdd92
 #define PLA_PHYAR		0xde00
+#define PLA_BOOT_CTRL		0xe004
 #define PLA_GPHY_INTR_IMR	0xe022
 #define PLA_EEE_CR		0xe040
 #define PLA_EEEP_CR		0xe080
 #define PLA_MAC_PWR_CTRL	0xe0c0
+#define PLA_MAC_PWR_CTRL2	0xe0ca
+#define PLA_MAC_PWR_CTRL3	0xe0cc
+#define PLA_MAC_PWR_CTRL4	0xe0ce
+#define PLA_WDT6_CTRL		0xe428
 #define PLA_TCR0		0xe610
 #define PLA_TCR1		0xe612
 #define PLA_TXFIFO_CTRL		0xe618
@@ -73,16 +81,25 @@
 #define PLA_BP_5		0xfc32
 #define PLA_BP_6		0xfc34
 #define PLA_BP_7		0xfc36
+#define PLA_BP_EN		0xfc38
 
+#define USB_U2P3_CTRL		0xb460
 #define USB_DEV_STAT		0xb808
 #define USB_USB_CTRL		0xd406
 #define USB_PHY_CTRL		0xd408
 #define USB_TX_AGG		0xd40a
 #define USB_RX_BUF_TH		0xd40c
 #define USB_USB_TIMER		0xd428
+#define USB_RX_EARLY_AGG	0xd42c
 #define USB_PM_CTRL_STATUS	0xd432
 #define USB_TX_DMA		0xd434
+#define USB_TOLERANCE		0xd490
+#define USB_LPM_CTRL		0xd41a
 #define USB_UPS_CTRL		0xd800
+#define USB_MISC_0		0xd81a
+#define USB_POWER_CUT		0xd80a
+#define USB_AFE_CTRL2		0xd824
+#define USB_WDT11_CTRL		0xe43c
 #define USB_BP_BA		0xfc26
 #define USB_BP_0		0xfc28
 #define USB_BP_1		0xfc2a
@@ -92,14 +109,30 @@
 #define USB_BP_5		0xfc32
 #define USB_BP_6		0xfc34
 #define USB_BP_7		0xfc36
+#define USB_BP_EN		0xfc38
 
 /* OCP Registers */
 #define OCP_ALDPS_CONFIG	0x2010
 #define OCP_EEE_CONFIG1		0x2080
 #define OCP_EEE_CONFIG2		0x2092
 #define OCP_EEE_CONFIG3		0x2094
+#define OCP_BASE_MII		0xa400
 #define OCP_EEE_AR		0xa41a
 #define OCP_EEE_DATA		0xa41c
+#define OCP_PHY_STATUS		0xa420
+#define OCP_POWER_CFG		0xa430
+#define OCP_EEE_CFG		0xa432
+#define OCP_SRAM_ADDR		0xa436
+#define OCP_SRAM_DATA		0xa438
+#define OCP_DOWN_SPEED		0xa442
+#define OCP_EEE_CFG2		0xa5d0
+#define OCP_ADC_CFG		0xbc06
+
+/* SRAM Register */
+#define SRAM_LPF_CFG		0x8012
+#define SRAM_10M_AMP1		0x8080
+#define SRAM_10M_AMP2		0x8082
+#define SRAM_IMPEDANCE		0x8084
 
 /* PLA_RCR */
 #define RCR_AAP			0x00000001
@@ -116,14 +149,17 @@
 #define RXFIFO_THR2_FULL	0x00000060
 #define RXFIFO_THR2_HIGH	0x00000038
 #define RXFIFO_THR2_OOB		0x0000004a
+#define RXFIFO_THR2_NORMAL	0x00a0
 
 /* PLA_RXFIFO_CTRL2 */
 #define RXFIFO_THR3_FULL	0x00000078
 #define RXFIFO_THR3_HIGH	0x00000048
 #define RXFIFO_THR3_OOB		0x0000005a
+#define RXFIFO_THR3_NORMAL	0x0110
 
 /* PLA_TXFIFO_CTRL */
 #define TXFIFO_THR_NORMAL	0x00400008
+#define TXFIFO_THR_NORMAL2	0x01000008
 
 /* PLA_FMC */
 #define FMC_FCR_MCU_EN		0x0001
@@ -131,6 +167,9 @@
 /* PLA_EEEP_CR */
 #define EEEP_CR_EEEP_TX		0x0002
 
+/* PLA_WDT6_CTRL */
+#define WDT6_SET_MODE		0x0010
+
 /* PLA_TCR0 */
 #define TCR0_TX_EMPTY		0x0800
 #define TCR0_AUTO_FIFO		0x0080
@@ -168,6 +207,12 @@
 /* PLA_CFG_WOL */
 #define MAGIC_EN		0x0001
 
+/* PLA_TEREDO_CFG */
+#define TEREDO_SEL		0x8000
+#define TEREDO_WAKE_MASK	0x7f00
+#define TEREDO_RS_EVENT_MASK	0x00fe
+#define OOB_TEREDO_EN		0x0001
+
 /* PAL_BDC_CR */
 #define ALDPS_PROXY_MODE	0x0001
 
@@ -185,6 +230,25 @@
 #define D3_CLK_GATED_EN		0x00004000
 #define MCU_CLK_RATIO		0x07010f07
 #define MCU_CLK_RATIO_MASK	0x0f0f0f0f
+#define ALDPS_SPDWN_RATIO	0x0f87
+
+/* PLA_MAC_PWR_CTRL2 */
+#define EEE_SPDWN_RATIO		0x8007
+
+/* PLA_MAC_PWR_CTRL3 */
+#define PKT_AVAIL_SPDWN_EN	0x0100
+#define SUSPEND_SPDWN_EN	0x0004
+#define U1U2_SPDWN_EN		0x0002
+#define L1_SPDWN_EN		0x0001
+
+/* PLA_MAC_PWR_CTRL4 */
+#define PWRSAVE_SPDWN_EN	0x1000
+#define RXDV_SPDWN_EN		0x0800
+#define TX10MIDLE_EN		0x0100
+#define TP100_SPDWN_EN		0x0020
+#define TP500_SPDWN_EN		0x0010
+#define TP1000_SPDWN_EN		0x0008
+#define EEE_SPDWN_EN		0x0001
 
 /* PLA_GPHY_INTR_IMR */
 #define GPHY_STS_MSK		0x0001
@@ -199,6 +263,9 @@
 #define EEE_RX_EN		0x0001
 #define EEE_TX_EN		0x0002
 
+/* PLA_BOOT_CTRL */
+#define AUTOLOAD_DONE		0x0002
+
 /* USB_DEV_STAT */
 #define STAT_SPEED_MASK		0x0006
 #define STAT_SPEED_HIGH		0x0000
@@ -208,7 +275,9 @@
 #define TX_AGG_MAX_THRESHOLD	0x03
 
 /* USB_RX_BUF_TH */
-#define RX_BUF_THR		0x7a120180
+#define RX_THR_SUPPER		0x0c350180
+#define RX_THR_HIGH		0x7a120180
+#define RX_THR_SLOW		0xffff0180
 
 /* USB_TX_DMA */
 #define TEST_MODE_DISABLE	0x00000001
@@ -218,17 +287,55 @@
 #define POWER_CUT		0x0100
 
 /* USB_PM_CTRL_STATUS */
-#define RWSUME_INDICATE		0x0001
+#define RESUME_INDICATE		0x0001
 
 /* USB_USB_CTRL */
 #define RX_AGG_DISABLE		0x0010
 
+/* USB_U2P3_CTRL */
+#define U2P3_ENABLE		0x0001
+
+/* USB_POWER_CUT */
+#define PWR_EN			0x0001
+#define PHASE2_EN		0x0008
+
+/* USB_MISC_0 */
+#define PCUT_STATUS		0x0001
+
+/* USB_RX_EARLY_AGG */
+#define EARLY_AGG_SUPPER	0x0e832981
+#define EARLY_AGG_HIGH		0x0e837a12
+#define EARLY_AGG_SLOW		0x0e83ffff
+
+/* USB_WDT11_CTRL */
+#define TIMER11_EN		0x0001
+
+/* USB_LPM_CTRL */
+#define LPM_TIMER_MASK		0x0c
+#define LPM_TIMER_500MS		0x04	/* 500 ms */
+#define LPM_TIMER_500US		0x0c	/* 500 us */
+
+/* USB_AFE_CTRL2 */
+#define SEN_VAL_MASK		0xf800
+#define SEN_VAL_NORMAL		0xa000
+#define SEL_RXIDLE		0x0100
+
 /* OCP_ALDPS_CONFIG */
 #define ENPWRSAVE		0x8000
 #define ENPDNPS			0x0200
 #define LINKENA			0x0100
 #define DIS_SDSAVE		0x0010
 
+/* OCP_PHY_STATUS */
+#define PHY_STAT_MASK		0x0007
+#define PHY_STAT_LAN_ON		3
+#define PHY_STAT_PWRDN		5
+
+/* OCP_POWER_CFG */
+#define EEE_CLKDIV_EN		0x8000
+#define EN_ALDPS		0x0004
+#define EN_10M_PLLOFF		0x0001
+
 /* OCP_EEE_CONFIG1 */
 #define RG_TXLPI_MSK_HFDUP	0x8000
 #define RG_MATCLR_EN		0x4000
@@ -263,7 +370,36 @@
 #define EEE_ADDR		0x003C
 #define EEE_DATA		0x0002
 
+/* OCP_EEE_CFG */
+#define CTAP_SHORT_EN		0x0040
+#define EEE10_EN		0x0010
+
+/* OCP_DOWN_SPEED */
+#define EN_10M_BGOFF		0x0080
+
+/* OCP_EEE_CFG2 */
+#define MY1000_EEE		0x0004
+#define MY100_EEE		0x0002
+
+/* OCP_ADC_CFG */
+#define CKADSEL_L		0x0100
+#define ADC_EN			0x0080
+#define EN_EMI_L		0x0040
+
+/* SRAM_LPF_CFG */
+#define LPF_AUTO_TUNE		0x8000
+
+/* SRAM_10M_AMP1 */
+#define GDAC_IB_UPALL		0x0008
+
+/* SRAM_10M_AMP2 */
+#define AMP_DN			0x0200
+
+/* SRAM_IMPEDANCE */
+#define RX_DRIVING_MASK		0x6000
+
 enum rtl_register_content {
+	_1000bps	= 0x10,
 	_100bps		= 0x08,
 	_10bps		= 0x04,
 	LINK_STATUS	= 0x02,
@@ -273,6 +409,9 @@ enum rtl_register_content {
 #define RTL8152_MAX_TX		10
 #define RTL8152_MAX_RX		10
 #define INTBUFSIZE		2
+#define CRC_SIZE		4
+#define TX_ALIGN		4
+#define RX_ALIGN		8
 
 #define INTR_LINK		0x0004
 
@@ -302,10 +441,17 @@ enum rtl8152_flags {
 /* Define these values to match your device */
 #define VENDOR_ID_REALTEK		0x0bda
 #define PRODUCT_ID_RTL8152		0x8152
+#define PRODUCT_ID_RTL8153		0x8153
+
+#define VENDOR_ID_SAMSUNG		0x04e8
+#define PRODUCT_ID_SAMSUNG		0xa101
 
 #define MCU_TYPE_PLA			0x0100
 #define MCU_TYPE_USB			0x0000
 
+#define REALTEK_USB_DEVICE(vend, prod)	\
+	USB_DEVICE_INTERFACE_CLASS(vend, prod, USB_CLASS_VENDOR_SPEC)
+
 struct rx_desc {
 	__le32 opts1;
 #define RX_LEN_MASK			0x7fff
@@ -363,6 +509,15 @@ struct r8152 {
 	spinlock_t rx_lock, tx_lock;
 	struct delayed_work schedule;
 	struct mii_if_info mii;
+
+	struct rtl_ops {
+		void (*init)(struct r8152 *);
+		int (*enable)(struct r8152 *);
+		void (*disable)(struct r8152 *);
+		void (*down)(struct r8152 *);
+		void (*unload)(struct r8152 *);
+	} rtl_ops;
+
 	int intr_interval;
 	u32 msg_enable;
 	u32 tx_qlen;
@@ -375,7 +530,11 @@ struct r8152 {
 enum rtl_version {
 	RTL_VER_UNKNOWN = 0,
 	RTL_VER_01,
-	RTL_VER_02
+	RTL_VER_02,
+	RTL_VER_03,
+	RTL_VER_04,
+	RTL_VER_05,
+	RTL_VER_MAX
 };
 
 /* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
@@ -427,8 +586,8 @@ int set_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data)
 static int generic_ocp_read(struct r8152 *tp, u16 index, u16 size,
 				void *data, u16 type)
 {
-	u16	limit = 64;
-	int	ret = 0;
+	u16 limit = 64;
+	int ret = 0;
 
 	if (test_bit(RTL8152_UNPLUG, &tp->flags))
 		return -ENODEV;
@@ -467,9 +626,9 @@ static int generic_ocp_read(struct r8152 *tp, u16 index, u16 size,
 static int generic_ocp_write(struct r8152 *tp, u16 index, u16 byteen,
 				u16 size, void *data, u16 type)
 {
-	int	ret;
-	u16	byteen_start, byteen_end, byen;
-	u16	limit = 512;
+	int ret;
+	u16 byteen_start, byteen_end, byen;
+	u16 limit = 512;
 
 	if (test_bit(RTL8152_UNPLUG, &tp->flags))
 		return -ENODEV;
@@ -653,45 +812,54 @@ static void ocp_write_byte(struct r8152 *tp, u16 type, u16 index, u32 data)
 	generic_ocp_write(tp, index, byen, sizeof(tmp), &tmp, type);
 }
 
-static void r8152_mdio_write(struct r8152 *tp, u32 reg_addr, u32 value)
+static u16 ocp_reg_read(struct r8152 *tp, u16 addr)
 {
-	u32	ocp_data;
-	int	i;
+	u16 ocp_base, ocp_index;
 
-	ocp_data = PHYAR_FLAG | ((reg_addr & 0x1f) << 16) |
-		   (value & 0xffff);
+	ocp_base = addr & 0xf000;
+	if (ocp_base != tp->ocp_base) {
+		ocp_write_word(tp, MCU_TYPE_PLA, PLA_OCP_GPHY_BASE, ocp_base);
+		tp->ocp_base = ocp_base;
+	}
 
-	ocp_write_dword(tp, MCU_TYPE_PLA, PLA_PHYAR, ocp_data);
+	ocp_index = (addr & 0x0fff) | 0xb000;
+	return ocp_read_word(tp, MCU_TYPE_PLA, ocp_index);
+}
 
-	for (i = 20; i > 0; i--) {
-		udelay(25);
-		ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_PHYAR);
-		if (!(ocp_data & PHYAR_FLAG))
-			break;
+static void ocp_reg_write(struct r8152 *tp, u16 addr, u16 data)
+{
+	u16 ocp_base, ocp_index;
+
+	ocp_base = addr & 0xf000;
+	if (ocp_base != tp->ocp_base) {
+		ocp_write_word(tp, MCU_TYPE_PLA, PLA_OCP_GPHY_BASE, ocp_base);
+		tp->ocp_base = ocp_base;
 	}
-	udelay(20);
+
+	ocp_index = (addr & 0x0fff) | 0xb000;
+	ocp_write_word(tp, MCU_TYPE_PLA, ocp_index, data);
 }
 
-static int r8152_mdio_read(struct r8152 *tp, u32 reg_addr)
+static inline void r8152_mdio_write(struct r8152 *tp, u32 reg_addr, u32 value)
 {
-	u32	ocp_data;
-	int	i;
-
-	ocp_data = (reg_addr & 0x1f) << 16;
-	ocp_write_dword(tp, MCU_TYPE_PLA, PLA_PHYAR, ocp_data);
+	ocp_reg_write(tp, OCP_BASE_MII + reg_addr * 2, value);
+}
 
-	for (i = 20; i > 0; i--) {
-		udelay(25);
-		ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_PHYAR);
-		if (ocp_data & PHYAR_FLAG)
-			break;
-	}
-	udelay(20);
+static inline int r8152_mdio_read(struct r8152 *tp, u32 reg_addr)
+{
+	return ocp_reg_read(tp, OCP_BASE_MII + reg_addr * 2);
+}
 
-	if (!(ocp_data & PHYAR_FLAG))
-		return -EAGAIN;
+static void sram_write(struct r8152 *tp, u16 addr, u16 data)
+{
+	ocp_reg_write(tp, OCP_SRAM_ADDR, addr);
+	ocp_reg_write(tp, OCP_SRAM_DATA, data);
+}
 
-	return (u16)(ocp_data & 0xffff);
+static u16 sram_read(struct r8152 *tp, u16 addr)
+{
+	ocp_reg_write(tp, OCP_SRAM_ADDR, addr);
+	return ocp_reg_read(tp, OCP_SRAM_DATA);
 }
 
 static int read_mii_word(struct net_device *netdev, int phy_id, int reg)
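ocp_reg_read()/ocp_reg_write() above page the PHY's 16-bit OCP address space: the top four bits select a 4 KB page through PLA_OCP_GPHY_BASE (cached in tp->ocp_base so the page write is skipped when it does not change), and the low 12 bits are addressed through the 0xb000 window of the PLA space. Below is a minimal user-space sketch of just that address split; the register values come from the defines above, the hardware access itself is left out, and main() is only illustrative.

#include <stdio.h>

#define OCP_BASE_MII	0xa400	/* from the defines above */

/* Page written to PLA_OCP_GPHY_BASE by the driver; here only computed. */
static unsigned int ocp_base_of(unsigned int addr)
{
	return addr & 0xf000;
}

/* Low 12 bits presented through the 0xb000 window of the PLA space. */
static unsigned int ocp_index_of(unsigned int addr)
{
	return (addr & 0x0fff) | 0xb000;
}

int main(void)
{
	/* MII register 2 (PHYSID1), addressed as OCP_BASE_MII + reg * 2 */
	unsigned int addr = OCP_BASE_MII + 2 * 2;

	/* prints "page 0xa000, window index 0xb404" */
	printf("page 0x%04x, window index 0x%04x\n",
	       ocp_base_of(addr), ocp_index_of(addr));
	return 0;
}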
@@ -715,20 +883,6 @@ void write_mii_word(struct net_device *netdev, int phy_id, int reg, int val)
 	r8152_mdio_write(tp, reg, val);
 }
 
-static void ocp_reg_write(struct r8152 *tp, u16 addr, u16 data)
-{
-	u16 ocp_base, ocp_index;
-
-	ocp_base = addr & 0xf000;
-	if (ocp_base != tp->ocp_base) {
-		ocp_write_word(tp, MCU_TYPE_PLA, PLA_OCP_GPHY_BASE, ocp_base);
-		tp->ocp_base = ocp_base;
-	}
-
-	ocp_index = (addr & 0x0fff) | 0xb000;
-	ocp_write_word(tp, MCU_TYPE_PLA, ocp_index, data);
-}
-
 static
 int r8152_submit_rx(struct r8152 *tp, struct rx_agg *agg, gfp_t mem_flags);
 
@@ -814,10 +968,12 @@ static void read_bulk_callback(struct urb *urb)
 	case -ENOENT:
 		return;	/* the urb is in unlink state */
 	case -ETIME:
-		pr_warn_ratelimited("may be reset is needed?..\n");
+		if (net_ratelimit())
+			netdev_warn(netdev, "maybe reset is needed?\n");
 		break;
 	default:
-		pr_warn_ratelimited("Rx status %d\n", status);
+		if (net_ratelimit())
+			netdev_warn(netdev, "Rx status %d\n", status);
 		break;
 	}
 
@@ -850,7 +1006,8 @@ static void write_bulk_callback(struct urb *urb)
 
 	stats = rtl8152_get_stats(tp->netdev);
 	if (status) {
-		pr_warn_ratelimited("Tx status %d\n", status);
+		if (net_ratelimit())
+			netdev_warn(tp->netdev, "Tx status %d\n", status);
 		stats->tx_errors += agg->skb_num;
 	} else {
 		stats->tx_packets += agg->skb_num;
@@ -927,17 +1084,17 @@ resubmit:
 		netif_device_detach(tp->netdev);
 	else if (res)
 		netif_err(tp, intr, tp->netdev,
-			"can't resubmit intr, status %d\n", res);
+			  "can't resubmit intr, status %d\n", res);
 }
 
 static inline void *rx_agg_align(void *data)
 {
-	return (void *)ALIGN((uintptr_t)data, 8);
+	return (void *)ALIGN((uintptr_t)data, RX_ALIGN);
 }
 
 static inline void *tx_agg_align(void *data)
 {
-	return (void *)ALIGN((uintptr_t)data, 4);
+	return (void *)ALIGN((uintptr_t)data, TX_ALIGN);
 }
 
 static void free_all_mem(struct r8152 *tp)
@@ -945,40 +1102,28 @@ static void free_all_mem(struct r8152 *tp)
 	int i;
 
 	for (i = 0; i < RTL8152_MAX_RX; i++) {
-		if (tp->rx_info[i].urb) {
-			usb_free_urb(tp->rx_info[i].urb);
-			tp->rx_info[i].urb = NULL;
-		}
+		usb_free_urb(tp->rx_info[i].urb);
+		tp->rx_info[i].urb = NULL;
 
-		if (tp->rx_info[i].buffer) {
-			kfree(tp->rx_info[i].buffer);
-			tp->rx_info[i].buffer = NULL;
-			tp->rx_info[i].head = NULL;
-		}
+		kfree(tp->rx_info[i].buffer);
+		tp->rx_info[i].buffer = NULL;
+		tp->rx_info[i].head = NULL;
 	}
 
 	for (i = 0; i < RTL8152_MAX_TX; i++) {
-		if (tp->tx_info[i].urb) {
-			usb_free_urb(tp->tx_info[i].urb);
-			tp->tx_info[i].urb = NULL;
-		}
+		usb_free_urb(tp->tx_info[i].urb);
+		tp->tx_info[i].urb = NULL;
 
-		if (tp->tx_info[i].buffer) {
-			kfree(tp->tx_info[i].buffer);
-			tp->tx_info[i].buffer = NULL;
-			tp->tx_info[i].head = NULL;
-		}
+		kfree(tp->tx_info[i].buffer);
+		tp->tx_info[i].buffer = NULL;
+		tp->tx_info[i].head = NULL;
 	}
 
-	if (tp->intr_urb) {
-		usb_free_urb(tp->intr_urb);
-		tp->intr_urb = NULL;
-	}
+	usb_free_urb(tp->intr_urb);
+	tp->intr_urb = NULL;
 
-	if (tp->intr_buff) {
-		kfree(tp->intr_buff);
-		tp->intr_buff = NULL;
-	}
+	kfree(tp->intr_buff);
+	tp->intr_buff = NULL;
 }
 
 static int alloc_all_mem(struct r8152 *tp)
@@ -1006,7 +1151,8 @@ static int alloc_all_mem(struct r8152 *tp)
 
 		if (buf != rx_agg_align(buf)) {
 			kfree(buf);
-			buf = kmalloc_node(rx_buf_sz + 8, GFP_KERNEL, node);
+			buf = kmalloc_node(rx_buf_sz + RX_ALIGN, GFP_KERNEL,
+					   node);
 			if (!buf)
 				goto err1;
 		}
@@ -1031,7 +1177,8 @@ static int alloc_all_mem(struct r8152 *tp)
 
 		if (buf != tx_agg_align(buf)) {
 			kfree(buf);
-			buf = kmalloc_node(rx_buf_sz + 4, GFP_KERNEL, node);
+			buf = kmalloc_node(rx_buf_sz + TX_ALIGN, GFP_KERNEL,
+					   node);
 			if (!buf)
 				goto err1;
 		}
@@ -1231,7 +1378,7 @@ static void rx_bottom(struct r8152 *tp)
 
 			stats = rtl8152_get_stats(netdev);
 
-			pkt_len -= 4; /* CRC */
+			pkt_len -= CRC_SIZE;
 			rx_data += sizeof(struct rx_desc);
 
 			skb = netdev_alloc_skb_ip_align(netdev, pkt_len);
@@ -1246,7 +1393,7 @@ static void rx_bottom(struct r8152 *tp)
 			stats->rx_packets++;
 			stats->rx_bytes += pkt_len;
 
-			rx_data = rx_agg_align(rx_data + pkt_len + 4);
+			rx_data = rx_agg_align(rx_data + pkt_len + CRC_SIZE);
 			rx_desc = (struct rx_desc *)rx_data;
 			len_used = (int)(rx_data - (u8 *)agg->head);
 			len_used += sizeof(struct rx_desc);
@@ -1336,7 +1483,7 @@ static void rtl8152_tx_timeout(struct net_device *netdev)
 	struct r8152 *tp = netdev_priv(netdev);
 	int i;
 
-	netif_warn(tp, tx_err, netdev, "Tx timeout.\n");
+	netif_warn(tp, tx_err, netdev, "Tx timeout\n");
 	for (i = 0; i < RTL8152_MAX_TX; i++)
 		usb_unlink_urb(tp->tx_info[i].urb);
 }
@@ -1449,13 +1596,11 @@ static inline u8 rtl8152_get_speed(struct r8152 *tp)
 	return ocp_read_byte(tp, MCU_TYPE_PLA, PLA_PHYSTATUS);
 }
 
-static int rtl8152_enable(struct r8152 *tp)
+static void rtl_set_eee_plus(struct r8152 *tp)
 {
 	u32 ocp_data;
-	int i, ret;
 	u8 speed;
 
-	set_tx_qlen(tp);
 	speed = rtl8152_get_speed(tp);
 	if (speed & _10bps) {
 		ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEEP_CR);
@@ -1466,6 +1611,12 @@ static int rtl8152_enable(struct r8152 *tp)
 		ocp_data &= ~EEEP_CR_EEEP_TX;
 		ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEEP_CR, ocp_data);
 	}
+}
+
+static int rtl_enable(struct r8152 *tp)
+{
+	u32 ocp_data;
+	int i, ret;
 
 	r8152b_reset_packet_filter(tp);
 
@@ -1487,6 +1638,47 @@ static int rtl8152_enable(struct r8152 *tp)
 	return ret;
 }
 
+static int rtl8152_enable(struct r8152 *tp)
+{
+	set_tx_qlen(tp);
+	rtl_set_eee_plus(tp);
+
+	return rtl_enable(tp);
+}
+
+static void r8153_set_rx_agg(struct r8152 *tp)
+{
+	u8 speed;
+
+	speed = rtl8152_get_speed(tp);
+	if (speed & _1000bps) {
+		if (tp->udev->speed == USB_SPEED_SUPER) {
+			ocp_write_dword(tp, MCU_TYPE_USB, USB_RX_BUF_TH,
+					RX_THR_SUPPER);
+			ocp_write_dword(tp, MCU_TYPE_USB, USB_RX_EARLY_AGG,
+					EARLY_AGG_SUPPER);
+		} else {
+			ocp_write_dword(tp, MCU_TYPE_USB, USB_RX_BUF_TH,
+					RX_THR_HIGH);
+			ocp_write_dword(tp, MCU_TYPE_USB, USB_RX_EARLY_AGG,
+					EARLY_AGG_HIGH);
+		}
+	} else {
+		ocp_write_dword(tp, MCU_TYPE_USB, USB_RX_BUF_TH, RX_THR_SLOW);
+		ocp_write_dword(tp, MCU_TYPE_USB, USB_RX_EARLY_AGG,
+				EARLY_AGG_SLOW);
+	}
+}
+
+static int rtl8153_enable(struct r8152 *tp)
+{
+	set_tx_qlen(tp);
+	rtl_set_eee_plus(tp);
+	r8153_set_rx_agg(tp);
+
+	return rtl_enable(tp);
+}
+
 static void rtl8152_disable(struct r8152 *tp)
 {
 	struct net_device_stats *stats = rtl8152_get_stats(tp->netdev);
@@ -1596,7 +1788,7 @@ static void r8152b_exit_oob(struct r8152 *tp)
 	ocp_write_dword(tp, MCU_TYPE_PLA, PLA_TXFIFO_CTRL, TXFIFO_THR_NORMAL);
 
 	ocp_write_byte(tp, MCU_TYPE_USB, USB_TX_AGG, TX_AGG_MAX_THRESHOLD);
-	ocp_write_dword(tp, MCU_TYPE_USB, USB_RX_BUF_TH, RX_BUF_THR);
+	ocp_write_dword(tp, MCU_TYPE_USB, USB_RX_BUF_TH, RX_THR_HIGH);
 	ocp_write_dword(tp, MCU_TYPE_USB, USB_TX_DMA,
 			TEST_MODE_DISABLE | TX_SIZE_ADJUST1);
 
@@ -1613,8 +1805,8 @@ static void r8152b_exit_oob(struct r8152 *tp)
 
 static void r8152b_enter_oob(struct r8152 *tp)
 {
-	u32	ocp_data;
-	int	i;
+	u32 ocp_data;
+	int i;
 
 	ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
 	ocp_data &= ~NOW_IS_OOB;
@@ -1685,15 +1877,269 @@ static inline void r8152b_enable_aldps(struct r8152 *tp)
 					    LINKENA | DIS_SDSAVE);
 }
 
+static void r8153_hw_phy_cfg(struct r8152 *tp)
+{
+	u32 ocp_data;
+	u16 data;
+
+	ocp_reg_write(tp, OCP_ADC_CFG, CKADSEL_L | ADC_EN | EN_EMI_L);
+	r8152_mdio_write(tp, MII_BMCR, BMCR_ANENABLE);
+
+	if (tp->version == RTL_VER_03) {
+		data = ocp_reg_read(tp, OCP_EEE_CFG);
+		data &= ~CTAP_SHORT_EN;
+		ocp_reg_write(tp, OCP_EEE_CFG, data);
+	}
+
+	data = ocp_reg_read(tp, OCP_POWER_CFG);
+	data |= EEE_CLKDIV_EN;
+	ocp_reg_write(tp, OCP_POWER_CFG, data);
+
+	data = ocp_reg_read(tp, OCP_DOWN_SPEED);
+	data |= EN_10M_BGOFF;
+	ocp_reg_write(tp, OCP_DOWN_SPEED, data);
+	data = ocp_reg_read(tp, OCP_POWER_CFG);
+	data |= EN_10M_PLLOFF;
+	ocp_reg_write(tp, OCP_POWER_CFG, data);
+	data = sram_read(tp, SRAM_IMPEDANCE);
+	data &= ~RX_DRIVING_MASK;
+	sram_write(tp, SRAM_IMPEDANCE, data);
+
+	ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR);
+	ocp_data |= PFM_PWM_SWITCH;
+	ocp_write_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR, ocp_data);
+
+	data = sram_read(tp, SRAM_LPF_CFG);
+	data |= LPF_AUTO_TUNE;
+	sram_write(tp, SRAM_LPF_CFG, data);
+
+	data = sram_read(tp, SRAM_10M_AMP1);
+	data |= GDAC_IB_UPALL;
+	sram_write(tp, SRAM_10M_AMP1, data);
+	data = sram_read(tp, SRAM_10M_AMP2);
+	data |= AMP_DN;
+	sram_write(tp, SRAM_10M_AMP2, data);
+}
+
+static void r8153_u1u2en(struct r8152 *tp, int enable)
+{
+	u8 u1u2[8];
+
+	if (enable)
+		memset(u1u2, 0xff, sizeof(u1u2));
+	else
+		memset(u1u2, 0x00, sizeof(u1u2));
+
+	usb_ocp_write(tp, USB_TOLERANCE, BYTE_EN_SIX_BYTES, sizeof(u1u2), u1u2);
+}
+
+static void r8153_u2p3en(struct r8152 *tp, int enable)
+{
+	u32 ocp_data;
+
+	ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_U2P3_CTRL);
+	if (enable)
+		ocp_data |= U2P3_ENABLE;
+	else
+		ocp_data &= ~U2P3_ENABLE;
+	ocp_write_word(tp, MCU_TYPE_USB, USB_U2P3_CTRL, ocp_data);
+}
+
+static void r8153_power_cut_en(struct r8152 *tp, int enable)
+{
+	u32 ocp_data;
+
+	ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_POWER_CUT);
+	if (enable)
+		ocp_data |= PWR_EN | PHASE2_EN;
+	else
+		ocp_data &= ~(PWR_EN | PHASE2_EN);
+	ocp_write_word(tp, MCU_TYPE_USB, USB_POWER_CUT, ocp_data);
+
+	ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0);
+	ocp_data &= ~PCUT_STATUS;
+	ocp_write_word(tp, MCU_TYPE_USB, USB_MISC_0, ocp_data);
+}
+
+static void r8153_teredo_off(struct r8152 *tp)
+{
+	u32 ocp_data;
+
+	ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_TEREDO_CFG);
+	ocp_data &= ~(TEREDO_SEL | TEREDO_RS_EVENT_MASK | OOB_TEREDO_EN);
+	ocp_write_word(tp, MCU_TYPE_PLA, PLA_TEREDO_CFG, ocp_data);
+
+	ocp_write_word(tp, MCU_TYPE_PLA, PLA_WDT6_CTRL, WDT6_SET_MODE);
+	ocp_write_word(tp, MCU_TYPE_PLA, PLA_REALWOW_TIMER, 0);
+	ocp_write_dword(tp, MCU_TYPE_PLA, PLA_TEREDO_TIMER, 0);
+}
+
+static void r8153_first_init(struct r8152 *tp)
+{
+	u32 ocp_data;
+	int i;
+
+	ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MISC_1);
+	ocp_data |= RXDY_GATED_EN;
+	ocp_write_word(tp, MCU_TYPE_PLA, PLA_MISC_1, ocp_data);
+
+	r8153_teredo_off(tp);
+
+	ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR);
+	ocp_data &= ~RCR_ACPT_ALL;
+	ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data);
+
+	r8153_hw_phy_cfg(tp);
+
+	rtl8152_nic_reset(tp);
+
+	ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
+	ocp_data &= ~NOW_IS_OOB;
+	ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data);
+
+	ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7);
+	ocp_data &= ~MCU_BORW_EN;
+	ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data);
+
+	for (i = 0; i < 1000; i++) {
+		ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
+		if (ocp_data & LINK_LIST_READY)
+			break;
+		mdelay(1);
+	}
+
+	ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7);
+	ocp_data |= RE_INIT_LL;
+	ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data);
+
+	for (i = 0; i < 1000; i++) {
+		ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
+		if (ocp_data & LINK_LIST_READY)
+			break;
+		mdelay(1);
+	}
+
+	ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CPCR);
+	ocp_data &= ~CPCR_RX_VLAN;
+	ocp_write_word(tp, MCU_TYPE_PLA, PLA_CPCR, ocp_data);
+
+	ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, RTL8152_RMS);
+
+	ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_TCR0);
+	ocp_data |= TCR0_AUTO_FIFO;
+	ocp_write_word(tp, MCU_TYPE_PLA, PLA_TCR0, ocp_data);
+
+	rtl8152_nic_reset(tp);
+
+	/* rx share fifo credit full threshold */
+	ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL0, RXFIFO_THR1_NORMAL);
+	ocp_write_word(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL1, RXFIFO_THR2_NORMAL);
+	ocp_write_word(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL2, RXFIFO_THR3_NORMAL);
+	/* TX share fifo free credit full threshold */
+	ocp_write_dword(tp, MCU_TYPE_PLA, PLA_TXFIFO_CTRL, TXFIFO_THR_NORMAL2);
+
+	/* rx aggregation */
+	ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL);
+	ocp_data &= ~RX_AGG_DISABLE;
+	ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data);
+}
+
+static void r8153_enter_oob(struct r8152 *tp)
+{
+	u32 ocp_data;
+	int i;
+
+	ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
+	ocp_data &= ~NOW_IS_OOB;
+	ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data);
+
+	rtl8152_disable(tp);
+
+	for (i = 0; i < 1000; i++) {
+		ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
+		if (ocp_data & LINK_LIST_READY)
+			break;
+		mdelay(1);
+	}
+
+	ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7);
+	ocp_data |= RE_INIT_LL;
+	ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data);
+
+	for (i = 0; i < 1000; i++) {
+		ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
+		if (ocp_data & LINK_LIST_READY)
+			break;
+		mdelay(1);
+	}
+
+	ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, RTL8152_RMS);
+
+	ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CFG_WOL);
+	ocp_data |= MAGIC_EN;
+	ocp_write_word(tp, MCU_TYPE_PLA, PLA_CFG_WOL, ocp_data);
+
+	ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_TEREDO_CFG);
+	ocp_data &= ~TEREDO_WAKE_MASK;
+	ocp_write_word(tp, MCU_TYPE_PLA, PLA_TEREDO_CFG, ocp_data);
+
+	ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CPCR);
+	ocp_data |= CPCR_RX_VLAN;
+	ocp_write_word(tp, MCU_TYPE_PLA, PLA_CPCR, ocp_data);
+
+	ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PAL_BDC_CR);
+	ocp_data |= ALDPS_PROXY_MODE;
+	ocp_write_word(tp, MCU_TYPE_PLA, PAL_BDC_CR, ocp_data);
+
+	ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
+	ocp_data |= NOW_IS_OOB | DIS_MCU_CLROOB;
+	ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data);
+
+	ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CONFIG5, LAN_WAKE_EN);
+
+	ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MISC_1);
+	ocp_data &= ~RXDY_GATED_EN;
+	ocp_write_word(tp, MCU_TYPE_PLA, PLA_MISC_1, ocp_data);
+
+	ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR);
+	ocp_data |= RCR_APM | RCR_AM | RCR_AB;
+	ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data);
+}
+
+static void r8153_disable_aldps(struct r8152 *tp)
+{
+	u16 data;
+
+	data = ocp_reg_read(tp, OCP_POWER_CFG);
+	data &= ~EN_ALDPS;
+	ocp_reg_write(tp, OCP_POWER_CFG, data);
+	msleep(20);
+}
+
+static void r8153_enable_aldps(struct r8152 *tp)
+{
+	u16 data;
+
+	data = ocp_reg_read(tp, OCP_POWER_CFG);
+	data |= EN_ALDPS;
+	ocp_reg_write(tp, OCP_POWER_CFG, data);
+}
+
 static int rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u16 speed, u8 duplex)
 {
-	u16 bmcr, anar;
+	u16 bmcr, anar, gbcr;
 	int ret = 0;
 
 	cancel_delayed_work_sync(&tp->schedule);
 	anar = r8152_mdio_read(tp, MII_ADVERTISE);
 	anar &= ~(ADVERTISE_10HALF | ADVERTISE_10FULL |
 		  ADVERTISE_100HALF | ADVERTISE_100FULL);
+	if (tp->mii.supports_gmii) {
+		gbcr = r8152_mdio_read(tp, MII_CTRL1000);
+		gbcr &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
+	} else {
+		gbcr = 0;
+	}
 
 	if (autoneg == AUTONEG_DISABLE) {
 		if (speed == SPEED_10) {
@@ -1702,6 +2148,9 @@ static int rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u16 speed, u8 duplex)
 		} else if (speed == SPEED_100) {
 			bmcr = BMCR_SPEED100;
 			anar |= ADVERTISE_100HALF | ADVERTISE_100FULL;
+		} else if (speed == SPEED_1000 && tp->mii.supports_gmii) {
+			bmcr = BMCR_SPEED1000;
+			gbcr |= ADVERTISE_1000FULL | ADVERTISE_1000HALF;
 		} else {
 			ret = -EINVAL;
 			goto out;
@@ -1723,6 +2172,16 @@ static int rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u16 speed, u8 duplex)
 				anar |= ADVERTISE_10HALF;
 				anar |= ADVERTISE_100HALF;
 			}
+		} else if (speed == SPEED_1000 && tp->mii.supports_gmii) {
+			if (duplex == DUPLEX_FULL) {
+				anar |= ADVERTISE_10HALF | ADVERTISE_10FULL;
+				anar |= ADVERTISE_100HALF | ADVERTISE_100FULL;
+				gbcr |= ADVERTISE_1000FULL | ADVERTISE_1000HALF;
+			} else {
+				anar |= ADVERTISE_10HALF;
+				anar |= ADVERTISE_100HALF;
+				gbcr |= ADVERTISE_1000HALF;
+			}
 		} else {
 			ret = -EINVAL;
 			goto out;
@@ -1731,6 +2190,9 @@ static int rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u16 speed, u8 duplex)
 		bmcr = BMCR_ANENABLE | BMCR_ANRESTART;
 	}
 
+	if (tp->mii.supports_gmii)
+		r8152_mdio_write(tp, MII_CTRL1000, gbcr);
+
 	r8152_mdio_write(tp, MII_ADVERTISE, anar);
 	r8152_mdio_write(tp, MII_BMCR, bmcr);
 
@@ -1752,6 +2214,15 @@ static void rtl8152_down(struct r8152 *tp)
 	r8152b_enable_aldps(tp);
 }
 
+static void rtl8153_down(struct r8152 *tp)
+{
+	r8153_u1u2en(tp, 0);
+	r8153_power_cut_en(tp, 0);
+	r8153_disable_aldps(tp);
+	r8153_enter_oob(tp);
+	r8153_enable_aldps(tp);
+}
+
 static void set_carrier(struct r8152 *tp)
 {
 	struct net_device *netdev = tp->netdev;
@@ -1762,7 +2233,7 @@ static void set_carrier(struct r8152 *tp)
 
 	if (speed & LINK_STATUS) {
 		if (!(tp->speed & LINK_STATUS)) {
-			rtl8152_enable(tp);
+			tp->rtl_ops.enable(tp);
 			set_bit(RTL8152_SET_RX_MODE, &tp->flags);
 			netif_carrier_on(netdev);
 		}
@@ -1770,7 +2241,7 @@ static void set_carrier(struct r8152 *tp)
 		if (tp->speed & LINK_STATUS) {
 			netif_carrier_off(netdev);
 			tasklet_disable(&tp->tl);
-			rtl8152_disable(tp);
+			tp->rtl_ops.disable(tp);
 			tasklet_enable(&tp->tl);
 		}
 	}
@@ -1806,12 +2277,14 @@ static int rtl8152_open(struct net_device *netdev)
 	if (res) {
 		if (res == -ENODEV)
 			netif_device_detach(tp->netdev);
-		netif_warn(tp, ifup, netdev,
-			"intr_urb submit failed: %d\n", res);
+		netif_warn(tp, ifup, netdev, "intr_urb submit failed: %d\n",
+			   res);
 		return res;
 	}
 
-	rtl8152_set_speed(tp, AUTONEG_ENABLE, SPEED_100, DUPLEX_FULL);
+	rtl8152_set_speed(tp, AUTONEG_ENABLE,
+			  tp->mii.supports_gmii ? SPEED_1000 : SPEED_100,
+			  DUPLEX_FULL);
 	tp->speed = 0;
 	netif_carrier_off(netdev);
 	netif_start_queue(netdev);
@@ -1830,7 +2303,7 @@ static int rtl8152_close(struct net_device *netdev)
 	cancel_delayed_work_sync(&tp->schedule);
 	netif_stop_queue(netdev);
 	tasklet_disable(&tp->tl);
-	rtl8152_disable(tp);
+	tp->rtl_ops.disable(tp);
 	tasklet_enable(&tp->tl);
 
 	return res;
@@ -1851,9 +2324,16 @@ static void rtl_clear_bp(struct r8152 *tp)
 	ocp_write_word(tp, MCU_TYPE_USB, USB_BP_BA, 0);
 }
 
+static void r8153_clear_bp(struct r8152 *tp)
+{
+	ocp_write_byte(tp, MCU_TYPE_PLA, PLA_BP_EN, 0);
+	ocp_write_byte(tp, MCU_TYPE_USB, USB_BP_EN, 0);
+	rtl_clear_bp(tp);
+}
+
 static void r8152b_enable_eee(struct r8152 *tp)
 {
-	u32	ocp_data;
+	u32 ocp_data;
 
 	ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEE_CR);
 	ocp_data |= EEE_RX_EN | EEE_TX_EN;
@@ -1874,6 +2354,22 @@ static void r8152b_enable_eee(struct r8152 *tp)
 	ocp_reg_write(tp, OCP_EEE_AR, 0x0000);
 }
 
+static void r8153_enable_eee(struct r8152 *tp)
+{
+	u32 ocp_data;
+	u16 data;
+
+	ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEE_CR);
+	ocp_data |= EEE_RX_EN | EEE_TX_EN;
+	ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEE_CR, ocp_data);
+	data = ocp_reg_read(tp, OCP_EEE_CFG);
+	data |= EEE10_EN;
+	ocp_reg_write(tp, OCP_EEE_CFG, data);
+	data = ocp_reg_read(tp, OCP_EEE_CFG2);
+	data |= MY1000_EEE | MY100_EEE;
+	ocp_reg_write(tp, OCP_EEE_CFG2, data);
+}
+
 static void r8152b_enable_fc(struct r8152 *tp)
 {
 	u16 anar;
@@ -1909,7 +2405,7 @@ static void r8152b_init(struct r8152 *tp)
 	ocp_write_word(tp, MCU_TYPE_USB, USB_UPS_CTRL, ocp_data);
 
 	ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_PM_CTRL_STATUS);
-	ocp_data &= ~RWSUME_INDICATE;
+	ocp_data &= ~RESUME_INDICATE;
 	ocp_write_word(tp, MCU_TYPE_USB, USB_PM_CTRL_STATUS, ocp_data);
 
 	r8152b_exit_oob(tp);
@@ -1943,6 +2439,75 @@ static void r8152b_init(struct r8152 *tp)
 	ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data);
 }
 
+static void r8153_init(struct r8152 *tp)
+{
+	u32 ocp_data;
+	int i;
+
+	r8153_u1u2en(tp, 0);
+
+	for (i = 0; i < 500; i++) {
+		if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_BOOT_CTRL) &
+		    AUTOLOAD_DONE)
+			break;
+		msleep(20);
+	}
+
+	for (i = 0; i < 500; i++) {
+		ocp_data = ocp_reg_read(tp, OCP_PHY_STATUS) & PHY_STAT_MASK;
+		if (ocp_data == PHY_STAT_LAN_ON || ocp_data == PHY_STAT_PWRDN)
+			break;
+		msleep(20);
+	}
+
+	r8153_u2p3en(tp, 0);
+
+	ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_WDT11_CTRL);
+	ocp_data &= ~TIMER11_EN;
+	ocp_write_word(tp, MCU_TYPE_USB, USB_WDT11_CTRL, ocp_data);
+
+	r8153_clear_bp(tp);
+
+	ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_LED_FEATURE);
+	ocp_data &= ~LED_MODE_MASK;
+	ocp_write_word(tp, MCU_TYPE_PLA, PLA_LED_FEATURE, ocp_data);
+
+	ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_LPM_CTRL);
+	ocp_data &= ~LPM_TIMER_MASK;
+	if (tp->udev->speed == USB_SPEED_SUPER)
+		ocp_data |= LPM_TIMER_500US;
+	else
+		ocp_data |= LPM_TIMER_500MS;
+	ocp_write_byte(tp, MCU_TYPE_USB, USB_LPM_CTRL, ocp_data);
+
+	ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_AFE_CTRL2);
+	ocp_data &= ~SEN_VAL_MASK;
+	ocp_data |= SEN_VAL_NORMAL | SEL_RXIDLE;
+	ocp_write_word(tp, MCU_TYPE_USB, USB_AFE_CTRL2, ocp_data);
+
+	r8153_power_cut_en(tp, 0);
+	r8153_u1u2en(tp, 1);
+
+	r8153_first_init(tp);
+
+	ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL, ALDPS_SPDWN_RATIO);
+	ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2, EEE_SPDWN_RATIO);
+	ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3,
+		       PKT_AVAIL_SPDWN_EN | SUSPEND_SPDWN_EN |
+		       U1U2_SPDWN_EN | L1_SPDWN_EN);
+	ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4,
+		       PWRSAVE_SPDWN_EN | RXDV_SPDWN_EN | TX10MIDLE_EN |
+		       TP100_SPDWN_EN | TP500_SPDWN_EN | TP1000_SPDWN_EN |
+		       EEE_SPDWN_EN);
+
+	r8153_enable_eee(tp);
+	r8153_enable_aldps(tp);
+	r8152b_enable_fc(tp);
+
+	r8152_mdio_write(tp, MII_BMCR, BMCR_RESET | BMCR_ANENABLE |
+				       BMCR_ANRESTART);
+}
+
 static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message)
 {
 	struct r8152 *tp = usb_get_intfdata(intf);
@@ -1956,7 +2521,7 @@ static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message)
 		tasklet_disable(&tp->tl);
 	}
 
-	rtl8152_down(tp);
+	tp->rtl_ops.down(tp);
 
 	return 0;
 }
@@ -1965,10 +2530,12 @@ static int rtl8152_resume(struct usb_interface *intf)
 {
 	struct r8152 *tp = usb_get_intfdata(intf);
 
-	r8152b_init(tp);
+	tp->rtl_ops.init(tp);
 	netif_device_attach(tp->netdev);
 	if (netif_running(tp->netdev)) {
-		rtl8152_set_speed(tp, AUTONEG_ENABLE, SPEED_100, DUPLEX_FULL);
+		rtl8152_set_speed(tp, AUTONEG_ENABLE,
+				tp->mii.supports_gmii ? SPEED_1000 : SPEED_100,
+				DUPLEX_FULL);
 		tp->speed = 0;
 		netif_carrier_off(tp->netdev);
 		set_bit(WORK_ENABLE, &tp->flags);
@@ -2072,6 +2639,18 @@ static void r8152b_get_version(struct r8152 *tp)
 	case 0x4c10:
 		tp->version = RTL_VER_02;
 		break;
+	case 0x5c00:
+		tp->version = RTL_VER_03;
+		tp->mii.supports_gmii = 1;
+		break;
+	case 0x5c10:
+		tp->version = RTL_VER_04;
+		tp->mii.supports_gmii = 1;
+		break;
+	case 0x5c20:
+		tp->version = RTL_VER_05;
+		tp->mii.supports_gmii = 1;
+		break;
 	default:
 		netif_info(tp, probe, tp->netdev,
 			   "Unknown version 0x%04x\n", version);
@@ -2079,6 +2658,80 @@ static void r8152b_get_version(struct r8152 *tp)
 	}
 }
 
+static void rtl8152_unload(struct r8152 *tp)
+{
+	u32	ocp_data;
+
+	if (tp->version != RTL_VER_01) {
+		ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_UPS_CTRL);
+		ocp_data |= POWER_CUT;
+		ocp_write_word(tp, MCU_TYPE_USB, USB_UPS_CTRL, ocp_data);
+	}
+
+	ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_PM_CTRL_STATUS);
+	ocp_data &= ~RESUME_INDICATE;
+	ocp_write_word(tp, MCU_TYPE_USB, USB_PM_CTRL_STATUS, ocp_data);
+}
+
+static void rtl8153_unload(struct r8152 *tp)
+{
+	r8153_power_cut_en(tp, 1);
+}
+
+static int rtl_ops_init(struct r8152 *tp, const struct usb_device_id *id)
+{
+	struct rtl_ops *ops = &tp->rtl_ops;
+	int ret = -ENODEV;
+
+	switch (id->idVendor) {
+	case VENDOR_ID_REALTEK:
+		switch (id->idProduct) {
+		case PRODUCT_ID_RTL8152:
+			ops->init		= r8152b_init;
+			ops->enable		= rtl8152_enable;
+			ops->disable		= rtl8152_disable;
+			ops->down		= rtl8152_down;
+			ops->unload		= rtl8152_unload;
+			ret = 0;
+			break;
+		case PRODUCT_ID_RTL8153:
+			ops->init		= r8153_init;
+			ops->enable		= rtl8153_enable;
+			ops->disable		= rtl8152_disable;
+			ops->down		= rtl8153_down;
+			ops->unload		= rtl8153_unload;
+			ret = 0;
+			break;
+		default:
+			break;
+		}
+		break;
+
+	case VENDOR_ID_SAMSUNG:
+		switch (id->idProduct) {
+		case PRODUCT_ID_SAMSUNG:
+			ops->init		= r8153_init;
+			ops->enable		= rtl8153_enable;
+			ops->disable		= rtl8152_disable;
+			ops->down		= rtl8153_down;
+			ops->unload		= rtl8153_unload;
+			ret = 0;
+			break;
+		default:
+			break;
+		}
+		break;
+
+	default:
+		break;
+	}
+
+	if (ret)
+		netif_err(tp, probe, tp->netdev, "Unknown Device\n");
+
+	return ret;
+}
+
 static int rtl8152_probe(struct usb_interface *intf,
 			 const struct usb_device_id *id)
 {
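rtl_ops_init() above fills a per-device method table once at probe time, after which the rest of the driver calls tp->rtl_ops.init()/enable()/disable()/down()/unload() without re-checking which chip is attached. The following is a stripped-down, compilable sketch of the same dispatch pattern; the chip operations are stand-in stubs and only the structure mirrors the driver.

#include <stdio.h>

struct chip;

struct chip_ops {
	void (*init)(struct chip *);
	void (*down)(struct chip *);
};

struct chip {
	const char *name;
	struct chip_ops ops;
};

/* Stand-ins for the per-variant init/down routines. */
static void init_a(struct chip *c) { printf("%s: variant-A init\n", c->name); }
static void init_b(struct chip *c) { printf("%s: variant-B init\n", c->name); }
static void down_common(struct chip *c) { printf("%s: down\n", c->name); }

/* Pick the method table from an ID, as rtl_ops_init() does from USB IDs. */
static int ops_init(struct chip *c, unsigned int product_id)
{
	switch (product_id) {
	case 0x8152:
		c->ops.init = init_a;
		c->ops.down = down_common;
		return 0;
	case 0x8153:
		c->ops.init = init_b;
		c->ops.down = down_common;
		return 0;
	default:
		return -1;	/* unknown device */
	}
}

int main(void)
{
	struct chip c = { .name = "eth0" };

	if (ops_init(&c, 0x8153))
		return 1;
	c.ops.init(&c);		/* callers never test the chip type again */
	c.ops.down(&c);
	return 0;
}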
@@ -2087,14 +2740,9 @@ static int rtl8152_probe(struct usb_interface *intf,
 	struct net_device *netdev;
 	int ret;
 
-	if (udev->actconfig->desc.bConfigurationValue != 1) {
-		usb_driver_set_configuration(udev, 1);
-		return -ENODEV;
-	}
-
 	netdev = alloc_etherdev(sizeof(struct r8152));
 	if (!netdev) {
-		dev_err(&intf->dev, "Out of memory");
+		dev_err(&intf->dev, "Out of memory\n");
 		return -ENOMEM;
 	}
 
@@ -2102,12 +2750,17 @@ static int rtl8152_probe(struct usb_interface *intf,
 	tp = netdev_priv(netdev);
 	tp->msg_enable = 0x7FFF;
 
-	tasklet_init(&tp->tl, bottom_half, (unsigned long)tp);
-	INIT_DELAYED_WORK(&tp->schedule, rtl_work_func_t);
-
 	tp->udev = udev;
 	tp->netdev = netdev;
 	tp->intf = intf;
+
+	ret = rtl_ops_init(tp, id);
+	if (ret)
+		goto out;
+
+	tasklet_init(&tp->tl, bottom_half, (unsigned long)tp);
+	INIT_DELAYED_WORK(&tp->schedule, rtl_work_func_t);
+
 	netdev->netdev_ops = &rtl8152_netdev_ops;
 	netdev->watchdog_timeo = RTL8152_TX_TIMEOUT;
 
@@ -2124,7 +2777,7 @@ static int rtl8152_probe(struct usb_interface *intf,
 	tp->mii.supports_gmii = 0;
 
 	r8152b_get_version(tp);
-	r8152b_init(tp);
+	tp->rtl_ops.init(tp);
 	set_ethernet_addr(tp);
 
 	ret = alloc_all_mem(tp);
@@ -2135,11 +2788,11 @@ static int rtl8152_probe(struct usb_interface *intf,
 
 	ret = register_netdev(netdev);
 	if (ret != 0) {
-		netif_err(tp, probe, netdev, "couldn't register the device");
+		netif_err(tp, probe, netdev, "couldn't register the device\n");
 		goto out1;
 	}
 
-	netif_info(tp, probe, netdev, "%s", DRIVER_VERSION);
+	netif_info(tp, probe, netdev, "%s\n", DRIVER_VERSION);
 
 	return 0;
 
@@ -2150,21 +2803,6 @@ out:
 	return ret;
 }
 
-static void rtl8152_unload(struct r8152 *tp)
-{
-	u32	ocp_data;
-
-	if (tp->version != RTL_VER_01) {
-		ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_UPS_CTRL);
-		ocp_data |= POWER_CUT;
-		ocp_write_word(tp, MCU_TYPE_USB, USB_UPS_CTRL, ocp_data);
-	}
-
-	ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_PM_CTRL_STATUS);
-	ocp_data &= ~RWSUME_INDICATE;
-	ocp_write_word(tp, MCU_TYPE_USB, USB_PM_CTRL_STATUS, ocp_data);
-}
-
 static void rtl8152_disconnect(struct usb_interface *intf)
 {
 	struct r8152 *tp = usb_get_intfdata(intf);
@@ -2174,7 +2812,7 @@ static void rtl8152_disconnect(struct usb_interface *intf)
 		set_bit(RTL8152_UNPLUG, &tp->flags);
 		tasklet_kill(&tp->tl);
 		unregister_netdev(tp->netdev);
-		rtl8152_unload(tp);
+		tp->rtl_ops.unload(tp);
 		free_all_mem(tp);
 		free_netdev(tp->netdev);
 	}
@@ -2182,7 +2820,9 @@ static void rtl8152_disconnect(struct usb_interface *intf)
 
 /* table of devices that work with this driver */
 static struct usb_device_id rtl8152_table[] = {
-	{USB_DEVICE(VENDOR_ID_REALTEK, PRODUCT_ID_RTL8152)},
+	{REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, PRODUCT_ID_RTL8152)},
+	{REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, PRODUCT_ID_RTL8153)},
+	{REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, PRODUCT_ID_SAMSUNG)},
 	{}
 };
 
diff --git a/drivers/net/usb/r815x.c b/drivers/net/usb/r815x.c
index 2df2f4fb42a7..f0a8791b7636 100644
--- a/drivers/net/usb/r815x.c
+++ b/drivers/net/usb/r815x.c
@@ -216,21 +216,13 @@ static const struct usb_device_id products[] = {
 {
 	USB_DEVICE_AND_INTERFACE_INFO(REALTEK_VENDOR_ID, 0x8152, USB_CLASS_COMM,
 			USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
-#if defined(CONFIG_USB_RTL8152) || defined(CONFIG_USB_RTL8152_MODULE)
-	.driver_info = 0,
-#else
 	.driver_info = (unsigned long) &r8152_info,
-#endif
 },
 
 {
 	USB_DEVICE_AND_INTERFACE_INFO(REALTEK_VENDOR_ID, 0x8153, USB_CLASS_COMM,
 			USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
-#if defined(CONFIG_USB_RTL8153) || defined(CONFIG_USB_RTL8153_MODULE)
-	.driver_info = 0,
-#else
 	.driver_info = (unsigned long) &r8153_info,
-#endif
 },
 
 	{ },		/* END */
diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c
index cc49aac70224..a48bc0f20c1a 100644
--- a/drivers/net/usb/rndis_host.c
+++ b/drivers/net/usb/rndis_host.c
@@ -13,11 +13,9 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 #include <linux/module.h>
-#include <linux/init.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/ethtool.h>
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
index 6cbdac67f3a0..da2c4583bd2d 100644
--- a/drivers/net/usb/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
@@ -6,7 +6,6 @@
  * version 2 as published by the Free Software Foundation.
  */
 
-#include <linux/init.h>
 #include <linux/signal.h>
 #include <linux/slab.h>
 #include <linux/module.h>
diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
index a79e9d334928..a251588762ec 100644
--- a/drivers/net/usb/sierra_net.c
+++ b/drivers/net/usb/sierra_net.c
@@ -21,8 +21,7 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 #define DRIVER_VERSION "v.2.0"
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index 66ebbacf066f..f17b9e02dd34 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -13,14 +13,12 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
  *
  *****************************************************************************/
 
 #include <linux/module.h>
 #include <linux/kmod.h>
-#include <linux/init.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/ethtool.h>
diff --git a/drivers/net/usb/smsc75xx.h b/drivers/net/usb/smsc75xx.h
index 67eba39e6ee2..2c7ea8fd184f 100644
--- a/drivers/net/usb/smsc75xx.h
+++ b/drivers/net/usb/smsc75xx.h
@@ -13,8 +13,7 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
  *
  *****************************************************************************/
 
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 3f38ba868f61..8dd54a0f7b29 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -13,14 +13,12 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
  *
  *****************************************************************************/
 
 #include <linux/module.h>
 #include <linux/kmod.h>
-#include <linux/init.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/ethtool.h>
diff --git a/drivers/net/usb/smsc95xx.h b/drivers/net/usb/smsc95xx.h
index f360ee372554..526faa0c44e6 100644
--- a/drivers/net/usb/smsc95xx.h
+++ b/drivers/net/usb/smsc95xx.h
@@ -13,8 +13,7 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
  *
  *****************************************************************************/
 
diff --git a/drivers/net/usb/sr9700.c b/drivers/net/usb/sr9700.c
index 7ec3e0ee0783..99b69af14274 100644
--- a/drivers/net/usb/sr9700.c
+++ b/drivers/net/usb/sr9700.c
@@ -13,7 +13,6 @@
 #include <linux/module.h>
 #include <linux/sched.h>
 #include <linux/stddef.h>
-#include <linux/init.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/ethtool.h>
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index aba04f561760..4671da755e7b 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -14,8 +14,7 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 /*
diff --git a/drivers/net/usb/zaurus.c b/drivers/net/usb/zaurus.c
index 35c90307d473..6aaa6eb9df72 100644
--- a/drivers/net/usb/zaurus.c
+++ b/drivers/net/usb/zaurus.c
@@ -13,15 +13,13 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 // #define	DEBUG			// error path messages, extra info
 // #define	VERBOSE			// more; success messages
 
 #include <linux/module.h>
-#include <linux/init.h>
 #include <linux/netdevice.h>
 #include <linux/ethtool.h>
 #include <linux/workqueue.h>
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 5d776447d9c3..d75f8edf4fb3 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -13,8 +13,7 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 //#define DEBUG
 #include <linux/netdevice.h>
@@ -27,6 +26,7 @@
 #include <linux/if_vlan.h>
 #include <linux/slab.h>
 #include <linux/cpu.h>
+#include <linux/average.h>
 
 static int napi_weight = NAPI_POLL_WEIGHT;
 module_param(napi_weight, int, 0444);
@@ -37,11 +37,18 @@ module_param(gso, bool, 0444);
 
 /* FIXME: MTU in config. */
 #define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
-#define MERGE_BUFFER_LEN (ALIGN(GOOD_PACKET_LEN + \
-                                sizeof(struct virtio_net_hdr_mrg_rxbuf), \
-                                L1_CACHE_BYTES))
 #define GOOD_COPY_LEN	128
 
+/* Weight used for the RX packet size EWMA. The average packet size is used to
+ * determine the packet buffer size when refilling RX rings. As the entire RX
+ * ring may be refilled at once, the weight is chosen so that the EWMA will be
+ * insensitive to short-term, transient changes in packet size.
+ */
+#define RECEIVE_AVG_WEIGHT 64
+
+/* Minimum alignment for mergeable packet buffers. */
+#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, 256)
+
 #define VIRTNET_DRIVER_VERSION "1.0.0"
 
 struct virtnet_stats {
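RECEIVE_AVG_WEIGHT and MERGEABLE_BUFFER_ALIGN feed the buffer sizing added further down in get_mergeable_buf_len(): the running average of received packet lengths is clamped between GOOD_PACKET_LEN and one page minus the merge header, then rounded up to the alignment. A user-space sketch of that arithmetic follows, with a plain floating-point average standing in for the fixed-point EWMA from lib/average.c; the 4096-byte page and 12-byte virtio_net_hdr_mrg_rxbuf size are assumptions made for the example.

#include <stdio.h>

#define GOOD_PKT_LEN	1518U	/* ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN */
#define BUF_ALIGN	256U	/* stand-in for MERGEABLE_BUFFER_ALIGN */
#define PAGE_SZ		4096U	/* assumed page size */
#define MRG_HDR_LEN	12U	/* assumed sizeof(virtio_net_hdr_mrg_rxbuf) */
#define AVG_WEIGHT	64.0	/* RECEIVE_AVG_WEIGHT */

/* Same shape as get_mergeable_buf_len(), with a float EWMA for clarity. */
static unsigned int mergeable_buf_len(double avg_pkt_len)
{
	unsigned int len = (unsigned int)avg_pkt_len;

	if (len < GOOD_PKT_LEN)
		len = GOOD_PKT_LEN;
	if (len > PAGE_SZ - MRG_HDR_LEN)
		len = PAGE_SZ - MRG_HDR_LEN;
	len += MRG_HDR_LEN;
	return (len + BUF_ALIGN - 1) & ~(BUF_ALIGN - 1);	/* ALIGN() */
}

int main(void)
{
	double avg = 0.0;
	int i;

	/* Feed 64-byte packets; the heavy weight moves the average slowly. */
	for (i = 0; i < 200; i++)
		avg = avg ? avg + (64.0 - avg) / AVG_WEIGHT : 64.0;

	/* The clamp keeps small-packet buffers at 1536 bytes here. */
	printf("avg %.1f -> buffer %u bytes\n", avg, mergeable_buf_len(avg));

	/* A 9000-byte average is capped at one page: 4096 bytes. */
	printf("avg 9000 -> buffer %u bytes\n", mergeable_buf_len(9000.0));
	return 0;
}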
@@ -73,12 +80,15 @@ struct receive_queue {
 
 	struct napi_struct napi;
 
-	/* Number of input buffers, and max we've ever had. */
-	unsigned int num, max;
-
 	/* Chain pages by the private ptr. */
 	struct page *pages;
 
+	/* Average packet length for mergeable receive buffers. */
+	struct ewma mrg_avg_pkt_len;
+
+	/* Page frag for packet buffer allocation. */
+	struct page_frag alloc_frag;
+
 	/* RX: fragments + linear part + virtio header */
 	struct scatterlist sg[MAX_SKB_FRAGS + 2];
 
@@ -127,11 +137,6 @@ struct virtnet_info {
 	/* Lock for config space updates */
 	struct mutex config_lock;
 
-	/* Page_frag for GFP_KERNEL packet buffer allocation when we run
-	 * low on memory.
-	 */
-	struct page_frag alloc_frag;
-
 	/* Is the affinity hint set for virtqueues? */
 	bool affinity_hint_set;
 
@@ -222,6 +227,24 @@ static void skb_xmit_done(struct virtqueue *vq)
 	netif_wake_subqueue(vi->dev, vq2txq(vq));
 }
 
+static unsigned int mergeable_ctx_to_buf_truesize(unsigned long mrg_ctx)
+{
+	unsigned int truesize = mrg_ctx & (MERGEABLE_BUFFER_ALIGN - 1);
+	return (truesize + 1) * MERGEABLE_BUFFER_ALIGN;
+}
+
+static void *mergeable_ctx_to_buf_address(unsigned long mrg_ctx)
+{
+	return (void *)(mrg_ctx & -MERGEABLE_BUFFER_ALIGN);
+
+}
+
+static unsigned long mergeable_buf_to_ctx(void *buf, unsigned int truesize)
+{
+	unsigned int size = truesize / MERGEABLE_BUFFER_ALIGN;
+	return (unsigned long)buf | (size - 1);
+}
+
 /* Called from bottom half context */
 static struct sk_buff *page_to_skb(struct receive_queue *rq,
 				   struct page *page, unsigned int offset,
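The three helpers above fold a buffer address and its truesize into one unsigned long: every mergeable buffer starts on a MERGEABLE_BUFFER_ALIGN boundary and its truesize is a multiple of that alignment, so (truesize / align - 1) fits in the address bits the alignment leaves free and no extra per-buffer bookkeeping is needed. Below is a user-space round-trip sketch of the same packing, using 256 directly for the alignment (the minimum the driver enforces).

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

#define BUF_ALIGN 256UL	/* minimum MERGEABLE_BUFFER_ALIGN in the patch */

/* Pack address and truesize as mergeable_buf_to_ctx() does: the buffer
 * is BUF_ALIGN-aligned and truesize is a multiple of BUF_ALIGN, so
 * (truesize / BUF_ALIGN - 1) fits in the low bits of the address.
 */
static unsigned long buf_to_ctx(void *buf, unsigned int truesize)
{
	return (unsigned long)buf | (truesize / BUF_ALIGN - 1);
}

static void *ctx_to_addr(unsigned long ctx)
{
	return (void *)(ctx & -BUF_ALIGN);	/* clear the size bits */
}

static unsigned int ctx_to_truesize(unsigned long ctx)
{
	return ((ctx & (BUF_ALIGN - 1)) + 1) * BUF_ALIGN;
}

int main(void)
{
	void *buf = aligned_alloc(BUF_ALIGN, 4 * BUF_ALIGN);
	unsigned long ctx = buf_to_ctx(buf, 4 * BUF_ALIGN);

	assert(ctx_to_addr(ctx) == buf);
	assert(ctx_to_truesize(ctx) == 4 * BUF_ALIGN);
	printf("addr %p truesize %u\n", ctx_to_addr(ctx), ctx_to_truesize(ctx));
	free(buf);
	return 0;
}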
@@ -330,38 +353,34 @@ err:
 
 static struct sk_buff *receive_mergeable(struct net_device *dev,
 					 struct receive_queue *rq,
-					 void *buf,
+					 unsigned long ctx,
 					 unsigned int len)
 {
+	void *buf = mergeable_ctx_to_buf_address(ctx);
 	struct skb_vnet_hdr *hdr = buf;
 	int num_buf = hdr->mhdr.num_buffers;
 	struct page *page = virt_to_head_page(buf);
 	int offset = buf - page_address(page);
-	struct sk_buff *head_skb = page_to_skb(rq, page, offset, len,
-					       MERGE_BUFFER_LEN);
+	unsigned int truesize = max(len, mergeable_ctx_to_buf_truesize(ctx));
+
+	struct sk_buff *head_skb = page_to_skb(rq, page, offset, len, truesize);
 	struct sk_buff *curr_skb = head_skb;
 
 	if (unlikely(!curr_skb))
 		goto err_skb;
-
 	while (--num_buf) {
 		int num_skb_frags;
 
-		buf = virtqueue_get_buf(rq->vq, &len);
-		if (unlikely(!buf)) {
+		ctx = (unsigned long)virtqueue_get_buf(rq->vq, &len);
+		if (unlikely(!ctx)) {
 			pr_debug("%s: rx error: %d buffers out of %d missing\n",
 				 dev->name, num_buf, hdr->mhdr.num_buffers);
 			dev->stats.rx_length_errors++;
 			goto err_buf;
 		}
-		if (unlikely(len > MERGE_BUFFER_LEN)) {
-			pr_debug("%s: rx error: merge buffer too long\n",
-				 dev->name);
-			len = MERGE_BUFFER_LEN;
-		}
 
+		buf = mergeable_ctx_to_buf_address(ctx);
 		page = virt_to_head_page(buf);
-		--rq->num;
 
 		num_skb_frags = skb_shinfo(curr_skb)->nr_frags;
 		if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) {
@@ -377,37 +396,38 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 			head_skb->truesize += nskb->truesize;
 			num_skb_frags = 0;
 		}
+		truesize = max(len, mergeable_ctx_to_buf_truesize(ctx));
 		if (curr_skb != head_skb) {
 			head_skb->data_len += len;
 			head_skb->len += len;
-			head_skb->truesize += MERGE_BUFFER_LEN;
+			head_skb->truesize += truesize;
 		}
 		offset = buf - page_address(page);
 		if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) {
 			put_page(page);
 			skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1,
-					     len, MERGE_BUFFER_LEN);
+					     len, truesize);
 		} else {
 			skb_add_rx_frag(curr_skb, num_skb_frags, page,
-					offset, len, MERGE_BUFFER_LEN);
+					offset, len, truesize);
 		}
 	}
 
+	ewma_add(&rq->mrg_avg_pkt_len, head_skb->len);
 	return head_skb;
 
 err_skb:
 	put_page(page);
 	while (--num_buf) {
-		buf = virtqueue_get_buf(rq->vq, &len);
-		if (unlikely(!buf)) {
+		ctx = (unsigned long)virtqueue_get_buf(rq->vq, &len);
+		if (unlikely(!ctx)) {
 			pr_debug("%s: rx error: %d buffers missing\n",
 				 dev->name, num_buf);
 			dev->stats.rx_length_errors++;
 			break;
 		}
-		page = virt_to_head_page(buf);
+		page = virt_to_head_page(mergeable_ctx_to_buf_address(ctx));
 		put_page(page);
-		--rq->num;
 	}
 err_buf:
 	dev->stats.rx_dropped++;
@@ -426,17 +446,20 @@ static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
 	if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) {
 		pr_debug("%s: short packet %i\n", dev->name, len);
 		dev->stats.rx_length_errors++;
-		if (vi->mergeable_rx_bufs)
-			put_page(virt_to_head_page(buf));
-		else if (vi->big_packets)
+		if (vi->mergeable_rx_bufs) {
+			unsigned long ctx = (unsigned long)buf;
+			void *base = mergeable_ctx_to_buf_address(ctx);
+			put_page(virt_to_head_page(base));
+		} else if (vi->big_packets) {
 			give_pages(rq, buf);
-		else
+		} else {
 			dev_kfree_skb(buf);
+		}
 		return;
 	}
 
 	if (vi->mergeable_rx_bufs)
-		skb = receive_mergeable(dev, rq, buf, len);
+		skb = receive_mergeable(dev, rq, (unsigned long)buf, len);
 	else if (vi->big_packets)
 		skb = receive_big(dev, rq, buf, len);
 	else
@@ -577,28 +600,45 @@ static int add_recvbuf_big(struct receive_queue *rq, gfp_t gfp)
 	return err;
 }
 
+static unsigned int get_mergeable_buf_len(struct ewma *avg_pkt_len)
+{
+	const size_t hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
+	unsigned int len;
+
+	len = hdr_len + clamp_t(unsigned int, ewma_read(avg_pkt_len),
+			GOOD_PACKET_LEN, PAGE_SIZE - hdr_len);
+	return ALIGN(len, MERGEABLE_BUFFER_ALIGN);
+}
+
 static int add_recvbuf_mergeable(struct receive_queue *rq, gfp_t gfp)
 {
-	struct virtnet_info *vi = rq->vq->vdev->priv;
-	char *buf = NULL;
+	struct page_frag *alloc_frag = &rq->alloc_frag;
+	char *buf;
+	unsigned long ctx;
 	int err;
+	unsigned int len, hole;
 
-	if (gfp & __GFP_WAIT) {
-		if (skb_page_frag_refill(MERGE_BUFFER_LEN, &vi->alloc_frag,
-					 gfp)) {
-			buf = (char *)page_address(vi->alloc_frag.page) +
-			      vi->alloc_frag.offset;
-			get_page(vi->alloc_frag.page);
-			vi->alloc_frag.offset += MERGE_BUFFER_LEN;
-		}
-	} else {
-		buf = netdev_alloc_frag(MERGE_BUFFER_LEN);
-	}
-	if (!buf)
+	len = get_mergeable_buf_len(&rq->mrg_avg_pkt_len);
+	if (unlikely(!skb_page_frag_refill(len, alloc_frag, gfp)))
 		return -ENOMEM;
 
-	sg_init_one(rq->sg, buf, MERGE_BUFFER_LEN);
-	err = virtqueue_add_inbuf(rq->vq, rq->sg, 1, buf, gfp);
+	buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
+	ctx = mergeable_buf_to_ctx(buf, len);
+	get_page(alloc_frag->page);
+	alloc_frag->offset += len;
+	hole = alloc_frag->size - alloc_frag->offset;
+	if (hole < len) {
+		/* To avoid internal fragmentation, if there is very likely not
+		 * enough space for another buffer, add the remaining space to
+		 * the current buffer. This extra space is not included in
+		 * the truesize stored in ctx.
+		 */
+		len += hole;
+		alloc_frag->offset += hole;
+	}
+
+	sg_init_one(rq->sg, buf, len);
+	err = virtqueue_add_inbuf(rq->vq, rq->sg, 1, (void *)ctx, gfp);
 	if (err < 0)
 		put_page(virt_to_head_page(buf));
 
@@ -618,6 +658,7 @@ static bool try_fill_recv(struct receive_queue *rq, gfp_t gfp)
 	int err;
 	bool oom;
 
+	gfp |= __GFP_COLD;
 	do {
 		if (vi->mergeable_rx_bufs)
 			err = add_recvbuf_mergeable(rq, gfp);
@@ -629,10 +670,7 @@ static bool try_fill_recv(struct receive_queue *rq, gfp_t gfp)
 		oom = err == -ENOMEM;
 		if (err)
 			break;
-		++rq->num;
 	} while (rq->vq->num_free);
-	if (unlikely(rq->num > rq->max))
-		rq->max = rq->num;
 	if (unlikely(!virtqueue_kick(rq->vq)))
 		return false;
 	return !oom;
@@ -700,11 +738,10 @@ again:
 	while (received < budget &&
 	       (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
 		receive_buf(rq, buf, len);
-		--rq->num;
 		received++;
 	}
 
-	if (rq->num < rq->max / 2) {
+	if (rq->vq->num_free > virtqueue_get_vring_size(rq->vq) / 2) {
 		if (!try_fill_recv(rq, GFP_ATOMIC))
 			schedule_delayed_work(&vi->refill, 0);
 	}
@@ -874,16 +911,15 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
 /*
  * Send command via the control virtqueue and check status.  Commands
  * supported by the hypervisor, as indicated by feature bits, should
- * never fail unless improperly formated.
+ * never fail unless improperly formatted.
  */
 static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
-				 struct scatterlist *out,
-				 struct scatterlist *in)
+				 struct scatterlist *out)
 {
 	struct scatterlist *sgs[4], hdr, stat;
 	struct virtio_net_ctrl_hdr ctrl;
 	virtio_net_ctrl_ack status = ~0;
-	unsigned out_num = 0, in_num = 0, tmp;
+	unsigned out_num = 0, tmp;
 
 	/* Caller should know better */
 	BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ));
@@ -896,16 +932,13 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
 
 	if (out)
 		sgs[out_num++] = out;
-	if (in)
-		sgs[out_num + in_num++] = in;
 
 	/* Add return status. */
 	sg_init_one(&stat, &status, sizeof(status));
-	sgs[out_num + in_num++] = &stat;
+	sgs[out_num] = &stat;
 
-	BUG_ON(out_num + in_num > ARRAY_SIZE(sgs));
-	BUG_ON(virtqueue_add_sgs(vi->cvq, sgs, out_num, in_num, vi, GFP_ATOMIC)
-	       < 0);
+	BUG_ON(out_num + 1 > ARRAY_SIZE(sgs));
+	BUG_ON(virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC) < 0);
 
 	if (unlikely(!virtqueue_kick(vi->cvq)))
 		return status == VIRTIO_NET_OK;
@@ -935,8 +968,7 @@ static int virtnet_set_mac_address(struct net_device *dev, void *p)
 	if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
 		sg_init_one(&sg, addr->sa_data, dev->addr_len);
 		if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
-					  VIRTIO_NET_CTRL_MAC_ADDR_SET,
-					  &sg, NULL)) {
+					  VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg)) {
 			dev_warn(&vdev->dev,
 				 "Failed to set mac address by vq command.\n");
 			return -EINVAL;
@@ -1009,7 +1041,7 @@ static void virtnet_ack_link_announce(struct virtnet_info *vi)
 {
 	rtnl_lock();
 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE,
-				  VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL, NULL))
+				  VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL))
 		dev_warn(&vi->dev->dev, "Failed to ack link announce.\n");
 	rtnl_unlock();
 }
@@ -1027,7 +1059,7 @@ static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
 	sg_init_one(&sg, &s, sizeof(s));
 
 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
-				  VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg, NULL)) {
+				  VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg)) {
 		dev_warn(&dev->dev, "Fail to set num of queue pairs to %d\n",
 			 queue_pairs);
 		return -EINVAL;
@@ -1067,7 +1099,7 @@ static void virtnet_set_rx_mode(struct net_device *dev)
 	void *buf;
 	int i;
 
-	/* We can't dynamicaly set ndo_set_rx_mode, so return gracefully */
+	/* We can't dynamically set ndo_set_rx_mode, so return gracefully */
 	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
 		return;
 
@@ -1077,16 +1109,14 @@ static void virtnet_set_rx_mode(struct net_device *dev)
 	sg_init_one(sg, &promisc, sizeof(promisc));
 
 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
-				  VIRTIO_NET_CTRL_RX_PROMISC,
-				  sg, NULL))
+				  VIRTIO_NET_CTRL_RX_PROMISC, sg))
 		dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
 			 promisc ? "en" : "dis");
 
 	sg_init_one(sg, &allmulti, sizeof(allmulti));
 
 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
-				  VIRTIO_NET_CTRL_RX_ALLMULTI,
-				  sg, NULL))
+				  VIRTIO_NET_CTRL_RX_ALLMULTI, sg))
 		dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
 			 allmulti ? "en" : "dis");
 
@@ -1122,8 +1152,7 @@ static void virtnet_set_rx_mode(struct net_device *dev)
 		   sizeof(mac_data->entries) + (mc_count * ETH_ALEN));
 
 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
-				  VIRTIO_NET_CTRL_MAC_TABLE_SET,
-				  sg, NULL))
+				  VIRTIO_NET_CTRL_MAC_TABLE_SET, sg))
 		dev_warn(&dev->dev, "Failed to set MAC filter table.\n");
 
 	kfree(buf);
@@ -1138,7 +1167,7 @@ static int virtnet_vlan_rx_add_vid(struct net_device *dev,
 	sg_init_one(&sg, &vid, sizeof(vid));
 
 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
-				  VIRTIO_NET_CTRL_VLAN_ADD, &sg, NULL))
+				  VIRTIO_NET_CTRL_VLAN_ADD, &sg))
 		dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid);
 	return 0;
 }
@@ -1152,7 +1181,7 @@ static int virtnet_vlan_rx_kill_vid(struct net_device *dev,
 	sg_init_one(&sg, &vid, sizeof(vid));
 
 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
-				  VIRTIO_NET_CTRL_VLAN_DEL, &sg, NULL))
+				  VIRTIO_NET_CTRL_VLAN_DEL, &sg))
 		dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid);
 	return 0;
 }
@@ -1386,6 +1415,14 @@ static void free_receive_bufs(struct virtnet_info *vi)
 	}
 }
 
+static void free_receive_page_frags(struct virtnet_info *vi)
+{
+	int i;
+	for (i = 0; i < vi->max_queue_pairs; i++)
+		if (vi->rq[i].alloc_frag.page)
+			put_page(vi->rq[i].alloc_frag.page);
+}
+
 static void free_unused_bufs(struct virtnet_info *vi)
 {
 	void *buf;
@@ -1401,15 +1438,16 @@ static void free_unused_bufs(struct virtnet_info *vi)
 		struct virtqueue *vq = vi->rq[i].vq;
 
 		while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
-			if (vi->mergeable_rx_bufs)
-				put_page(virt_to_head_page(buf));
-			else if (vi->big_packets)
+			if (vi->mergeable_rx_bufs) {
+				unsigned long ctx = (unsigned long)buf;
+				void *base = mergeable_ctx_to_buf_address(ctx);
+				put_page(virt_to_head_page(base));
+			} else if (vi->big_packets) {
 				give_pages(&vi->rq[i], buf);
-			else
+			} else {
 				dev_kfree_skb(buf);
-			--vi->rq[i].num;
+			}
 		}
-		BUG_ON(vi->rq[i].num != 0);
 	}
 }
 
@@ -1516,6 +1554,7 @@ static int virtnet_alloc_queues(struct virtnet_info *vi)
 			       napi_weight);
 
 		sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg));
+		ewma_init(&vi->rq[i].mrg_avg_pkt_len, 1, RECEIVE_AVG_WEIGHT);
 		sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg));
 	}
 
@@ -1552,6 +1591,33 @@ err:
 	return ret;
 }
 
+#ifdef CONFIG_SYSFS
+static ssize_t mergeable_rx_buffer_size_show(struct netdev_rx_queue *queue,
+		struct rx_queue_attribute *attribute, char *buf)
+{
+	struct virtnet_info *vi = netdev_priv(queue->dev);
+	unsigned int queue_index = get_netdev_rx_queue_index(queue);
+	struct ewma *avg;
+
+	BUG_ON(queue_index >= vi->max_queue_pairs);
+	avg = &vi->rq[queue_index].mrg_avg_pkt_len;
+	return sprintf(buf, "%u\n", get_mergeable_buf_len(avg));
+}
+
+static struct rx_queue_attribute mergeable_rx_buffer_size_attribute =
+	__ATTR_RO(mergeable_rx_buffer_size);
+
+static struct attribute *virtio_net_mrg_rx_attrs[] = {
+	&mergeable_rx_buffer_size_attribute.attr,
+	NULL
+};
+
+static const struct attribute_group virtio_net_mrg_rx_group = {
+	.name = "virtio_net",
+	.attrs = virtio_net_mrg_rx_attrs
+};
+#endif
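
The attribute above reports get_mergeable_buf_len(), which is derived from the per-queue EWMA of merged packet length seeded by ewma_init() in virtnet_alloc_queues() earlier in this patch. As a rough feel for how such a weighted average tracks bursty packet sizes, here is a self-contained userspace sketch; the weight of 64 and the linear update formula are simplifications standing in for the kernel's lib/average helpers, not the exact arithmetic.

/* Hypothetical userspace illustration of an exponentially weighted moving
 * average, loosely mirroring how the driver tracks average merged packet
 * length.  Weight 64 is an assumption standing in for RECEIVE_AVG_WEIGHT. */
#include <stdio.h>

struct ewma_demo {
	unsigned long internal;	/* scaled running average */
	unsigned long factor;	/* precision scaling */
	unsigned long weight;	/* smoothing factor */
};

static void ewma_demo_init(struct ewma_demo *e, unsigned long factor,
			   unsigned long weight)
{
	e->internal = 0;
	e->factor = factor;
	e->weight = weight;
}

static void ewma_demo_add(struct ewma_demo *e, unsigned long val)
{
	/* new = (old * (weight - 1) + val * factor) / weight */
	e->internal = e->internal ?
		(e->internal * (e->weight - 1) + val * e->factor) / e->weight :
		val * e->factor;
}

static unsigned long ewma_demo_read(const struct ewma_demo *e)
{
	return e->internal / e->factor;
}

int main(void)
{
	unsigned long lens[] = { 1500, 60, 60, 9000, 1500 };
	struct ewma_demo avg;
	unsigned int i;

	ewma_demo_init(&avg, 1, 64);
	for (i = 0; i < sizeof(lens) / sizeof(lens[0]); i++) {
		ewma_demo_add(&avg, lens[i]);
		printf("sample %lu -> avg %lu\n", lens[i], ewma_demo_read(&avg));
	}
	return 0;
}
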
+
 static int virtnet_probe(struct virtio_device *vdev)
 {
 	int i, err;
@@ -1666,6 +1732,10 @@ static int virtnet_probe(struct virtio_device *vdev)
 	if (err)
 		goto free_stats;
 
+#ifdef CONFIG_SYSFS
+	if (vi->mergeable_rx_bufs)
+		dev->sysfs_rx_queue_group = &virtio_net_mrg_rx_group;
+#endif
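
With the attribute group registered for mergeable buffers, the estimate becomes visible under each receive queue's sysfs directory. A minimal reader, assuming the conventional per-queue sysfs path (the device name and queue index below are placeholders):

/* Hypothetical reader for the per-rx-queue attribute added above.  The path
 * is an assumption based on the usual net sysfs layout; adjust the device
 * name and queue index as needed. */
#include <stdio.h>

int main(void)
{
	const char *path =
		"/sys/class/net/eth0/queues/rx-0/virtio_net/mergeable_rx_buffer_size";
	char buf[32];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("estimated rx buffer size: %s", buf);
	fclose(f);
	return 0;
}
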
 	netif_set_real_num_tx_queues(dev, vi->curr_queue_pairs);
 	netif_set_real_num_rx_queues(dev, vi->curr_queue_pairs);
 
@@ -1680,7 +1750,8 @@ static int virtnet_probe(struct virtio_device *vdev)
 		try_fill_recv(&vi->rq[i], GFP_KERNEL);
 
 		/* If we didn't even get one input buffer, we're useless. */
-		if (vi->rq[i].num == 0) {
+		if (vi->rq[i].vq->num_free ==
+		    virtqueue_get_vring_size(vi->rq[i].vq)) {
 			free_unused_bufs(vi);
 			err = -ENOMEM;
 			goto free_recv_bufs;
@@ -1714,9 +1785,8 @@ free_recv_bufs:
 	unregister_netdev(dev);
 free_vqs:
 	cancel_delayed_work_sync(&vi->refill);
+	free_receive_page_frags(vi);
 	virtnet_del_vqs(vi);
-	if (vi->alloc_frag.page)
-		put_page(vi->alloc_frag.page);
 free_stats:
 	free_percpu(vi->stats);
 free:
@@ -1733,6 +1803,8 @@ static void remove_vq_common(struct virtnet_info *vi)
 
 	free_receive_bufs(vi);
 
+	free_receive_page_frags(vi);
+
 	virtnet_del_vqs(vi);
 }
 
@@ -1750,8 +1822,6 @@ static void virtnet_remove(struct virtio_device *vdev)
 	unregister_netdev(vi->dev);
 
 	remove_vq_common(vi);
-	if (vi->alloc_frag.page)
-		put_page(vi->alloc_frag.page);
 
 	flush_work(&vi->config_work);
 
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 7e2788c488ed..3be786faaaec 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -1235,7 +1235,9 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
 #ifdef VMXNET3_RSS
 			if (rcd->rssType != VMXNET3_RCD_RSS_TYPE_NONE &&
 			    (adapter->netdev->features & NETIF_F_RXHASH))
-				ctx->skb->rxhash = le32_to_cpu(rcd->rssHash);
+				skb_set_hash(ctx->skb,
+					     le32_to_cpu(rcd->rssHash),
+					     PKT_HASH_TYPE_L3);
 #endif
 			skb_put(ctx->skb, rcd->len);
 
@@ -3132,7 +3134,6 @@ err_alloc_queue_desc:
 err_alloc_shared:
 	dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa,
 			 sizeof(struct vmxnet3_adapter), PCI_DMA_TODEVICE);
-	pci_set_drvdata(pdev, NULL);
 	free_netdev(netdev);
 	return err;
 }
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index 12040a35d95d..190569d02450 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -37,7 +37,6 @@
 #include <linux/spinlock.h>
 #include <linux/ioport.h>
 #include <linux/highmem.h>
-#include <linux/init.h>
 #include <linux/timer.h>
 #include <linux/skbuff.h>
 #include <linux/interrupt.h>
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index ed384fee76ac..026a313c2d2d 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -40,6 +40,7 @@
 #include <net/net_namespace.h>
 #include <net/netns/generic.h>
 #include <net/vxlan.h>
+#include <net/protocol.h>
 #if IS_ENABLED(CONFIG_IPV6)
 #include <net/ipv6.h>
 #include <net/addrconf.h>
@@ -554,13 +555,106 @@ static int vxlan_fdb_append(struct vxlan_fdb *f,
 	return 1;
 }
 
+static struct sk_buff **vxlan_gro_receive(struct sk_buff **head, struct sk_buff *skb)
+{
+	struct sk_buff *p, **pp = NULL;
+	struct vxlanhdr *vh, *vh2;
+	struct ethhdr *eh, *eh2;
+	unsigned int hlen, off_vx, off_eth;
+	const struct packet_offload *ptype;
+	__be16 type;
+	int flush = 1;
+
+	off_vx = skb_gro_offset(skb);
+	hlen = off_vx + sizeof(*vh);
+	vh   = skb_gro_header_fast(skb, off_vx);
+	if (skb_gro_header_hard(skb, hlen)) {
+		vh = skb_gro_header_slow(skb, hlen, off_vx);
+		if (unlikely(!vh))
+			goto out;
+	}
+	skb_gro_pull(skb, sizeof(struct vxlanhdr)); /* pull vxlan header */
+
+	off_eth = skb_gro_offset(skb);
+	hlen = off_eth + sizeof(*eh);
+	eh   = skb_gro_header_fast(skb, off_eth);
+	if (skb_gro_header_hard(skb, hlen)) {
+		eh = skb_gro_header_slow(skb, hlen, off_eth);
+		if (unlikely(!eh))
+			goto out;
+	}
+
+	flush = 0;
+
+	for (p = *head; p; p = p->next) {
+		if (!NAPI_GRO_CB(p)->same_flow)
+			continue;
+
+		vh2 = (struct vxlanhdr *)(p->data + off_vx);
+		eh2 = (struct ethhdr   *)(p->data + off_eth);
+		if (vh->vx_vni != vh2->vx_vni || compare_ether_header(eh, eh2)) {
+			NAPI_GRO_CB(p)->same_flow = 0;
+			continue;
+		}
+		goto found;
+	}
+
+found:
+	type = eh->h_proto;
+
+	rcu_read_lock();
+	ptype = gro_find_receive_by_type(type);
+	if (ptype == NULL) {
+		flush = 1;
+		goto out_unlock;
+	}
+
+	skb_gro_pull(skb, sizeof(*eh)); /* pull inner eth header */
+	pp = ptype->callbacks.gro_receive(head, skb);
+
+out_unlock:
+	rcu_read_unlock();
+out:
+	NAPI_GRO_CB(skb)->flush |= flush;
+
+	return pp;
+}
+
+static int vxlan_gro_complete(struct sk_buff *skb, int nhoff)
+{
+	struct ethhdr *eh;
+	struct packet_offload *ptype;
+	__be16 type;
+	int vxlan_len  = sizeof(struct vxlanhdr) + sizeof(struct ethhdr);
+	int err = -ENOSYS;
+
+	eh = (struct ethhdr *)(skb->data + nhoff + sizeof(struct vxlanhdr));
+	type = eh->h_proto;
+
+	rcu_read_lock();
+	ptype = gro_find_complete_by_type(type);
+	if (ptype != NULL)
+		err = ptype->callbacks.gro_complete(skb, nhoff + vxlan_len);
+
+	rcu_read_unlock();
+	return err;
+}
+
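The GRO receive handler above keys flows on the VXLAN network identifier (vx_vni) plus the inner Ethernet header before handing the inner frame to the next protocol's gro_receive callback. For reference, a small userspace sketch of pulling the VNI out of a raw VXLAN header as laid out in RFC 7348; the 0x08 "VNI valid" flag check is included as an assumption about how a receiver would normally validate the header.

/* Hypothetical parse of an 8-byte VXLAN header (RFC 7348 layout):
 * 4 bytes of flags/reserved followed by a 24-bit VNI and a reserved byte. */
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct vxlanhdr_demo {
	uint32_t vx_flags;
	uint32_t vx_vni;
};

int main(void)
{
	/* Example header: I flag set, VNI 42 (carried in the upper 24 bits). */
	unsigned char wire[8] = { 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2a, 0x00 };
	struct vxlanhdr_demo vh;

	memcpy(&vh, wire, sizeof(vh));

	if (!(ntohl(vh.vx_flags) & 0x08000000)) {
		fprintf(stderr, "VNI flag not set\n");
		return 1;
	}
	printf("vni = %u\n", ntohl(vh.vx_vni) >> 8);
	return 0;
}
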
 /* Notify netdevs that UDP port started listening */
-static void vxlan_notify_add_rx_port(struct sock *sk)
+static void vxlan_notify_add_rx_port(struct vxlan_sock *vs)
 {
 	struct net_device *dev;
+	struct sock *sk = vs->sock->sk;
 	struct net *net = sock_net(sk);
 	sa_family_t sa_family = sk->sk_family;
 	__be16 port = inet_sk(sk)->inet_sport;
+	int err;
+
+	if (sa_family == AF_INET) {
+		err = udp_add_offload(&vs->udp_offloads);
+		if (err)
+			pr_warn("vxlan: udp_add_offload failed with status %d\n", err);
+	}
 
 	rcu_read_lock();
 	for_each_netdev_rcu(net, dev) {
@@ -572,9 +666,10 @@ static void vxlan_notify_add_rx_port(struct sock *sk)
 }
 
 /* Notify netdevs that UDP port is no more listening */
-static void vxlan_notify_del_rx_port(struct sock *sk)
+static void vxlan_notify_del_rx_port(struct vxlan_sock *vs)
 {
 	struct net_device *dev;
+	struct sock *sk = vs->sock->sk;
 	struct net *net = sock_net(sk);
 	sa_family_t sa_family = sk->sk_family;
 	__be16 port = inet_sk(sk)->inet_sport;
@@ -586,6 +681,9 @@ static void vxlan_notify_del_rx_port(struct sock *sk)
 							    port);
 	}
 	rcu_read_unlock();
+
+	if (sa_family == AF_INET)
+		udp_del_offload(&vs->udp_offloads);
 }
 
 /* Add new entry to forwarding table -- assumes lock held */
@@ -741,10 +839,9 @@ static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan,
 		if (nla_len(tb[NDA_IFINDEX]) != sizeof(u32))
 			return -EINVAL;
 		*ifindex = nla_get_u32(tb[NDA_IFINDEX]);
-		tdev = dev_get_by_index(net, *ifindex);
+		tdev = __dev_get_by_index(net, *ifindex);
 		if (!tdev)
 			return -EADDRNOTAVAIL;
-		dev_put(tdev);
 	} else {
 		*ifindex = 0;
 	}
@@ -916,17 +1013,32 @@ static bool vxlan_snoop(struct net_device *dev,
 }
 
 /* See if multicast group is already in use by other ID */
-static bool vxlan_group_used(struct vxlan_net *vn, union vxlan_addr *remote_ip)
+static bool vxlan_group_used(struct vxlan_net *vn, struct vxlan_dev *dev)
 {
 	struct vxlan_dev *vxlan;
 
+	/* The vxlan_sock is only used by dev, leaving group has
+	 * no effect on other vxlan devices.
+	 */
+	if (atomic_read(&dev->vn_sock->refcnt) == 1)
+		return false;
+
 	list_for_each_entry(vxlan, &vn->vxlan_list, next) {
-		if (!netif_running(vxlan->dev))
+		if (!netif_running(vxlan->dev) || vxlan == dev)
 			continue;
 
-		if (vxlan_addr_equal(&vxlan->default_dst.remote_ip,
-				     remote_ip))
-			return true;
+		if (vxlan->vn_sock != dev->vn_sock)
+			continue;
+
+		if (!vxlan_addr_equal(&vxlan->default_dst.remote_ip,
+				      &dev->default_dst.remote_ip))
+			continue;
+
+		if (vxlan->default_dst.remote_ifindex !=
+		    dev->default_dst.remote_ifindex)
+			continue;
+
+		return true;
 	}
 
 	return false;
@@ -949,7 +1061,7 @@ void vxlan_sock_release(struct vxlan_sock *vs)
 	spin_lock(&vn->sock_lock);
 	hlist_del_rcu(&vs->hlist);
 	rcu_assign_sk_user_data(vs->sock->sk, NULL);
-	vxlan_notify_del_rx_port(sk);
+	vxlan_notify_del_rx_port(vs);
 	spin_unlock(&vn->sock_lock);
 
 	queue_work(vxlan_wq, &vs->del_work);
@@ -1047,6 +1159,16 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
 	if (!vs)
 		goto drop;
 
+	/* If the NIC driver gave us an encapsulated packet
+	 * with the encapsulation mark, the device checksummed it
+	 * for us. Otherwise force the upper layers to verify it.
+	 */
+	if ((skb->ip_summed != CHECKSUM_UNNECESSARY && skb->ip_summed != CHECKSUM_PARTIAL) ||
+	    !skb->encapsulation)
+		skb->ip_summed = CHECKSUM_NONE;
+
+	skb->encapsulation = 0;
+
 	vs->rcv(vs, skb, vxh->vx_vni);
 	return 0;
 
@@ -1066,7 +1188,7 @@ static void vxlan_rcv(struct vxlan_sock *vs,
 	struct iphdr *oip = NULL;
 	struct ipv6hdr *oip6 = NULL;
 	struct vxlan_dev *vxlan;
-	struct pcpu_tstats *stats;
+	struct pcpu_sw_netstats *stats;
 	union vxlan_addr saddr;
 	__u32 vni;
 	int err = 0;
@@ -1105,17 +1227,6 @@ static void vxlan_rcv(struct vxlan_sock *vs,
 
 	skb_reset_network_header(skb);
 
-	/* If the NIC driver gave us an encapsulated packet with
-	 * CHECKSUM_UNNECESSARY and Rx checksum feature is enabled,
-	 * leave the CHECKSUM_UNNECESSARY, the device checksummed it
-	 * for us. Otherwise force the upper layers to verify it.
-	 */
-	if (skb->ip_summed != CHECKSUM_UNNECESSARY || !skb->encapsulation ||
-	    !(vxlan->dev->features & NETIF_F_RXCSUM))
-		skb->ip_summed = CHECKSUM_NONE;
-
-	skb->encapsulation = 0;
-
 	if (oip6)
 		err = IP6_ECN_decapsulate(oip6, skb);
 	if (oip)
@@ -1366,20 +1477,6 @@ static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb)
 	return false;
 }
 
-static void vxlan_sock_put(struct sk_buff *skb)
-{
-	sock_put(skb->sk);
-}
-
-/* On transmit, associate with the tunnel socket */
-static void vxlan_set_owner(struct sock *sk, struct sk_buff *skb)
-{
-	skb_orphan(skb);
-	sock_hold(sk);
-	skb->sk = sk;
-	skb->destructor = vxlan_sock_put;
-}
-
 /* Compute source port for outgoing packet
  *   first choice to use L4 flow hash since it will spread
  *     better and maybe available from hardware
@@ -1390,7 +1487,7 @@ __be16 vxlan_src_port(__u16 port_min, __u16 port_max, struct sk_buff *skb)
 	unsigned int range = (port_max - port_min) + 1;
 	u32 hash;
 
-	hash = skb_get_rxhash(skb);
+	hash = skb_get_hash(skb);
 	if (!hash)
 		hash = jhash(skb->data, 2 * ETH_ALEN,
 			     (__force u32) skb->protocol);
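
vxlan_src_port() spreads outgoing tunnel traffic by hashing the inner flow and falling back to a jhash of the MAC addresses, then scaling the hash into the configured [port_min, port_max] range. A minimal sketch of one such scaling; the exact arithmetic is an assumption for illustration, not quoted from the driver.

/* Hypothetical scaling of a 32-bit flow hash into a UDP source-port range,
 * illustrating how hash-based source ports spread tunnel traffic. */
#include <stdint.h>
#include <stdio.h>

static uint16_t pick_src_port(uint32_t hash, uint16_t port_min, uint16_t port_max)
{
	uint32_t range = (uint32_t)(port_max - port_min) + 1;

	/* Multiply-shift maps the full 32-bit hash space onto the range. */
	return port_min + (uint16_t)(((uint64_t)hash * range) >> 32);
}

int main(void)
{
	printf("%u\n", pick_src_port(0xdeadbeef, 49152, 65535));
	printf("%u\n", pick_src_port(0x12345678, 49152, 65535));
	return 0;
}
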
@@ -1499,8 +1596,6 @@ static int vxlan6_xmit_skb(struct vxlan_sock *vs,
 	ip6h->daddr	  = *daddr;
 	ip6h->saddr	  = *saddr;
 
-	vxlan_set_owner(vs->sock->sk, skb);
-
 	err = handle_offloads(skb);
 	if (err)
 		return err;
@@ -1557,8 +1652,6 @@ int vxlan_xmit_skb(struct vxlan_sock *vs,
 	uh->len = htons(skb->len);
 	uh->check = 0;
 
-	vxlan_set_owner(vs->sock->sk, skb);
-
 	err = handle_offloads(skb);
 	if (err)
 		return err;
@@ -1572,11 +1665,12 @@ EXPORT_SYMBOL_GPL(vxlan_xmit_skb);
 static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
 			       struct vxlan_dev *dst_vxlan)
 {
-	struct pcpu_tstats *tx_stats = this_cpu_ptr(src_vxlan->dev->tstats);
-	struct pcpu_tstats *rx_stats = this_cpu_ptr(dst_vxlan->dev->tstats);
+	struct pcpu_sw_netstats *tx_stats, *rx_stats;
 	union vxlan_addr loopback;
 	union vxlan_addr *remote_ip = &dst_vxlan->default_dst.remote_ip;
 
+	tx_stats = this_cpu_ptr(src_vxlan->dev->tstats);
+	rx_stats = this_cpu_ptr(dst_vxlan->dev->tstats);
 	skb->pkt_type = PACKET_HOST;
 	skb->encapsulation = 0;
 	skb->dev = dst_vxlan->dev;
@@ -1770,7 +1864,7 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct vxlan_dev *vxlan = netdev_priv(dev);
 	struct ethhdr *eth;
 	bool did_rsc = false;
-	struct vxlan_rdst *rdst;
+	struct vxlan_rdst *rdst, *fdst = NULL;
 	struct vxlan_fdb *f;
 
 	skb_reset_mac_header(skb);
@@ -1812,7 +1906,7 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
 				vxlan_fdb_miss(vxlan, eth->h_dest);
 
 			dev->stats.tx_dropped++;
-			dev_kfree_skb(skb);
+			kfree_skb(skb);
 			return NETDEV_TX_OK;
 		}
 	}
@@ -1820,12 +1914,19 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
 	list_for_each_entry_rcu(rdst, &f->remotes, list) {
 		struct sk_buff *skb1;
 
+		if (!fdst) {
+			fdst = rdst;
+			continue;
+		}
 		skb1 = skb_clone(skb, GFP_ATOMIC);
 		if (skb1)
 			vxlan_xmit_one(skb1, dev, rdst, did_rsc);
 	}
 
-	dev_kfree_skb(skb);
+	if (fdst)
+		vxlan_xmit_one(skb, dev, fdst, did_rsc);
+	else
+		kfree_skb(skb);
 	return NETDEV_TX_OK;
 }
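
The vxlan_xmit change above stops cloning the skb for every remote: the first destination is remembered in fdst and receives the original buffer after the loop, so a single-remote FDB entry (the common case) needs no clone at all. A generic userspace sketch of that "copy for all but one consumer" pattern, with hypothetical names:

/* Hypothetical illustration of handing the original buffer to one consumer
 * and copies to the rest, mirroring the fdst handling above. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void deliver(const char *who, char *buf)
{
	printf("%s got: %s\n", who, buf);
	free(buf);
}

int main(void)
{
	const char *remotes[] = { "remote-a", "remote-b", "remote-c" };
	char *orig = strdup("payload");
	const char *first = NULL;
	size_t i;

	for (i = 0; i < sizeof(remotes) / sizeof(remotes[0]); i++) {
		if (!first) {
			first = remotes[i];		/* defer, keep the original */
			continue;
		}
		deliver(remotes[i], strdup(orig));	/* copy for the rest */
	}

	if (first)
		deliver(first, orig);	/* original goes out exactly once */
	else
		free(orig);		/* no consumers at all */
	return 0;
}
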
 
@@ -1882,12 +1983,12 @@ static int vxlan_init(struct net_device *dev)
 	struct vxlan_sock *vs;
 	int i;
 
-	dev->tstats = alloc_percpu(struct pcpu_tstats);
+	dev->tstats = alloc_percpu(struct pcpu_sw_netstats);
 	if (!dev->tstats)
 		return -ENOMEM;
 
 	for_each_possible_cpu(i) {
-		struct pcpu_tstats *vxlan_stats;
+		struct pcpu_sw_netstats *vxlan_stats;
 		vxlan_stats = per_cpu_ptr(dev->tstats, i);
 		u64_stats_init(&vxlan_stats->syncp);
 	}
@@ -1935,7 +2036,6 @@ static void vxlan_uninit(struct net_device *dev)
 /* Start ageing timer and join group when device is brought up */
 static int vxlan_open(struct net_device *dev)
 {
-	struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
 	struct vxlan_dev *vxlan = netdev_priv(dev);
 	struct vxlan_sock *vs = vxlan->vn_sock;
 
@@ -1943,8 +2043,7 @@ static int vxlan_open(struct net_device *dev)
 	if (!vs)
 		return -ENOTCONN;
 
-	if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip) &&
-	    vxlan_group_used(vn, &vxlan->default_dst.remote_ip)) {
+	if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip)) {
 		vxlan_sock_hold(vs);
 		dev_hold(dev);
 		queue_work(vxlan_wq, &vxlan->igmp_join);
@@ -1983,7 +2082,7 @@ static int vxlan_stop(struct net_device *dev)
 	struct vxlan_sock *vs = vxlan->vn_sock;
 
 	if (vs && vxlan_addr_multicast(&vxlan->default_dst.remote_ip) &&
-	    ! vxlan_group_used(vn, &vxlan->default_dst.remote_ip)) {
+	    !vxlan_group_used(vn, vxlan)) {
 		vxlan_sock_hold(vs);
 		dev_hold(dev);
 		queue_work(vxlan_wq, &vxlan->igmp_leave);
@@ -2001,6 +2100,29 @@ static void vxlan_set_multicast_list(struct net_device *dev)
 {
 }
 
+static int vxlan_change_mtu(struct net_device *dev, int new_mtu)
+{
+	struct vxlan_dev *vxlan = netdev_priv(dev);
+	struct vxlan_rdst *dst = &vxlan->default_dst;
+	struct net_device *lowerdev;
+	int max_mtu;
+
+	lowerdev = __dev_get_by_index(dev_net(dev), dst->remote_ifindex);
+	if (lowerdev == NULL)
+		return eth_change_mtu(dev, new_mtu);
+
+	if (dst->remote_ip.sa.sa_family == AF_INET6)
+		max_mtu = lowerdev->mtu - VXLAN6_HEADROOM;
+	else
+		max_mtu = lowerdev->mtu - VXLAN_HEADROOM;
+
+	if (new_mtu < 68 || new_mtu > max_mtu)
+		return -EINVAL;
+
+	dev->mtu = new_mtu;
+	return 0;
+}
+
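vxlan_change_mtu() above caps the MTU at the lower device's MTU minus the encapsulation headroom. A small sketch of where those headroom figures come from; the component sizes are assumptions based on the standard Ethernet/IP/UDP/VXLAN header lengths rather than the driver's macro values.

/* Hypothetical breakdown of VXLAN encapsulation overhead on IPv4 and IPv6,
 * matching the idea behind VXLAN_HEADROOM / VXLAN6_HEADROOM. */
#include <stdio.h>

int main(void)
{
	int eth = 14, ipv4 = 20, ipv6 = 40, udp = 8, vxlan = 8;
	int headroom_v4 = eth + ipv4 + udp + vxlan;	/* 50 bytes */
	int headroom_v6 = eth + ipv6 + udp + vxlan;	/* 70 bytes */
	int lowerdev_mtu = 1500;

	printf("max inner MTU over IPv4: %d\n", lowerdev_mtu - headroom_v4);
	printf("max inner MTU over IPv6: %d\n", lowerdev_mtu - headroom_v6);
	return 0;
}
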
 static const struct net_device_ops vxlan_netdev_ops = {
 	.ndo_init		= vxlan_init,
 	.ndo_uninit		= vxlan_uninit,
@@ -2009,7 +2131,7 @@ static const struct net_device_ops vxlan_netdev_ops = {
 	.ndo_start_xmit		= vxlan_xmit,
 	.ndo_get_stats64	= ip_tunnel_get_stats64,
 	.ndo_set_rx_mode	= vxlan_set_multicast_list,
-	.ndo_change_mtu		= eth_change_mtu,
+	.ndo_change_mtu		= vxlan_change_mtu,
 	.ndo_validate_addr	= eth_validate_addr,
 	.ndo_set_mac_address	= eth_mac_addr,
 	.ndo_fdb_add		= vxlan_fdb_add,
@@ -2278,7 +2400,7 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port,
 	struct sock *sk;
 	unsigned int h;
 
-	vs = kmalloc(sizeof(*vs), GFP_KERNEL);
+	vs = kzalloc(sizeof(*vs), GFP_KERNEL);
 	if (!vs)
 		return ERR_PTR(-ENOMEM);
 
@@ -2303,9 +2425,14 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port,
 	vs->data = data;
 	rcu_assign_sk_user_data(vs->sock->sk, vs);
 
+	/* Initialize the vxlan udp offloads structure */
+	vs->udp_offloads.port = port;
+	vs->udp_offloads.callbacks.gro_receive  = vxlan_gro_receive;
+	vs->udp_offloads.callbacks.gro_complete = vxlan_gro_complete;
+
 	spin_lock(&vn->sock_lock);
 	hlist_add_head_rcu(&vs->hlist, vs_head(net, port));
-	vxlan_notify_add_rx_port(sk);
+	vxlan_notify_add_rx_port(vs);
 	spin_unlock(&vn->sock_lock);
 
 	/* Mark socket as an encapsulation socket. */
@@ -2630,6 +2757,44 @@ static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
 	.fill_info	= vxlan_fill_info,
 };
 
+static void vxlan_handle_lowerdev_unregister(struct vxlan_net *vn,
+					     struct net_device *dev)
+{
+	struct vxlan_dev *vxlan, *next;
+	LIST_HEAD(list_kill);
+
+	list_for_each_entry_safe(vxlan, next, &vn->vxlan_list, next) {
+		struct vxlan_rdst *dst = &vxlan->default_dst;
+
+		/* In case we created the vxlan device with carrier
+		 * and we lose the carrier due to module unload,
+		 * we also need to remove the vxlan device. In other
+		 * cases this is not necessary and remote_ifindex
+		 * is 0 here, so nothing matches.
+		 */
+		if (dst->remote_ifindex == dev->ifindex)
+			vxlan_dellink(vxlan->dev, &list_kill);
+	}
+
+	unregister_netdevice_many(&list_kill);
+}
+
+static int vxlan_lowerdev_event(struct notifier_block *unused,
+				unsigned long event, void *ptr)
+{
+	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+	struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
+
+	if (event == NETDEV_UNREGISTER)
+		vxlan_handle_lowerdev_unregister(vn, dev);
+
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block vxlan_notifier_block __read_mostly = {
+	.notifier_call = vxlan_lowerdev_event,
+};
+
 static __net_init int vxlan_init_net(struct net *net)
 {
 	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
@@ -2644,22 +2809,8 @@ static __net_init int vxlan_init_net(struct net *net)
 	return 0;
 }
 
-static __net_exit void vxlan_exit_net(struct net *net)
-{
-	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
-	struct vxlan_dev *vxlan;
-	LIST_HEAD(list);
-
-	rtnl_lock();
-	list_for_each_entry(vxlan, &vn->vxlan_list, next)
-		unregister_netdevice_queue(vxlan->dev, &list);
-	unregister_netdevice_many(&list);
-	rtnl_unlock();
-}
-
 static struct pernet_operations vxlan_net_ops = {
 	.init = vxlan_init_net,
-	.exit = vxlan_exit_net,
 	.id   = &vxlan_net_id,
 	.size = sizeof(struct vxlan_net),
 };
@@ -2674,18 +2825,23 @@ static int __init vxlan_init_module(void)
 
 	get_random_bytes(&vxlan_salt, sizeof(vxlan_salt));
 
-	rc = register_pernet_device(&vxlan_net_ops);
+	rc = register_pernet_subsys(&vxlan_net_ops);
 	if (rc)
 		goto out1;
 
-	rc = rtnl_link_register(&vxlan_link_ops);
+	rc = register_netdevice_notifier(&vxlan_notifier_block);
 	if (rc)
 		goto out2;
 
-	return 0;
+	rc = rtnl_link_register(&vxlan_link_ops);
+	if (rc)
+		goto out3;
 
+	return 0;
+out3:
+	unregister_netdevice_notifier(&vxlan_notifier_block);
 out2:
-	unregister_pernet_device(&vxlan_net_ops);
+	unregister_pernet_subsys(&vxlan_net_ops);
 out1:
 	destroy_workqueue(vxlan_wq);
 	return rc;
@@ -2695,13 +2851,15 @@ late_initcall(vxlan_init_module);
 static void __exit vxlan_cleanup_module(void)
 {
 	rtnl_link_unregister(&vxlan_link_ops);
+	unregister_netdevice_notifier(&vxlan_notifier_block);
 	destroy_workqueue(vxlan_wq);
-	unregister_pernet_device(&vxlan_net_ops);
-	rcu_barrier();
+	unregister_pernet_subsys(&vxlan_net_ops);
+	/* rcu_barrier() is called by netns */
 }
 module_exit(vxlan_cleanup_module);
 
 MODULE_LICENSE("GPL");
 MODULE_VERSION(VXLAN_VERSION);
 MODULE_AUTHOR("Stephen Hemminger <stephen@networkplumber.org>");
+MODULE_DESCRIPTION("Driver for VXLAN encapsulated traffic");
 MODULE_ALIAS_RTNL_LINK("vxlan");
diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c
index 851dc7b7e8b0..288610df205c 100644
--- a/drivers/net/wan/dscc4.c
+++ b/drivers/net/wan/dscc4.c
@@ -699,8 +699,6 @@ static void dscc4_free1(struct pci_dev *pdev)
 	for (i = 0; i < dev_per_card; i++)
 		unregister_hdlc_device(dscc4_to_dev(root + i));
 
-	pci_set_drvdata(pdev, NULL);
-
 	for (i = 0; i < dev_per_card; i++)
 		free_netdev(root[i].dev);
 	kfree(root);
diff --git a/drivers/net/wan/hd64570.c b/drivers/net/wan/hd64570.c
index 62f01b74cbd6..dc334c85d966 100644
--- a/drivers/net/wan/hd64570.c
+++ b/drivers/net/wan/hd64570.c
@@ -29,7 +29,6 @@
 #include <linux/fcntl.h>
 #include <linux/hdlc.h>
 #include <linux/in.h>
-#include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/ioport.h>
 #include <linux/jiffies.h>
diff --git a/drivers/net/wan/hd64570.h b/drivers/net/wan/hd64570.h
index e4f539ad071b..10963e8f4b39 100644
--- a/drivers/net/wan/hd64570.h
+++ b/drivers/net/wan/hd64570.h
@@ -159,7 +159,7 @@ typedef struct {
 /* Packet Descriptor Status bits */
 
 #define ST_TX_EOM     0x80	/* End of frame */
-#define ST_TX_EOT     0x01	/* End of transmition */
+#define ST_TX_EOT     0x01	/* End of transmission */
 
 #define ST_RX_EOM     0x80	/* End of frame */
 #define ST_RX_SHORT   0x40	/* Short frame */
@@ -211,7 +211,7 @@ typedef struct {
 
 #define CTL_NORTS     0x01
 #define CTL_IDLE      0x10	/* Transmit an idle pattern */
-#define CTL_UDRNC     0x20	/* Idle after CRC or FCS+flag transmition */
+#define CTL_UDRNC     0x20	/* Idle after CRC or FCS+flag transmission */
 
 #define ST0_TXRDY     0x02	/* TX ready */
 #define ST0_RXRDY     0x01	/* RX ready */
diff --git a/drivers/net/wan/hd64572.c b/drivers/net/wan/hd64572.c
index 6269a09c7369..e92ecf1d3314 100644
--- a/drivers/net/wan/hd64572.c
+++ b/drivers/net/wan/hd64572.c
@@ -29,7 +29,6 @@
 #include <linux/fcntl.h>
 #include <linux/hdlc.h>
 #include <linux/in.h>
-#include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/ioport.h>
 #include <linux/jiffies.h>
diff --git a/drivers/net/wan/hd64572.h b/drivers/net/wan/hd64572.h
index 96567c2dc4db..22137ee669cf 100644
--- a/drivers/net/wan/hd64572.h
+++ b/drivers/net/wan/hd64572.h
@@ -218,7 +218,7 @@ typedef struct {
 #define ST_TX_EOM     0x80	/* End of frame */
 #define ST_TX_UNDRRUN 0x08
 #define ST_TX_OWNRSHP 0x02
-#define ST_TX_EOT     0x01	/* End of transmition */
+#define ST_TX_EOT     0x01	/* End of transmission */
 
 #define ST_RX_EOM     0x80	/* End of frame */
 #define ST_RX_SHORT   0x40	/* Short frame */
diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c
index f51204cfe12f..b2fe9bb89633 100644
--- a/drivers/net/wan/lmc/lmc_main.c
+++ b/drivers/net/wan/lmc/lmc_main.c
@@ -49,7 +49,6 @@
 #include <linux/pci.h>
 #include <linux/delay.h>
 #include <linux/hdlc.h>
-#include <linux/init.h>
 #include <linux/in.h>
 #include <linux/if_arp.h>
 #include <linux/netdevice.h>
@@ -973,7 +972,6 @@ static int lmc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
     return 0;
 
 err_hdlcdev:
-	pci_set_drvdata(pdev, NULL);
 	kfree(sc);
 err_kzalloc:
 	pci_release_regions(pdev);
@@ -995,7 +993,6 @@ static void lmc_remove_one(struct pci_dev *pdev)
 		free_netdev(dev);
 		pci_release_regions(pdev);
 		pci_disable_device(pdev);
-		pci_set_drvdata(pdev, NULL);
 	}
 }
 
diff --git a/drivers/net/wan/pc300too.c b/drivers/net/wan/pc300too.c
index 53efc57fcace..5b72f7f8c516 100644
--- a/drivers/net/wan/pc300too.c
+++ b/drivers/net/wan/pc300too.c
@@ -281,7 +281,6 @@ static void pc300_pci_remove_one(struct pci_dev *pdev)
 
 	pci_release_regions(pdev);
 	pci_disable_device(pdev);
-	pci_set_drvdata(pdev, NULL);
 	if (card->ports[0].netdev)
 		free_netdev(card->ports[0].netdev);
 	if (card->ports[1].netdev)
diff --git a/drivers/net/wan/pci200syn.c b/drivers/net/wan/pci200syn.c
index ddbce54040e2..fe4e3ece3c42 100644
--- a/drivers/net/wan/pci200syn.c
+++ b/drivers/net/wan/pci200syn.c
@@ -260,7 +260,6 @@ static void pci200_pci_remove_one(struct pci_dev *pdev)
 
 	pci_release_regions(pdev);
 	pci_disable_device(pdev);
-	pci_set_drvdata(pdev, NULL);
 	if (card->ports[0].netdev)
 		free_netdev(card->ports[0].netdev);
 	if (card->ports[1].netdev)
diff --git a/drivers/net/wan/sbni.c b/drivers/net/wan/sbni.c
index 388ddf60a66d..1b89ecf0959e 100644
--- a/drivers/net/wan/sbni.c
+++ b/drivers/net/wan/sbni.c
@@ -57,6 +57,7 @@
 
 #include <net/net_namespace.h>
 #include <net/arp.h>
+#include <net/Space.h>
 
 #include <asm/io.h>
 #include <asm/types.h>
diff --git a/drivers/net/wan/wanxl.c b/drivers/net/wan/wanxl.c
index 4c0a69779b89..f76aa9081585 100644
--- a/drivers/net/wan/wanxl.c
+++ b/drivers/net/wan/wanxl.c
@@ -542,7 +542,6 @@ static void wanxl_pci_remove_one(struct pci_dev *pdev)
 
 	pci_release_regions(pdev);
 	pci_disable_device(pdev);
-	pci_set_drvdata(pdev, NULL);
 	kfree(card);
 }
 
diff --git a/drivers/net/wireless/adm8211.c b/drivers/net/wireless/adm8211.c
index cfce83e1f273..f35f93c31b09 100644
--- a/drivers/net/wireless/adm8211.c
+++ b/drivers/net/wireless/adm8211.c
@@ -15,7 +15,6 @@
  * more details.
  */
 
-#include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/if.h>
 #include <linux/skbuff.h>
@@ -1314,7 +1313,7 @@ static void adm8211_bss_info_changed(struct ieee80211_hw *dev,
 	if (!(changes & BSS_CHANGED_BSSID))
 		return;
 
-	if (memcmp(conf->bssid, priv->bssid, ETH_ALEN)) {
+	if (!ether_addr_equal(conf->bssid, priv->bssid)) {
 		adm8211_set_bssid(dev, conf->bssid);
 		memcpy(priv->bssid, conf->bssid, ETH_ALEN);
 	}
@@ -1866,7 +1865,6 @@ static int adm8211_probe(struct pci_dev *pdev,
 	dev->flags = IEEE80211_HW_SIGNAL_UNSPEC;
 	dev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
 
-	dev->channel_change_time = 1000;
 	dev->max_signal = 100;    /* FIXME: find better value */
 
 	dev->queues = 1; /* ADM8211C supports more, maybe ADM8211B too */
diff --git a/drivers/net/wireless/airo_cs.c b/drivers/net/wireless/airo_cs.c
index 14128fd265ac..7e9ede6c5798 100644
--- a/drivers/net/wireless/airo_cs.c
+++ b/drivers/net/wireless/airo_cs.c
@@ -23,7 +23,6 @@
 #ifdef __IN_PCMCIA_PACKAGE__
 #include <pcmcia/k_compat.h>
 #endif
-#include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/ptrace.h>
diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
index 34c8a33cac06..99b3bfa717d5 100644
--- a/drivers/net/wireless/at76c50x-usb.c
+++ b/drivers/net/wireless/at76c50x-usb.c
@@ -1721,7 +1721,7 @@ static void at76_mac80211_tx(struct ieee80211_hw *hw,
 	 * following workaround is necessary. If the TX frame is an
 	 * authentication frame extract the bssid and send the CMD_JOIN. */
 	if (mgmt->frame_control & cpu_to_le16(IEEE80211_STYPE_AUTH)) {
-		if (!ether_addr_equal(priv->bssid, mgmt->bssid)) {
+		if (!ether_addr_equal_64bits(priv->bssid, mgmt->bssid)) {
 			memcpy(priv->bssid, mgmt->bssid, ETH_ALEN);
 			ieee80211_queue_work(hw, &priv->work_join_bssid);
 			dev_kfree_skb_any(skb);
@@ -2112,7 +2112,6 @@ static struct at76_priv *at76_alloc_new_device(struct usb_device *udev)
 	priv->pm_period = 0;
 
 	/* unit us */
-	priv->hw->channel_change_time = 100000;
 
 	return priv;
 }
diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c
index 280fc3d53a36..8aa20df55e50 100644
--- a/drivers/net/wireless/ath/ar5523/ar5523.c
+++ b/drivers/net/wireless/ath/ar5523/ar5523.c
@@ -25,7 +25,6 @@
  * that and only has minimal functionality.
  */
 #include <linux/compiler.h>
-#include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/list.h>
diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
index e0ba7cd14252..b59cfbe0276b 100644
--- a/drivers/net/wireless/ath/ath.h
+++ b/drivers/net/wireless/ath/ath.h
@@ -17,6 +17,7 @@
 #ifndef ATH_H
 #define ATH_H
 
+#include <linux/etherdevice.h>
 #include <linux/skbuff.h>
 #include <linux/if_ether.h>
 #include <linux/spinlock.h>
@@ -165,6 +166,7 @@ struct ath_common {
 struct sk_buff *ath_rxbuf_alloc(struct ath_common *common,
 				u32 len,
 				gfp_t gfp_mask);
+bool ath_is_mybeacon(struct ath_common *common, struct ieee80211_hdr *hdr);
 
 void ath_hw_setbssidmask(struct ath_common *common);
 void ath_key_delete(struct ath_common *common, struct ieee80211_key_conf *key);
diff --git a/drivers/net/wireless/ath/ath10k/Kconfig b/drivers/net/wireless/ath/ath10k/Kconfig
index 82e8088ca9b4..a6f5285235af 100644
--- a/drivers/net/wireless/ath/ath10k/Kconfig
+++ b/drivers/net/wireless/ath/ath10k/Kconfig
@@ -37,3 +37,10 @@ config ATH10K_TRACING
 	---help---
 	  Select this to ath10k use tracing infrastructure.
 
+config ATH10K_DFS_CERTIFIED
+	bool "Atheros DFS support for certified platforms"
+	depends on ATH10K && CFG80211_CERTIFICATION_ONUS
+	default n
+	---help---
+	  This option enables DFS support for initiating radiation on
+	  ath10k.
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
index e46951b8fb92..d44d618b05f9 100644
--- a/drivers/net/wireless/ath/ath10k/ce.c
+++ b/drivers/net/wireless/ath/ath10k/ce.c
@@ -243,6 +243,16 @@ static inline void ath10k_ce_error_intr_enable(struct ath10k *ar,
 			   misc_ie_addr | CE_ERROR_MASK);
 }
 
+static inline void ath10k_ce_error_intr_disable(struct ath10k *ar,
+						u32 ce_ctrl_addr)
+{
+	u32 misc_ie_addr = ath10k_pci_read32(ar,
+					     ce_ctrl_addr + MISC_IE_ADDRESS);
+
+	ath10k_pci_write32(ar, ce_ctrl_addr + MISC_IE_ADDRESS,
+			   misc_ie_addr & ~CE_ERROR_MASK);
+}
+
 static inline void ath10k_ce_engine_int_status_clear(struct ath10k *ar,
 						     u32 ce_ctrl_addr,
 						     unsigned int mask)
@@ -731,7 +741,6 @@ void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id)
 
 void ath10k_ce_per_engine_service_any(struct ath10k *ar)
 {
-	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 	int ce_id, ret;
 	u32 intr_summary;
 
@@ -741,7 +750,7 @@ void ath10k_ce_per_engine_service_any(struct ath10k *ar)
 
 	intr_summary = CE_INTERRUPT_SUMMARY(ar);
 
-	for (ce_id = 0; intr_summary && (ce_id < ar_pci->ce_count); ce_id++) {
+	for (ce_id = 0; intr_summary && (ce_id < CE_COUNT); ce_id++) {
 		if (intr_summary & (1 << ce_id))
 			intr_summary &= ~(1 << ce_id);
 		else
@@ -783,22 +792,25 @@ static void ath10k_ce_per_engine_handler_adjust(struct ath10k_ce_pipe *ce_state,
 	ath10k_pci_sleep(ar);
 }
 
-void ath10k_ce_disable_interrupts(struct ath10k *ar)
+int ath10k_ce_disable_interrupts(struct ath10k *ar)
 {
-	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 	int ce_id, ret;
 
 	ret = ath10k_pci_wake(ar);
 	if (ret)
-		return;
+		return ret;
 
-	for (ce_id = 0; ce_id < ar_pci->ce_count; ce_id++) {
-		struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
-		u32 ctrl_addr = ce_state->ctrl_addr;
+	for (ce_id = 0; ce_id < CE_COUNT; ce_id++) {
+		u32 ctrl_addr = ath10k_ce_base_address(ce_id);
 
 		ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr);
+		ath10k_ce_error_intr_disable(ar, ctrl_addr);
+		ath10k_ce_watermark_intr_disable(ar, ctrl_addr);
 	}
+
 	ath10k_pci_sleep(ar);
+
+	return 0;
 }
 
 void ath10k_ce_send_cb_register(struct ath10k_ce_pipe *ce_state,
@@ -1047,9 +1059,19 @@ struct ath10k_ce_pipe *ath10k_ce_init(struct ath10k *ar,
 				const struct ce_attr *attr)
 {
 	struct ath10k_ce_pipe *ce_state;
-	u32 ctrl_addr = ath10k_ce_base_address(ce_id);
 	int ret;
 
+	/*
+	 * Make sure there's enough CE ringbuffer entries for HTT TX to avoid
+	 * additional TX locking checks.
+	 *
+	 * For the lack of a better place do the check here.
+	 */
+	BUILD_BUG_ON(TARGET_NUM_MSDU_DESC >
+		     (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
+	BUILD_BUG_ON(TARGET_10X_NUM_MSDU_DESC >
+		     (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
+
 	ret = ath10k_pci_wake(ar);
 	if (ret)
 		return NULL;
@@ -1057,7 +1079,7 @@ struct ath10k_ce_pipe *ath10k_ce_init(struct ath10k *ar,
 	ce_state = ath10k_ce_init_state(ar, ce_id, attr);
 	if (!ce_state) {
 		ath10k_err("Failed to initialize CE state for ID: %d\n", ce_id);
-		return NULL;
+		goto out;
 	}
 
 	if (attr->src_nentries) {
@@ -1066,7 +1088,8 @@ struct ath10k_ce_pipe *ath10k_ce_init(struct ath10k *ar,
 			ath10k_err("Failed to initialize CE src ring for ID: %d (%d)\n",
 				   ce_id, ret);
 			ath10k_ce_deinit(ce_state);
-			return NULL;
+			ce_state = NULL;
+			goto out;
 		}
 	}
 
@@ -1076,15 +1099,13 @@ struct ath10k_ce_pipe *ath10k_ce_init(struct ath10k *ar,
 			ath10k_err("Failed to initialize CE dest ring for ID: %d (%d)\n",
 				   ce_id, ret);
 			ath10k_ce_deinit(ce_state);
-			return NULL;
+			ce_state = NULL;
+			goto out;
 		}
 	}
 
-	/* Enable CE error interrupts */
-	ath10k_ce_error_intr_enable(ar, ctrl_addr);
-
+out:
 	ath10k_pci_sleep(ar);
-
 	return ce_state;
 }
 
diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h
index 15d45b5b7615..67dbde6a5c74 100644
--- a/drivers/net/wireless/ath/ath10k/ce.h
+++ b/drivers/net/wireless/ath/ath10k/ce.h
@@ -234,7 +234,7 @@ void ath10k_ce_deinit(struct ath10k_ce_pipe *ce_state);
 /*==================CE Interrupt Handlers====================*/
 void ath10k_ce_per_engine_service_any(struct ath10k *ar);
 void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id);
-void ath10k_ce_disable_interrupts(struct ath10k *ar);
+int ath10k_ce_disable_interrupts(struct ath10k *ar);
 
 /* ce_attr.flags values */
 /* Use NonSnooping PCIe accesses? */
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index 1129994fb105..3b59af3bddf4 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -597,10 +597,8 @@ static int ath10k_init_uart(struct ath10k *ar)
 		return ret;
 	}
 
-	if (!uart_print) {
-		ath10k_info("UART prints disabled\n");
+	if (!uart_print)
 		return 0;
-	}
 
 	ret = ath10k_bmi_write32(ar, hi_dbg_uart_txpin, 7);
 	if (ret) {
@@ -645,8 +643,8 @@ static int ath10k_init_hw_params(struct ath10k *ar)
 
 	ar->hw_params = *hw_params;
 
-	ath10k_info("Hardware name %s version 0x%x\n",
-		    ar->hw_params.name, ar->target_version);
+	ath10k_dbg(ATH10K_DBG_BOOT, "Hardware name %s version 0x%x\n",
+		   ar->hw_params.name, ar->target_version);
 
 	return 0;
 }
@@ -664,7 +662,8 @@ static void ath10k_core_restart(struct work_struct *work)
 		ieee80211_restart_hw(ar->hw);
 		break;
 	case ATH10K_STATE_OFF:
-		/* this can happen if driver is being unloaded */
+		/* this can happen if driver is being unloaded
+		 * or if the crash happens during FW probing */
 		ath10k_warn("cannot restart a device that hasn't been started\n");
 		break;
 	case ATH10K_STATE_RESTARTING:
@@ -737,8 +736,6 @@ EXPORT_SYMBOL(ath10k_core_create);
 
 void ath10k_core_destroy(struct ath10k *ar)
 {
-	ath10k_debug_destroy(ar);
-
 	flush_workqueue(ar->workqueue);
 	destroy_workqueue(ar->workqueue);
 
@@ -786,21 +783,30 @@ int ath10k_core_start(struct ath10k *ar)
 		goto err;
 	}
 
-	status = ath10k_htc_wait_target(&ar->htc);
-	if (status)
+	status = ath10k_hif_start(ar);
+	if (status) {
+		ath10k_err("could not start HIF: %d\n", status);
 		goto err_wmi_detach;
+	}
+
+	status = ath10k_htc_wait_target(&ar->htc);
+	if (status) {
+		ath10k_err("failed to connect to HTC: %d\n", status);
+		goto err_hif_stop;
+	}
 
 	status = ath10k_htt_attach(ar);
 	if (status) {
 		ath10k_err("could not attach htt (%d)\n", status);
-		goto err_wmi_detach;
+		goto err_hif_stop;
 	}
 
 	status = ath10k_init_connect_htc(ar);
 	if (status)
 		goto err_htt_detach;
 
-	ath10k_info("firmware %s booted\n", ar->hw->wiphy->fw_version);
+	ath10k_dbg(ATH10K_DBG_BOOT, "firmware %s booted\n",
+		   ar->hw->wiphy->fw_version);
 
 	status = ath10k_wmi_cmd_init(ar);
 	if (status) {
@@ -826,12 +832,23 @@ int ath10k_core_start(struct ath10k *ar)
 	ar->free_vdev_map = (1 << TARGET_NUM_VDEVS) - 1;
 	INIT_LIST_HEAD(&ar->arvifs);
 
+	if (!test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags))
+		ath10k_info("%s (0x%x) fw %s api %d htt %d.%d\n",
+			    ar->hw_params.name, ar->target_version,
+			    ar->hw->wiphy->fw_version, ar->fw_api,
+			    ar->htt.target_version_major,
+			    ar->htt.target_version_minor);
+
+	__set_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags);
+
 	return 0;
 
 err_disconnect_htc:
 	ath10k_htc_stop(&ar->htc);
 err_htt_detach:
 	ath10k_htt_detach(&ar->htt);
+err_hif_stop:
+	ath10k_hif_stop(ar);
 err_wmi_detach:
 	ath10k_wmi_detach(ar);
 err:
@@ -985,6 +1002,8 @@ void ath10k_core_unregister(struct ath10k *ar)
 	ath10k_mac_unregister(ar);
 
 	ath10k_core_free_firmware_files(ar);
+
+	ath10k_debug_destroy(ar);
 }
 EXPORT_SYMBOL(ath10k_core_unregister);
 
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index 0934f7633de3..ade1781c7186 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -30,6 +30,7 @@
 #include "wmi.h"
 #include "../ath.h"
 #include "../regd.h"
+#include "../dfs_pattern_detector.h"
 
 #define MS(_v, _f) (((_v) & _f##_MASK) >> _f##_LSB)
 #define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK)
@@ -43,7 +44,7 @@
 /* Antenna noise floor */
 #define ATH10K_DEFAULT_NOISE_FLOOR -95
 
-#define ATH10K_MAX_NUM_MGMT_PENDING 16
+#define ATH10K_MAX_NUM_MGMT_PENDING 128
 
 struct ath10k;
 
@@ -192,6 +193,14 @@ struct ath10k_target_stats {
 
 };
 
+struct ath10k_dfs_stats {
+	u32 phy_errors;
+	u32 pulses_total;
+	u32 pulses_detected;
+	u32 pulses_discarded;
+	u32 radar_detected;
+};
+
 #define ATH10K_MAX_NUM_PEER_IDS (1 << 11) /* htt rx_desc limit */
 
 struct ath10k_peer {
@@ -244,6 +253,9 @@ struct ath10k_vif {
 			u8 bssid[ETH_ALEN];
 		} ibss;
 	} u;
+
+	u8 fixed_rate;
+	u8 fixed_nss;
 };
 
 struct ath10k_vif_iter {
@@ -261,6 +273,10 @@ struct ath10k_debug {
 
 	unsigned long htt_stats_mask;
 	struct delayed_work htt_stats_dwork;
+	struct ath10k_dfs_stats dfs_stats;
+	struct ath_dfs_pool_stats dfs_pool_stats;
+
+	u32 fw_dbglog_mask;
 };
 
 enum ath10k_state {
@@ -295,10 +311,19 @@ enum ath10k_fw_features {
 	/* firmware support tx frame management over WMI, otherwise it's HTT */
 	ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX = 2,
 
+	/* Firmware does not support P2P */
+	ATH10K_FW_FEATURE_NO_P2P = 3,
+
 	/* keep last */
 	ATH10K_FW_FEATURE_COUNT,
 };
 
+enum ath10k_dev_flags {
+	/* Indicates that ath10k device is during CAC phase of DFS */
+	ATH10K_CAC_RUNNING,
+	ATH10K_FLAG_FIRST_BOOT_DONE,
+};
+
 struct ath10k {
 	struct ath_common ath_common;
 	struct ieee80211_hw *hw;
@@ -392,6 +417,8 @@ struct ath10k {
 	bool monitor_enabled;
 	bool monitor_present;
 	unsigned int filter_flags;
+	unsigned long dev_flags;
+	u32 dfs_block_radar_events;
 
 	struct wmi_pdev_set_wmm_params_arg wmm_params;
 	struct completion install_key_done;
@@ -410,6 +437,9 @@ struct ath10k {
 	struct list_head peers;
 	wait_queue_head_t peer_mapping_wq;
 
+	/* number of created peers; protected by data_lock */
+	int num_peers;
+
 	struct work_struct offchan_tx_work;
 	struct sk_buff_head offchan_tx_queue;
 	struct completion offchan_tx_completed;
@@ -428,6 +458,8 @@ struct ath10k {
 	u32 survey_last_cycle_count;
 	struct survey_info survey[ATH10K_NUM_CHANS];
 
+	struct dfs_pattern_detector *dfs_detector;
+
 #ifdef CONFIG_ATH10K_DEBUGFS
 	struct ath10k_debug debug;
 #endif
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
index 760ff2289e3c..6debd281350a 100644
--- a/drivers/net/wireless/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -614,6 +614,61 @@ static const struct file_operations fops_htt_stats_mask = {
 	.llseek = default_llseek,
 };
 
+static ssize_t ath10k_read_fw_dbglog(struct file *file,
+					    char __user *user_buf,
+					    size_t count, loff_t *ppos)
+{
+	struct ath10k *ar = file->private_data;
+	unsigned int len;
+	char buf[32];
+
+	len = scnprintf(buf, sizeof(buf), "0x%08x\n",
+			ar->debug.fw_dbglog_mask);
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t ath10k_write_fw_dbglog(struct file *file,
+				      const char __user *user_buf,
+				      size_t count, loff_t *ppos)
+{
+	struct ath10k *ar = file->private_data;
+	unsigned long mask;
+	int ret;
+
+	ret = kstrtoul_from_user(user_buf, count, 0, &mask);
+	if (ret)
+		return ret;
+
+	mutex_lock(&ar->conf_mutex);
+
+	ar->debug.fw_dbglog_mask = mask;
+
+	if (ar->state == ATH10K_STATE_ON) {
+		ret = ath10k_wmi_dbglog_cfg(ar, ar->debug.fw_dbglog_mask);
+		if (ret) {
+			ath10k_warn("dbglog cfg failed from debugfs: %d\n",
+				    ret);
+			goto exit;
+		}
+	}
+
+	ret = count;
+
+exit:
+	mutex_unlock(&ar->conf_mutex);
+
+	return ret;
+}
+
+static const struct file_operations fops_fw_dbglog = {
+	.read = ath10k_read_fw_dbglog,
+	.write = ath10k_write_fw_dbglog,
+	.open = simple_open,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
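The fw_dbglog file above accepts a hex mask via kstrtoul_from_user() and pushes it to the firmware whenever the device is up. A minimal userspace sketch for setting it; the debugfs path is an assumption (it depends on where debugfs is mounted and on the actual phy name).

/* Hypothetical writer for the fw_dbglog debugfs file added above.
 * The path below is an assumption; substitute the real phy name. */
#include <stdio.h>

int main(void)
{
	const char *path = "/sys/kernel/debug/ieee80211/phy0/ath10k/fw_dbglog";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	fprintf(f, "0xffffffff\n");	/* enable all firmware dbglog modules */
	fclose(f);
	return 0;
}
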
 int ath10k_debug_start(struct ath10k *ar)
 {
 	int ret;
@@ -625,6 +680,14 @@ int ath10k_debug_start(struct ath10k *ar)
 		/* continue normally anyway, this isn't serious */
 		ath10k_warn("failed to start htt stats workqueue: %d\n", ret);
 
+	if (ar->debug.fw_dbglog_mask) {
+		ret = ath10k_wmi_dbglog_cfg(ar, ar->debug.fw_dbglog_mask);
+		if (ret)
+			/* not serious */
+			ath10k_warn("failed to enable dbglog during start: %d",
+				    ret);
+	}
+
 	return 0;
 }
 
@@ -639,6 +702,86 @@ void ath10k_debug_stop(struct ath10k *ar)
 		cancel_delayed_work(&ar->debug.htt_stats_dwork);
 }
 
+static ssize_t ath10k_write_simulate_radar(struct file *file,
+					   const char __user *user_buf,
+					   size_t count, loff_t *ppos)
+{
+	struct ath10k *ar = file->private_data;
+
+	ieee80211_radar_detected(ar->hw);
+
+	return count;
+}
+
+static const struct file_operations fops_simulate_radar = {
+	.write = ath10k_write_simulate_radar,
+	.open = simple_open,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
+#define ATH10K_DFS_STAT(s, p) (\
+	len += scnprintf(buf + len, size - len, "%-28s : %10u\n", s, \
+			 ar->debug.dfs_stats.p))
+
+#define ATH10K_DFS_POOL_STAT(s, p) (\
+	len += scnprintf(buf + len, size - len, "%-28s : %10u\n", s, \
+			 ar->debug.dfs_pool_stats.p))
+
+static ssize_t ath10k_read_dfs_stats(struct file *file, char __user *user_buf,
+				     size_t count, loff_t *ppos)
+{
+	int retval = 0, len = 0;
+	const int size = 8000;
+	struct ath10k *ar = file->private_data;
+	char *buf;
+
+	buf = kzalloc(size, GFP_KERNEL);
+	if (buf == NULL)
+		return -ENOMEM;
+
+	if (!ar->dfs_detector) {
+		len += scnprintf(buf + len, size - len, "DFS not enabled\n");
+		goto exit;
+	}
+
+	ar->debug.dfs_pool_stats =
+			ar->dfs_detector->get_stats(ar->dfs_detector);
+
+	len += scnprintf(buf + len, size - len, "Pulse detector statistics:\n");
+
+	ATH10K_DFS_STAT("reported phy errors", phy_errors);
+	ATH10K_DFS_STAT("pulse events reported", pulses_total);
+	ATH10K_DFS_STAT("DFS pulses detected", pulses_detected);
+	ATH10K_DFS_STAT("DFS pulses discarded", pulses_discarded);
+	ATH10K_DFS_STAT("Radars detected", radar_detected);
+
+	len += scnprintf(buf + len, size - len, "Global Pool statistics:\n");
+	ATH10K_DFS_POOL_STAT("Pool references", pool_reference);
+	ATH10K_DFS_POOL_STAT("Pulses allocated", pulse_allocated);
+	ATH10K_DFS_POOL_STAT("Pulses alloc error", pulse_alloc_error);
+	ATH10K_DFS_POOL_STAT("Pulses in use", pulse_used);
+	ATH10K_DFS_POOL_STAT("Seqs. allocated", pseq_allocated);
+	ATH10K_DFS_POOL_STAT("Seqs. alloc error", pseq_alloc_error);
+	ATH10K_DFS_POOL_STAT("Seqs. in use", pseq_used);
+
+exit:
+	if (len > size)
+		len = size;
+
+	retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+	kfree(buf);
+
+	return retval;
+}
+
+static const struct file_operations fops_dfs_stats = {
+	.read = ath10k_read_dfs_stats,
+	.open = simple_open,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
 int ath10k_debug_create(struct ath10k *ar)
 {
 	ar->debug.debugfs_phy = debugfs_create_dir("ath10k",
@@ -667,6 +810,23 @@ int ath10k_debug_create(struct ath10k *ar)
 	debugfs_create_file("htt_stats_mask", S_IRUSR, ar->debug.debugfs_phy,
 			    ar, &fops_htt_stats_mask);
 
+	debugfs_create_file("fw_dbglog", S_IRUSR, ar->debug.debugfs_phy,
+			    ar, &fops_fw_dbglog);
+
+	if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED)) {
+		debugfs_create_file("dfs_simulate_radar", S_IWUSR,
+				    ar->debug.debugfs_phy, ar,
+				    &fops_simulate_radar);
+
+		debugfs_create_bool("dfs_block_radar_events", S_IWUSR,
+				    ar->debug.debugfs_phy,
+				    &ar->dfs_block_radar_events);
+
+		debugfs_create_file("dfs_stats", S_IRUSR,
+				    ar->debug.debugfs_phy, ar,
+				    &fops_dfs_stats);
+	}
+
 	return 0;
 }
 
diff --git a/drivers/net/wireless/ath/ath10k/debug.h b/drivers/net/wireless/ath/ath10k/debug.h
index 3cfe3ee90dbe..1773c36c71a0 100644
--- a/drivers/net/wireless/ath/ath10k/debug.h
+++ b/drivers/net/wireless/ath/ath10k/debug.h
@@ -33,6 +33,7 @@ enum ath10k_debug_mask {
 	ATH10K_DBG_MGMT		= 0x00000100,
 	ATH10K_DBG_DATA		= 0x00000200,
 	ATH10K_DBG_BMI		= 0x00000400,
+	ATH10K_DBG_REGULATORY	= 0x00000800,
 	ATH10K_DBG_ANY		= 0xffffffff,
 };
 
@@ -53,6 +54,8 @@ void ath10k_debug_read_service_map(struct ath10k *ar,
 void ath10k_debug_read_target_stats(struct ath10k *ar,
 				    struct wmi_stats_event *ev);
 
+#define ATH10K_DFS_STAT_INC(ar, c) (ar->debug.dfs_stats.c++)
+
 #else
 static inline int ath10k_debug_start(struct ath10k *ar)
 {
@@ -82,6 +85,9 @@ static inline void ath10k_debug_read_target_stats(struct ath10k *ar,
 						  struct wmi_stats_event *ev)
 {
 }
+
+#define ATH10K_DFS_STAT_INC(ar, c) do { } while (0)
+
 #endif /* CONFIG_ATH10K_DEBUGFS */
 
 #ifdef CONFIG_ATH10K_DEBUG
diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
index edae50b52806..edc57ab505c8 100644
--- a/drivers/net/wireless/ath/ath10k/htc.c
+++ b/drivers/net/wireless/ath/ath10k/htc.c
@@ -191,6 +191,11 @@ static int ath10k_htc_tx_completion_handler(struct ath10k *ar,
 	struct ath10k_htc *htc = &ar->htc;
 	struct ath10k_htc_ep *ep = &htc->endpoint[eid];
 
+	if (!skb) {
+		ath10k_warn("invalid sk_buff completion - NULL pointer. firmware crashed?\n");
+		return 0;
+	}
+
 	ath10k_htc_notify_tx_completion(ep, skb);
 	/* the skb now belongs to the completion handler */
 
@@ -534,14 +539,6 @@ int ath10k_htc_wait_target(struct ath10k_htc *htc)
 	u16 credit_count;
 	u16 credit_size;
 
-	reinit_completion(&htc->ctl_resp);
-
-	status = ath10k_hif_start(htc->ar);
-	if (status) {
-		ath10k_err("could not start HIF (%d)\n", status);
-		goto err_start;
-	}
-
 	status = wait_for_completion_timeout(&htc->ctl_resp,
 					     ATH10K_HTC_WAIT_TIMEOUT_HZ);
 	if (status <= 0) {
@@ -549,15 +546,13 @@ int ath10k_htc_wait_target(struct ath10k_htc *htc)
 			status = -ETIMEDOUT;
 
 		ath10k_err("ctl_resp never came in (%d)\n", status);
-		goto err_target;
+		return status;
 	}
 
 	if (htc->control_resp_len < sizeof(msg->hdr) + sizeof(msg->ready)) {
 		ath10k_err("Invalid HTC ready msg len:%d\n",
 			   htc->control_resp_len);
-
-		status = -ECOMM;
-		goto err_target;
+		return -ECOMM;
 	}
 
 	msg = (struct ath10k_htc_msg *)htc->control_resp_buffer;
@@ -567,8 +562,7 @@ int ath10k_htc_wait_target(struct ath10k_htc *htc)
 
 	if (message_id != ATH10K_HTC_MSG_READY_ID) {
 		ath10k_err("Invalid HTC ready msg: 0x%x\n", message_id);
-		status = -ECOMM;
-		goto err_target;
+		return -ECOMM;
 	}
 
 	htc->total_transmit_credits = credit_count;
@@ -581,9 +575,8 @@ int ath10k_htc_wait_target(struct ath10k_htc *htc)
 
 	if ((htc->total_transmit_credits == 0) ||
 	    (htc->target_credit_size == 0)) {
-		status = -ECOMM;
 		ath10k_err("Invalid credit size received\n");
-		goto err_target;
+		return -ECOMM;
 	}
 
 	ath10k_htc_setup_target_buffer_assignments(htc);
@@ -600,14 +593,10 @@ int ath10k_htc_wait_target(struct ath10k_htc *htc)
 	status = ath10k_htc_connect_service(htc, &conn_req, &conn_resp);
 	if (status) {
 		ath10k_err("could not connect to htc service (%d)\n", status);
-		goto err_target;
+		return status;
 	}
 
 	return 0;
-err_target:
-	ath10k_hif_stop(htc->ar);
-err_start:
-	return status;
 }
 
 int ath10k_htc_connect_service(struct ath10k_htc *htc,
diff --git a/drivers/net/wireless/ath/ath10k/htt.c b/drivers/net/wireless/ath/ath10k/htt.c
index 5f7eeebc5432..69697af59ce0 100644
--- a/drivers/net/wireless/ath/ath10k/htt.c
+++ b/drivers/net/wireless/ath/ath10k/htt.c
@@ -104,8 +104,8 @@ err_htc_attach:
 
 static int ath10k_htt_verify_version(struct ath10k_htt *htt)
 {
-	ath10k_info("htt target version %d.%d\n",
-		    htt->target_version_major, htt->target_version_minor);
+	ath10k_dbg(ATH10K_DBG_BOOT, "htt target version %d.%d\n",
+		   htt->target_version_major, htt->target_version_minor);
 
 	if (htt->target_version_major != 2 &&
 	    htt->target_version_major != 3) {
diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h
index 1a337e93b7e9..b93ae355bc08 100644
--- a/drivers/net/wireless/ath/ath10k/htt.h
+++ b/drivers/net/wireless/ath/ath10k/htt.h
@@ -1182,6 +1182,8 @@ struct htt_rx_info {
 		u32 info2;
 	} rate;
 	bool fcs_err;
+	bool amsdu_more;
+	bool mic_err;
 };
 
 struct ath10k_htt {
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index 90d4f74c28d7..fe8bd1b59f0e 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -659,23 +659,6 @@ static void ath10k_htt_rx_amsdu(struct ath10k_htt *htt,
 	memcpy(hdr_buf, hdr, hdr_len);
 	hdr = (struct ieee80211_hdr *)hdr_buf;
 
-	/* FIXME: Hopefully this is a temporary measure.
-	 *
-	 * Reporting individual A-MSDU subframes means each reported frame
-	 * shares the same sequence number.
-	 *
-	 * mac80211 drops frames it recognizes as duplicates, i.e.
-	 * retransmission flag is set and sequence number matches sequence
-	 * number from a previous frame (as per IEEE 802.11-2012: 9.3.2.10
-	 * "Duplicate detection and recovery")
-	 *
-	 * To avoid frames being dropped clear retransmission flag for all
-	 * received A-MSDUs.
-	 *
-	 * Worst case: actual duplicate frames will be reported but this should
-	 * still be handled gracefully by other OSI/ISO layers. */
-	hdr->frame_control &= cpu_to_le16(~IEEE80211_FCTL_RETRY);
-
 	first = skb;
 	while (skb) {
 		void *decap_hdr;
@@ -746,6 +729,9 @@ static void ath10k_htt_rx_amsdu(struct ath10k_htt *htt,
 		skb = skb->next;
 		info->skb->next = NULL;
 
+		if (skb)
+			info->amsdu_more = true;
+
 		ath10k_process_rx(htt->ar, info);
 	}
 
@@ -852,6 +838,20 @@ static bool ath10k_htt_rx_has_fcs_err(struct sk_buff *skb)
 	return false;
 }
 
+static bool ath10k_htt_rx_has_mic_err(struct sk_buff *skb)
+{
+	struct htt_rx_desc *rxd;
+	u32 flags;
+
+	rxd = (void *)skb->data - sizeof(*rxd);
+	flags = __le32_to_cpu(rxd->attention.flags);
+
+	if (flags & RX_ATTENTION_FLAGS_TKIP_MIC_ERR)
+		return true;
+
+	return false;
+}
+
 static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
 {
 	struct htt_rx_desc *rxd;
@@ -959,6 +959,11 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
 				continue;
 			}
 
+			if (test_bit(ATH10K_CAC_RUNNING, &htt->ar->dev_flags)) {
+				ath10k_htt_rx_free_msdu_chain(msdu_head);
+				continue;
+			}
+
 			/* FIXME: we do not support chaining yet.
 			 * this needs investigation */
 			if (msdu_chaining) {
@@ -969,6 +974,7 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
 
 			info.skb     = msdu_head;
 			info.fcs_err = ath10k_htt_rx_has_fcs_err(msdu_head);
+			info.mic_err = ath10k_htt_rx_has_mic_err(msdu_head);
 			info.signal  = ATH10K_DEFAULT_NOISE_FLOOR;
 			info.signal += rx->ppdu.combined_rssi;
 
diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c
index d9335e9d0d04..f1d36d2d2723 100644
--- a/drivers/net/wireless/ath/ath10k/htt_tx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_tx.c
@@ -85,16 +85,13 @@ void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id)
 
 int ath10k_htt_tx_attach(struct ath10k_htt *htt)
 {
-	u8 pipe;
-
 	spin_lock_init(&htt->tx_lock);
 	init_waitqueue_head(&htt->empty_tx_wq);
 
-	/* At the beginning free queue number should hint us the maximum
-	 * queue length */
-	pipe = htt->ar->htc.endpoint[htt->eid].ul_pipe_id;
-	htt->max_num_pending_tx = ath10k_hif_get_free_queue_number(htt->ar,
-								   pipe);
+	if (test_bit(ATH10K_FW_FEATURE_WMI_10X, htt->ar->fw_features))
+		htt->max_num_pending_tx = TARGET_10X_NUM_MSDU_DESC;
+	else
+		htt->max_num_pending_tx = TARGET_NUM_MSDU_DESC;
 
 	ath10k_dbg(ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n",
 		   htt->max_num_pending_tx);
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index 8aeb46d9b534..f1505a25d810 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -115,6 +115,7 @@ enum ath10k_mcast2ucast_mode {
 #define TARGET_10X_MAC_AGGR_DELIM		0
 #define TARGET_10X_AST_SKID_LIMIT		16
 #define TARGET_10X_NUM_PEERS			(128 + (TARGET_10X_NUM_VDEVS))
+#define TARGET_10X_NUM_PEERS_MAX		128
 #define TARGET_10X_NUM_OFFLOAD_PEERS		0
 #define TARGET_10X_NUM_OFFLOAD_REORDER_BUFS	0
 #define TARGET_10X_NUM_PEER_KEYS		2
@@ -269,6 +270,7 @@ enum ath10k_mcast2ucast_mode {
 #define CORE_CTRL_CPU_INTR_MASK			0x00002000
 #define CORE_CTRL_ADDRESS			0x0000
 #define PCIE_INTR_ENABLE_ADDRESS		0x0008
+#define PCIE_INTR_CAUSE_ADDRESS			0x000c
 #define PCIE_INTR_CLR_ADDRESS			0x0014
 #define SCRATCH_3_ADDRESS			0x0030
 
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index 97ac8c87cba2..776e364eadcd 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -322,12 +322,19 @@ static int ath10k_peer_create(struct ath10k *ar, u32 vdev_id, const u8 *addr)
 	lockdep_assert_held(&ar->conf_mutex);
 
 	ret = ath10k_wmi_peer_create(ar, vdev_id, addr);
-	if (ret)
+	if (ret) {
+		ath10k_warn("Failed to create wmi peer: %i\n", ret);
 		return ret;
+	}
 
 	ret = ath10k_wait_for_peer_created(ar, vdev_id, addr);
-	if (ret)
+	if (ret) {
+		ath10k_warn("Failed to wait for created wmi peer: %i\n", ret);
 		return ret;
+	}
+	spin_lock_bh(&ar->data_lock);
+	ar->num_peers++;
+	spin_unlock_bh(&ar->data_lock);
 
 	return 0;
 }
@@ -373,6 +380,10 @@ static int ath10k_peer_delete(struct ath10k *ar, u32 vdev_id, const u8 *addr)
 	if (ret)
 		return ret;
 
+	spin_lock_bh(&ar->data_lock);
+	ar->num_peers--;
+	spin_unlock_bh(&ar->data_lock);
+
 	return 0;
 }
 
@@ -392,6 +403,7 @@ static void ath10k_peer_cleanup(struct ath10k *ar, u32 vdev_id)
 
 		list_del(&peer->list);
 		kfree(peer);
+		ar->num_peers--;
 	}
 	spin_unlock_bh(&ar->data_lock);
 }
@@ -407,6 +419,7 @@ static void ath10k_peer_cleanup_all(struct ath10k *ar)
 		list_del(&peer->list);
 		kfree(peer);
 	}
+	ar->num_peers = 0;
 	spin_unlock_bh(&ar->data_lock);
 }
 
@@ -450,15 +463,19 @@ static int ath10k_vdev_start(struct ath10k_vif *arvif)
 
 	arg.channel.mode = chan_to_phymode(&conf->chandef);
 
-	arg.channel.min_power = channel->max_power * 3;
-	arg.channel.max_power = channel->max_power * 4;
-	arg.channel.max_reg_power = channel->max_reg_power * 4;
-	arg.channel.max_antenna_gain = channel->max_antenna_gain;
+	arg.channel.min_power = 0;
+	arg.channel.max_power = channel->max_power * 2;
+	arg.channel.max_reg_power = channel->max_reg_power * 2;
+	arg.channel.max_antenna_gain = channel->max_antenna_gain * 2;
 
 	if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
 		arg.ssid = arvif->u.ap.ssid;
 		arg.ssid_len = arvif->u.ap.ssid_len;
 		arg.hidden_ssid = arvif->u.ap.hidden_ssid;
+
+		/* For now allow DFS for AP mode */
+		arg.channel.chan_radar =
+			!!(channel->flags & IEEE80211_CHAN_RADAR);
 	} else if (arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
 		arg.ssid = arvif->vif->bss_conf.ssid;
 		arg.ssid_len = arvif->vif->bss_conf.ssid_len;
@@ -516,6 +533,11 @@ static int ath10k_monitor_start(struct ath10k *ar, int vdev_id)
 
 	lockdep_assert_held(&ar->conf_mutex);
 
+	if (!ar->monitor_present) {
+		ath10k_warn("mac montor stop -- monitor is not present\n");
+		return -EINVAL;
+	}
+
 	arg.vdev_id = vdev_id;
 	arg.channel.freq = channel->center_freq;
 	arg.channel.band_center_freq1 = ar->hw->conf.chandef.center_freq1;
@@ -523,11 +545,13 @@ static int ath10k_monitor_start(struct ath10k *ar, int vdev_id)
 	/* TODO setup this dynamically, what in case we
 	   don't have any vifs? */
 	arg.channel.mode = chan_to_phymode(&ar->hw->conf.chandef);
+	arg.channel.chan_radar =
+			!!(channel->flags & IEEE80211_CHAN_RADAR);
 
-	arg.channel.min_power = channel->max_power * 3;
-	arg.channel.max_power = channel->max_power * 4;
-	arg.channel.max_reg_power = channel->max_reg_power * 4;
-	arg.channel.max_antenna_gain = channel->max_antenna_gain;
+	arg.channel.min_power = 0;
+	arg.channel.max_power = channel->max_power * 2;
+	arg.channel.max_reg_power = channel->max_reg_power * 2;
+	arg.channel.max_antenna_gain = channel->max_antenna_gain * 2;
 
 	ret = ath10k_wmi_vdev_start(ar, &arg);
 	if (ret) {
@@ -566,6 +590,16 @@ static int ath10k_monitor_stop(struct ath10k *ar)
 
 	lockdep_assert_held(&ar->conf_mutex);
 
+	if (!ar->monitor_present) {
+		ath10k_warn("mac monitor stop -- monitor is not present\n");
+		return -EINVAL;
+	}
+
+	if (!ar->monitor_enabled) {
+		ath10k_warn("mac monitor stop -- monitor is not enabled\n");
+		return -EINVAL;
+	}
+
 	ret = ath10k_wmi_vdev_down(ar, ar->monitor_vdev_id);
 	if (ret)
 		ath10k_warn("Monitor vdev down failed: %d\n", ret);
@@ -647,6 +681,107 @@ static int ath10k_monitor_destroy(struct ath10k *ar)
 	return ret;
 }
 
+static int ath10k_start_cac(struct ath10k *ar)
+{
+	int ret;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	set_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
+
+	ret = ath10k_monitor_create(ar);
+	if (ret) {
+		clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
+		return ret;
+	}
+
+	ret = ath10k_monitor_start(ar, ar->monitor_vdev_id);
+	if (ret) {
+		clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
+		ath10k_monitor_destroy(ar);
+		return ret;
+	}
+
+	ath10k_dbg(ATH10K_DBG_MAC, "mac cac start monitor vdev %d\n",
+		   ar->monitor_vdev_id);
+
+	return 0;
+}
+
+static int ath10k_stop_cac(struct ath10k *ar)
+{
+	lockdep_assert_held(&ar->conf_mutex);
+
+	/* CAC is not running - do nothing */
+	if (!test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags))
+		return 0;
+
+	ath10k_monitor_stop(ar);
+	ath10k_monitor_destroy(ar);
+	clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
+
+	ath10k_dbg(ATH10K_DBG_MAC, "mac cac finished\n");
+
+	return 0;
+}
+
+static const char *ath10k_dfs_state(enum nl80211_dfs_state dfs_state)
+{
+	switch (dfs_state) {
+	case NL80211_DFS_USABLE:
+		return "USABLE";
+	case NL80211_DFS_UNAVAILABLE:
+		return "UNAVAILABLE";
+	case NL80211_DFS_AVAILABLE:
+		return "AVAILABLE";
+	default:
+		WARN_ON(1);
+		return "bug";
+	}
+}
+
+static void ath10k_config_radar_detection(struct ath10k *ar)
+{
+	struct ieee80211_channel *chan = ar->hw->conf.chandef.chan;
+	bool radar = ar->hw->conf.radar_enabled;
+	bool chan_radar = !!(chan->flags & IEEE80211_CHAN_RADAR);
+	enum nl80211_dfs_state dfs_state = chan->dfs_state;
+	int ret;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	ath10k_dbg(ATH10K_DBG_MAC,
+		   "mac radar config update: chan %dMHz radar %d chan radar %d chan state %s\n",
+		   chan->center_freq, radar, chan_radar,
+		   ath10k_dfs_state(dfs_state));
+
+	/*
+	 * It's safe to call it even if CAC is not started.
+	 * This call here guarantees changing channel, etc. will stop CAC.
+	 */
+	ath10k_stop_cac(ar);
+
+	if (!radar)
+		return;
+
+	if (!chan_radar)
+		return;
+
+	if (dfs_state != NL80211_DFS_USABLE)
+		return;
+
+	ret = ath10k_start_cac(ar);
+	if (ret) {
+		/*
+		 * Not possible to start CAC on current channel so starting
+		 * radiation is not allowed, make this channel DFS_UNAVAILABLE
+		 * by indicating that radar was detected.
+		 */
+		ath10k_warn("failed to start CAC (%d)\n", ret);
+		ieee80211_radar_detected(ar->hw);
+	}
+}
+
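
Taken together, the helpers above implement the CAC (channel availability check) flow: when radar detection is enabled and the configured channel is a DFS channel still in the USABLE state, a monitor vdev is started to listen for radar, and a failure to start CAC is reported as a detected radar so mac80211 marks the channel UNAVAILABLE instead of allowing transmission without a completed check. Condensed for orientation (same logic as ath10k_config_radar_detection() above):

	if (hw->conf.radar_enabled &&
	    (chan->flags & IEEE80211_CHAN_RADAR) &&
	    chan->dfs_state == NL80211_DFS_USABLE) {
		if (ath10k_start_cac(ar))
			ieee80211_radar_detected(ar->hw); /* channel becomes DFS_UNAVAILABLE */
	}
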
 static void ath10k_control_beaconing(struct ath10k_vif *arvif,
 				struct ieee80211_bss_conf *info)
 {
@@ -1351,19 +1486,22 @@ static int ath10k_update_channel_list(struct ath10k *ar)
 			ch->allow_vht = true;
 
 			ch->allow_ibss =
-				!(channel->flags & IEEE80211_CHAN_NO_IBSS);
+				!(channel->flags & IEEE80211_CHAN_NO_IR);
 
 			ch->ht40plus =
 				!(channel->flags & IEEE80211_CHAN_NO_HT40PLUS);
 
-			passive = channel->flags & IEEE80211_CHAN_PASSIVE_SCAN;
+			ch->chan_radar =
+				!!(channel->flags & IEEE80211_CHAN_RADAR);
+
+			passive = channel->flags & IEEE80211_CHAN_NO_IR;
 			ch->passive = passive;
 
 			ch->freq = channel->center_freq;
-			ch->min_power = channel->max_power * 3;
-			ch->max_power = channel->max_power * 4;
-			ch->max_reg_power = channel->max_reg_power * 4;
-			ch->max_antenna_gain = channel->max_antenna_gain;
+			ch->min_power = 0;
+			ch->max_power = channel->max_power * 2;
+			ch->max_reg_power = channel->max_reg_power * 2;
+			ch->max_antenna_gain = channel->max_antenna_gain * 2;
 			ch->reg_class_id = 0; /* FIXME */
 
 			/* FIXME: why use only legacy modes, why not any
@@ -1423,9 +1561,20 @@ static void ath10k_reg_notifier(struct wiphy *wiphy,
 {
 	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
 	struct ath10k *ar = hw->priv;
+	bool result;
 
 	ath_reg_notifier_apply(wiphy, request, &ar->ath_common.regulatory);
 
+	if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) {
+		ath10k_dbg(ATH10K_DBG_REGULATORY, "dfs region 0x%x\n",
+			   request->dfs_region);
+		result = ar->dfs_detector->set_dfs_domain(ar->dfs_detector,
+							  request->dfs_region);
+		if (!result)
+			ath10k_warn("dfs region 0x%X not supported, will trigger radar for every pulse\n",
+				    request->dfs_region);
+	}
+
 	mutex_lock(&ar->conf_mutex);
 	if (ar->state == ATH10K_STATE_ON)
 		ath10k_regd_update(ar);
@@ -1714,8 +1863,10 @@ void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work)
 			break;
 
 		ret = ath10k_wmi_mgmt_tx(ar, skb);
-		if (ret)
+		if (ret) {
 			ath10k_warn("wmi mgmt_tx failed (%d)\n", ret);
+			ieee80211_free_txskb(ar->hw, skb);
+		}
 	}
 }
 
@@ -1889,6 +2040,7 @@ void ath10k_halt(struct ath10k *ar)
 {
 	lockdep_assert_held(&ar->conf_mutex);
 
+	ath10k_stop_cac(ar);
 	del_timer_sync(&ar->scan.timeout);
 	ath10k_offchan_tx_purge(ar);
 	ath10k_mgmt_over_wmi_tx_purge(ar);
@@ -1943,7 +2095,7 @@ static int ath10k_start(struct ieee80211_hw *hw)
 		ath10k_warn("could not enable WMI_PDEV_PARAM_PMF_QOS (%d)\n",
 			    ret);
 
-	ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->dynamic_bw, 0);
+	ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->dynamic_bw, 1);
 	if (ret)
 		ath10k_warn("could not init WMI_PDEV_PARAM_DYNAMIC_BW (%d)\n",
 			    ret);
@@ -1998,15 +2150,40 @@ static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
 	struct ath10k *ar = hw->priv;
 	struct ieee80211_conf *conf = &hw->conf;
 	int ret = 0;
+	u32 param;
 
 	mutex_lock(&ar->conf_mutex);
 
 	if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
-		ath10k_dbg(ATH10K_DBG_MAC, "mac config channel %d mhz\n",
-			   conf->chandef.chan->center_freq);
+		ath10k_dbg(ATH10K_DBG_MAC,
+			   "mac config channel %d mhz flags 0x%x\n",
+			   conf->chandef.chan->center_freq,
+			   conf->chandef.chan->flags);
+
 		spin_lock_bh(&ar->data_lock);
 		ar->rx_channel = conf->chandef.chan;
 		spin_unlock_bh(&ar->data_lock);
+
+		ath10k_config_radar_detection(ar);
+	}
+
+	if (changed & IEEE80211_CONF_CHANGE_POWER) {
+		ath10k_dbg(ATH10K_DBG_MAC, "mac config power %d\n",
+			   hw->conf.power_level);
+
+		param = ar->wmi.pdev_param->txpower_limit2g;
+		ret = ath10k_wmi_pdev_set_param(ar, param,
+						hw->conf.power_level * 2);
+		if (ret)
+			ath10k_warn("mac failed to set 2g txpower %d (%d)\n",
+				    hw->conf.power_level, ret);
+
+		param = ar->wmi.pdev_param->txpower_limit5g;
+		ret = ath10k_wmi_pdev_set_param(ar, param,
+						hw->conf.power_level * 2);
+		if (ret)
+			ath10k_warn("mac failed to set 5g txpower %d (%d)\n",
+				    hw->conf.power_level, ret);
 	}
 
 	if (changed & IEEE80211_CONF_CHANGE_PS)
@@ -2037,7 +2214,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
 	enum wmi_sta_powersave_param param;
 	int ret = 0;
-	u32 value;
+	u32 value, param_id;
 	int bit;
 	u32 vdev_param;
 
@@ -2049,6 +2226,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
 	arvif->vif = vif;
 
 	INIT_WORK(&arvif->wep_key_work, ath10k_tx_wep_key_work);
+	INIT_LIST_HEAD(&arvif->list);
 
 	if ((vif->type == NL80211_IFTYPE_MONITOR) && ar->monitor_present) {
 		ath10k_warn("Only one monitor interface allowed\n");
@@ -2128,6 +2306,13 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
 			ath10k_warn("Failed to create peer for AP: %d\n", ret);
 			goto err_vdev_delete;
 		}
+
+		param_id = ar->wmi.pdev_param->sta_kickout_th;
+
+		/* Disable STA KICKOUT functionality in FW */
+		ret = ath10k_wmi_pdev_set_param(ar, param_id, 0);
+		if (ret)
+			ath10k_warn("Failed to disable STA KICKOUT\n");
 	}
 
 	if (arvif->vdev_type == WMI_VDEV_TYPE_STA) {
@@ -2265,8 +2450,14 @@ static void ath10k_configure_filter(struct ieee80211_hw *hw,
 	*total_flags &= SUPPORTED_FILTERS;
 	ar->filter_flags = *total_flags;
 
+	/* Monitor must not be started if it wasn't created first.
+	 * Promiscuous mode may be started on a non-monitor interface - in
+	 * such case the monitor vdev is not created so starting the
+	 * monitor makes no sense. Since ath10k uses no special RX filters
+	 * (only BSS filter in STA mode) there's no need for any special
+	 * action here. */
 	if ((ar->filter_flags & FIF_PROMISC_IN_BSS) &&
-	    !ar->monitor_enabled) {
+	    !ar->monitor_enabled && ar->monitor_present) {
 		ath10k_dbg(ATH10K_DBG_MAC, "mac monitor %d start\n",
 			   ar->monitor_vdev_id);
 
@@ -2274,7 +2465,7 @@ static void ath10k_configure_filter(struct ieee80211_hw *hw,
 		if (ret)
 			ath10k_warn("Unable to start monitor mode\n");
 	} else if (!(ar->filter_flags & FIF_PROMISC_IN_BSS) &&
-		   ar->monitor_enabled) {
+		   ar->monitor_enabled && ar->monitor_present) {
 		ath10k_dbg(ATH10K_DBG_MAC, "mac monitor %d stop\n",
 			   ar->monitor_vdev_id);
 
@@ -2360,8 +2551,8 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
 			ret = ath10k_peer_create(ar, arvif->vdev_id,
 						 info->bssid);
 			if (ret)
-				ath10k_warn("Failed to add peer: %pM for VDEV: %d\n",
-					    info->bssid, arvif->vdev_id);
+				ath10k_warn("Failed to add peer %pM for vdev %d when changing bssid: %i\n",
+					    info->bssid, arvif->vdev_id, ret);
 
 			if (vif->type == NL80211_IFTYPE_STATION) {
 				/*
@@ -2542,6 +2733,44 @@ static void ath10k_cancel_hw_scan(struct ieee80211_hw *hw,
 	mutex_unlock(&ar->conf_mutex);
 }
 
+static void ath10k_set_key_h_def_keyidx(struct ath10k *ar,
+					struct ath10k_vif *arvif,
+					enum set_key_cmd cmd,
+					struct ieee80211_key_conf *key)
+{
+	u32 vdev_param = arvif->ar->wmi.vdev_param->def_keyid;
+	int ret;
+
+	/* 10.1 firmware branch requires default key index to be set to group
+	 * key index after installing it. Otherwise FW/HW Txes corrupted
+	 * frames with multi-vif APs. This is not required for main firmware
+	 * branch (e.g. 636).
+	 *
+	 * FIXME: This has been tested only in AP. It remains unknown if this
+	 * is required for multi-vif STA interfaces on 10.1 */
+
+	if (arvif->vdev_type != WMI_VDEV_TYPE_AP)
+		return;
+
+	if (key->cipher == WLAN_CIPHER_SUITE_WEP40)
+		return;
+
+	if (key->cipher == WLAN_CIPHER_SUITE_WEP104)
+		return;
+
+	if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
+		return;
+
+	if (cmd != SET_KEY)
+		return;
+
+	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
+					key->keyidx);
+	if (ret)
+		ath10k_warn("failed to set group key as default key: %d\n",
+			    ret);
+}
+
 static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
 			  struct ieee80211_vif *vif, struct ieee80211_sta *sta,
 			  struct ieee80211_key_conf *key)
@@ -2603,6 +2832,8 @@ static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
 		goto exit;
 	}
 
+	ath10k_set_key_h_def_keyidx(ar, arvif, cmd, key);
+
 	spin_lock_bh(&ar->data_lock);
 	peer = ath10k_peer_find(ar, arvif->vdev_id, peer_addr);
 	if (peer && cmd == SET_KEY)
@@ -2627,6 +2858,7 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
 {
 	struct ath10k *ar = hw->priv;
 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+	int max_num_peers;
 	int ret = 0;
 
 	mutex_lock(&ar->conf_mutex);
@@ -2637,14 +2869,26 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
 		/*
 		 * New station addition.
 		 */
+		if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features))
+			max_num_peers = TARGET_10X_NUM_PEERS_MAX - 1;
+		else
+			max_num_peers = TARGET_NUM_PEERS;
+
+		if (ar->num_peers >= max_num_peers) {
+			ath10k_warn("Failed to add peer: peer limit reached (%d of max %d)\n",
+				    ar->num_peers, max_num_peers);
+			ret = -ENOBUFS;
+			goto exit;
+		}
+
 		ath10k_dbg(ATH10K_DBG_MAC,
-			   "mac vdev %d peer create %pM (new sta)\n",
-			   arvif->vdev_id, sta->addr);
+			   "mac vdev %d peer create %pM (new sta) num_peers %d\n",
+			   arvif->vdev_id, sta->addr, ar->num_peers);
 
 		ret = ath10k_peer_create(ar, arvif->vdev_id, sta->addr);
 		if (ret)
-			ath10k_warn("Failed to add peer: %pM for VDEV: %d\n",
-				    sta->addr, arvif->vdev_id);
+			ath10k_warn("Failed to add peer %pM for vdev %d when adding a new sta: %i\n",
+				    sta->addr, arvif->vdev_id, ret);
 	} else if ((old_state == IEEE80211_STA_NONE &&
 		    new_state == IEEE80211_STA_NOTEXIST)) {
 		/*
@@ -2689,7 +2933,7 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
 			ath10k_warn("Failed to disassociate station: %pM\n",
 				    sta->addr);
 	}
-
+exit:
 	mutex_unlock(&ar->conf_mutex);
 	return ret;
 }
@@ -3095,6 +3339,307 @@ exit:
 	return ret;
 }
 
+/* Helper table for legacy fixed_rate/bitrate_mask */
+static const u8 cck_ofdm_rate[] = {
+	/* CCK */
+	3, /* 1Mbps */
+	2, /* 2Mbps */
+	1, /* 5.5Mbps */
+	0, /* 11Mbps */
+	/* OFDM */
+	3, /* 6Mbps */
+	7, /* 9Mbps */
+	2, /* 12Mbps */
+	6, /* 18Mbps */
+	1, /* 24Mbps */
+	5, /* 36Mbps */
+	0, /* 48Mbps */
+	4, /* 54Mbps */
+};
+
+/* Check if only one bit set */
+static int ath10k_check_single_mask(u32 mask)
+{
+	int bit;
+
+	bit = ffs(mask);
+	if (!bit)
+		return 0;
+
+	mask &= ~BIT(bit - 1);
+	if (mask)
+		return 2;
+
+	return 1;
+}
+
+static bool
+ath10k_default_bitrate_mask(struct ath10k *ar,
+			    enum ieee80211_band band,
+			    const struct cfg80211_bitrate_mask *mask)
+{
+	u32 legacy = 0x00ff;
+	u8 ht = 0xff, i;
+	u16 vht = 0x3ff;
+
+	switch (band) {
+	case IEEE80211_BAND_2GHZ:
+		legacy = 0x00fff;
+		vht = 0;
+		break;
+	case IEEE80211_BAND_5GHZ:
+		break;
+	default:
+		return false;
+	}
+
+	if (mask->control[band].legacy != legacy)
+		return false;
+
+	for (i = 0; i < ar->num_rf_chains; i++)
+		if (mask->control[band].ht_mcs[i] != ht)
+			return false;
+
+	for (i = 0; i < ar->num_rf_chains; i++)
+		if (mask->control[band].vht_mcs[i] != vht)
+			return false;
+
+	return true;
+}
+
+static bool
+ath10k_bitrate_mask_nss(const struct cfg80211_bitrate_mask *mask,
+			enum ieee80211_band band,
+			u8 *fixed_nss)
+{
+	int ht_nss = 0, vht_nss = 0, i;
+
+	/* check legacy */
+	if (ath10k_check_single_mask(mask->control[band].legacy))
+		return false;
+
+	/* check HT */
+	for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++) {
+		if (mask->control[band].ht_mcs[i] == 0xff)
+			continue;
+		else if (mask->control[band].ht_mcs[i] == 0x00)
+			break;
+		else
+			return false;
+	}
+
+	ht_nss = i;
+
+	/* check VHT */
+	for (i = 0; i < NL80211_VHT_NSS_MAX; i++) {
+		if (mask->control[band].vht_mcs[i] == 0x03ff)
+			continue;
+		else if (mask->control[band].vht_mcs[i] == 0x0000)
+			break;
+		else
+			return false;
+	}
+
+	vht_nss = i;
+
+	if (ht_nss > 0 && vht_nss > 0)
+		return false;
+
+	if (ht_nss)
+		*fixed_nss = ht_nss;
+	else if (vht_nss)
+		*fixed_nss = vht_nss;
+	else
+		return false;
+
+	return true;
+}
+
+static bool
+ath10k_bitrate_mask_correct(const struct cfg80211_bitrate_mask *mask,
+			    enum ieee80211_band band,
+			    enum wmi_rate_preamble *preamble)
+{
+	int legacy = 0, ht = 0, vht = 0, i;
+
+	*preamble = WMI_RATE_PREAMBLE_OFDM;
+
+	/* check legacy */
+	legacy = ath10k_check_single_mask(mask->control[band].legacy);
+	if (legacy > 1)
+		return false;
+
+	/* check HT */
+	for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++)
+		ht += ath10k_check_single_mask(mask->control[band].ht_mcs[i]);
+	if (ht > 1)
+		return false;
+
+	/* check VHT */
+	for (i = 0; i < NL80211_VHT_NSS_MAX; i++)
+		vht += ath10k_check_single_mask(mask->control[band].vht_mcs[i]);
+	if (vht > 1)
+		return false;
+
+	/* Currently we support only one fixed_rate */
+	if ((legacy + ht + vht) != 1)
+		return false;
+
+	if (ht)
+		*preamble = WMI_RATE_PREAMBLE_HT;
+	else if (vht)
+		*preamble = WMI_RATE_PREAMBLE_VHT;
+
+	return true;
+}
+
+static bool
+ath10k_bitrate_mask_rate(const struct cfg80211_bitrate_mask *mask,
+			 enum ieee80211_band band,
+			 u8 *fixed_rate,
+			 u8 *fixed_nss)
+{
+	u8 rate = 0, pream = 0, nss = 0, i;
+	enum wmi_rate_preamble preamble;
+
+	/* Check if single rate correct */
+	if (!ath10k_bitrate_mask_correct(mask, band, &preamble))
+		return false;
+
+	pream = preamble;
+
+	switch (preamble) {
+	case WMI_RATE_PREAMBLE_CCK:
+	case WMI_RATE_PREAMBLE_OFDM:
+		i = ffs(mask->control[band].legacy) - 1;
+
+		if (band == IEEE80211_BAND_2GHZ && i < 4)
+			pream = WMI_RATE_PREAMBLE_CCK;
+
+		if (band == IEEE80211_BAND_5GHZ)
+			i += 4;
+
+		if (i >= ARRAY_SIZE(cck_ofdm_rate))
+			return false;
+
+		rate = cck_ofdm_rate[i];
+		break;
+	case WMI_RATE_PREAMBLE_HT:
+		for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++)
+			if (mask->control[band].ht_mcs[i])
+				break;
+
+		if (i == IEEE80211_HT_MCS_MASK_LEN)
+			return false;
+
+		rate = ffs(mask->control[band].ht_mcs[i]) - 1;
+		nss = i;
+		break;
+	case WMI_RATE_PREAMBLE_VHT:
+		for (i = 0; i < NL80211_VHT_NSS_MAX; i++)
+			if (mask->control[band].vht_mcs[i])
+				break;
+
+		if (i == NL80211_VHT_NSS_MAX)
+			return false;
+
+		rate = ffs(mask->control[band].vht_mcs[i]) - 1;
+		nss = i;
+		break;
+	}
+
+	*fixed_nss = nss + 1;
+	nss <<= 4;
+	pream <<= 6;
+
+	ath10k_dbg(ATH10K_DBG_MAC, "mac fixed rate pream 0x%02x nss 0x%02x rate 0x%02x\n",
+		   pream, nss, rate);
+
+	*fixed_rate = pream | nss | rate;
+
+	return true;
+}
+
+static bool ath10k_get_fixed_rate_nss(const struct cfg80211_bitrate_mask *mask,
+				      enum ieee80211_band band,
+				      u8 *fixed_rate,
+				      u8 *fixed_nss)
+{
+	/* First check full NSS mask, if we can simply limit NSS */
+	if (ath10k_bitrate_mask_nss(mask, band, fixed_nss))
+		return true;
+
+	/* Next, check if a single rate is set */
+	return ath10k_bitrate_mask_rate(mask, band, fixed_rate, fixed_nss);
+}
+
+static int ath10k_set_fixed_rate_param(struct ath10k_vif *arvif,
+				       u8 fixed_rate,
+				       u8 fixed_nss)
+{
+	struct ath10k *ar = arvif->ar;
+	u32 vdev_param;
+	int ret = 0;
+
+	mutex_lock(&ar->conf_mutex);
+
+	if (arvif->fixed_rate == fixed_rate &&
+	    arvif->fixed_nss == fixed_nss)
+		goto exit;
+
+	if (fixed_rate == WMI_FIXED_RATE_NONE)
+		ath10k_dbg(ATH10K_DBG_MAC, "mac disable fixed bitrate mask\n");
+
+	vdev_param = ar->wmi.vdev_param->fixed_rate;
+	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
+					vdev_param, fixed_rate);
+	if (ret) {
+		ath10k_warn("Could not set fixed_rate param 0x%02x: %d\n",
+			    fixed_rate, ret);
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	arvif->fixed_rate = fixed_rate;
+
+	vdev_param = ar->wmi.vdev_param->nss;
+	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
+					vdev_param, fixed_nss);
+
+	if (ret) {
+		ath10k_warn("Could not set fixed_nss param %d: %d\n",
+			    fixed_nss, ret);
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	arvif->fixed_nss = fixed_nss;
+
+exit:
+	mutex_unlock(&ar->conf_mutex);
+	return ret;
+}
+
+static int ath10k_set_bitrate_mask(struct ieee80211_hw *hw,
+				   struct ieee80211_vif *vif,
+				   const struct cfg80211_bitrate_mask *mask)
+{
+	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+	struct ath10k *ar = arvif->ar;
+	enum ieee80211_band band = ar->hw->conf.chandef.chan->band;
+	u8 fixed_rate = WMI_FIXED_RATE_NONE;
+	u8 fixed_nss = ar->num_rf_chains;
+
+	if (!ath10k_default_bitrate_mask(ar, band, mask)) {
+		if (!ath10k_get_fixed_rate_nss(mask, band,
+					       &fixed_rate,
+					       &fixed_nss))
+			return -EINVAL;
+	}
+
+	return ath10k_set_fixed_rate_param(arvif, fixed_rate, fixed_nss);
+}
+
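
The bitrate-mask helpers reduce a cfg80211 mask either to a fixed NSS or to a single fixed rate. Judging by the shifts in ath10k_bitrate_mask_rate(), the fixed rate value packs the preamble type into bits 7:6, (NSS - 1) into bits 5:4 and the rate/MCS index into the low bits; a hypothetical helper making that layout explicit:

	static inline u8 ath10k_pack_fixed_rate(u8 pream, u8 nss_minus_one, u8 rate)
	{
		return (pream << 6) | (nss_minus_one << 4) | rate;
	}
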
 static const struct ieee80211_ops ath10k_ops = {
 	.tx				= ath10k_tx,
 	.start				= ath10k_start,
@@ -3117,6 +3662,7 @@ static const struct ieee80211_ops ath10k_ops = {
 	.tx_last_beacon			= ath10k_tx_last_beacon,
 	.restart_complete		= ath10k_restart_complete,
 	.get_survey			= ath10k_get_survey,
+	.set_bitrate_mask		= ath10k_set_bitrate_mask,
 #ifdef CONFIG_PM
 	.suspend			= ath10k_suspend,
 	.resume				= ath10k_resume,
@@ -3249,12 +3795,37 @@ static const struct ieee80211_iface_limit ath10k_if_limits[] = {
 	},
 };
 
-static const struct ieee80211_iface_combination ath10k_if_comb = {
-	.limits = ath10k_if_limits,
-	.n_limits = ARRAY_SIZE(ath10k_if_limits),
-	.max_interfaces = 8,
-	.num_different_channels = 1,
-	.beacon_int_infra_match = true,
+static const struct ieee80211_iface_limit ath10k_10x_if_limits[] = {
+	{
+	.max	= 8,
+	.types	= BIT(NL80211_IFTYPE_AP)
+	},
+};
+
+static const struct ieee80211_iface_combination ath10k_if_comb[] = {
+	{
+		.limits = ath10k_if_limits,
+		.n_limits = ARRAY_SIZE(ath10k_if_limits),
+		.max_interfaces = 8,
+		.num_different_channels = 1,
+		.beacon_int_infra_match = true,
+	},
+};
+
+static const struct ieee80211_iface_combination ath10k_10x_if_comb[] = {
+	{
+		.limits = ath10k_10x_if_limits,
+		.n_limits = ARRAY_SIZE(ath10k_10x_if_limits),
+		.max_interfaces = 8,
+		.num_different_channels = 1,
+		.beacon_int_infra_match = true,
+#ifdef CONFIG_ATH10K_DFS_CERTIFIED
+		.radar_detect_widths =	BIT(NL80211_CHAN_WIDTH_20_NOHT) |
+					BIT(NL80211_CHAN_WIDTH_20) |
+					BIT(NL80211_CHAN_WIDTH_40) |
+					BIT(NL80211_CHAN_WIDTH_80),
+#endif
+	},
 };
 
 static struct ieee80211_sta_vht_cap ath10k_create_vht_cap(struct ath10k *ar)
@@ -3433,9 +4004,12 @@ int ath10k_mac_register(struct ath10k *ar)
 	ar->hw->wiphy->interface_modes =
 		BIT(NL80211_IFTYPE_STATION) |
 		BIT(NL80211_IFTYPE_ADHOC) |
-		BIT(NL80211_IFTYPE_AP) |
-		BIT(NL80211_IFTYPE_P2P_CLIENT) |
-		BIT(NL80211_IFTYPE_P2P_GO);
+		BIT(NL80211_IFTYPE_AP);
+
+	if (!test_bit(ATH10K_FW_FEATURE_NO_P2P, ar->fw_features))
+		ar->hw->wiphy->interface_modes |=
+			BIT(NL80211_IFTYPE_P2P_CLIENT) |
+			BIT(NL80211_IFTYPE_P2P_GO);
 
 	ar->hw->flags = IEEE80211_HW_SIGNAL_DBM |
 			IEEE80211_HW_SUPPORTS_PS |
@@ -3465,7 +4039,6 @@ int ath10k_mac_register(struct ath10k *ar)
 
 	ar->hw->vif_data_size = sizeof(struct ath10k_vif);
 
-	ar->hw->channel_change_time = 5000;
 	ar->hw->max_listen_interval = ATH10K_MAX_HW_LISTEN_INTERVAL;
 
 	ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
@@ -3478,11 +4051,28 @@ int ath10k_mac_register(struct ath10k *ar)
 	 */
 	ar->hw->queues = 4;
 
-	ar->hw->wiphy->iface_combinations = &ath10k_if_comb;
-	ar->hw->wiphy->n_iface_combinations = 1;
+	if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
+		ar->hw->wiphy->iface_combinations = ath10k_10x_if_comb;
+		ar->hw->wiphy->n_iface_combinations =
+			ARRAY_SIZE(ath10k_10x_if_comb);
+	} else {
+		ar->hw->wiphy->iface_combinations = ath10k_if_comb;
+		ar->hw->wiphy->n_iface_combinations =
+			ARRAY_SIZE(ath10k_if_comb);
+	}
 
 	ar->hw->netdev_features = NETIF_F_HW_CSUM;
 
+	if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED)) {
+		/* Init ath dfs pattern detector */
+		ar->ath_common.debug_mask = ATH_DBG_DFS;
+		ar->dfs_detector = dfs_pattern_detector_init(&ar->ath_common,
+							     NL80211_DFS_UNSET);
+
+		if (!ar->dfs_detector)
+			ath10k_warn("dfs pattern detector init failed\n");
+	}
+
 	ret = ath_regd_init(&ar->ath_common.regulatory, ar->hw->wiphy,
 			    ath10k_reg_notifier);
 	if (ret) {
@@ -3518,6 +4108,9 @@ void ath10k_mac_unregister(struct ath10k *ar)
 {
 	ieee80211_unregister_hw(ar->hw);
 
+	if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector)
+		ar->dfs_detector->exit(ar->dfs_detector);
+
 	kfree(ar->mac.sbands[IEEE80211_BAND_2GHZ].channels);
 	kfree(ar->mac.sbands[IEEE80211_BAND_5GHZ].channels);
 
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index 9e86a811086f..29fd197d1fd8 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -19,6 +19,7 @@
 #include <linux/module.h>
 #include <linux/interrupt.h>
 #include <linux/spinlock.h>
+#include <linux/bitops.h>
 
 #include "core.h"
 #include "debug.h"
@@ -32,10 +33,21 @@
 #include "ce.h"
 #include "pci.h"
 
+enum ath10k_pci_irq_mode {
+	ATH10K_PCI_IRQ_AUTO = 0,
+	ATH10K_PCI_IRQ_LEGACY = 1,
+	ATH10K_PCI_IRQ_MSI = 2,
+};
+
 static unsigned int ath10k_target_ps;
+static unsigned int ath10k_pci_irq_mode = ATH10K_PCI_IRQ_AUTO;
+
 module_param(ath10k_target_ps, uint, 0644);
 MODULE_PARM_DESC(ath10k_target_ps, "Enable ath10k Target (SoC) PS option");
 
+module_param_named(irq_mode, ath10k_pci_irq_mode, uint, 0644);
+MODULE_PARM_DESC(irq_mode, "0: auto, 1: legacy, 2: msi (default: 0)");
+
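
The new irq_mode module parameter lets the interrupt mode be pinned instead of relying on the MSI-X -> MSI -> legacy auto-selection added further down in this patch. Usage example (values per the MODULE_PARM_DESC above):

	modprobe ath10k_pci irq_mode=1	# force legacy interrupts
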
 #define QCA988X_2_0_DEVICE_ID	(0x003c)
 
 static DEFINE_PCI_DEVICE_TABLE(ath10k_pci_id_table) = {
@@ -52,10 +64,16 @@ static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
 					     int num);
 static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info);
 static void ath10k_pci_stop_ce(struct ath10k *ar);
-static void ath10k_pci_device_reset(struct ath10k *ar);
-static int ath10k_pci_reset_target(struct ath10k *ar);
-static int ath10k_pci_start_intr(struct ath10k *ar);
-static void ath10k_pci_stop_intr(struct ath10k *ar);
+static int ath10k_pci_device_reset(struct ath10k *ar);
+static int ath10k_pci_wait_for_target_init(struct ath10k *ar);
+static int ath10k_pci_init_irq(struct ath10k *ar);
+static int ath10k_pci_deinit_irq(struct ath10k *ar);
+static int ath10k_pci_request_irq(struct ath10k *ar);
+static void ath10k_pci_free_irq(struct ath10k *ar);
+static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
+			       struct ath10k_ce_pipe *rx_pipe,
+			       struct bmi_xfer *xfer);
+static void ath10k_pci_cleanup_ce(struct ath10k *ar);
 
 static const struct ce_attr host_ce_config_wlan[] = {
 	/* CE0: host->target HTC control and raw streams */
@@ -200,6 +218,87 @@ static const struct ce_pipe_config target_ce_config_wlan[] = {
 	/* CE7 used only by Host */
 };
 
+static bool ath10k_pci_irq_pending(struct ath10k *ar)
+{
+	u32 cause;
+
+	/* Check if the shared legacy irq is for us */
+	cause = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
+				  PCIE_INTR_CAUSE_ADDRESS);
+	if (cause & (PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL))
+		return true;
+
+	return false;
+}
+
+static void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar)
+{
+	/* IMPORTANT: INTR_CLR register has to be set after
+	 * INTR_ENABLE is set to 0, otherwise interrupt can not be
+	 * really cleared. */
+	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
+			   0);
+	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS,
+			   PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
+
+	/* IMPORTANT: this extra read transaction is required to
+	 * flush the posted write buffer. */
+	(void) ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
+				 PCIE_INTR_ENABLE_ADDRESS);
+}
+
+static void ath10k_pci_enable_legacy_irq(struct ath10k *ar)
+{
+	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
+			   PCIE_INTR_ENABLE_ADDRESS,
+			   PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
+
+	/* IMPORTANT: this extra read transaction is required to
+	 * flush the posted write buffer. */
+	(void) ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
+				 PCIE_INTR_ENABLE_ADDRESS);
+}
+
+static irqreturn_t ath10k_pci_early_irq_handler(int irq, void *arg)
+{
+	struct ath10k *ar = arg;
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+	if (ar_pci->num_msi_intrs == 0) {
+		if (!ath10k_pci_irq_pending(ar))
+			return IRQ_NONE;
+
+		ath10k_pci_disable_and_clear_legacy_irq(ar);
+	}
+
+	tasklet_schedule(&ar_pci->early_irq_tasklet);
+
+	return IRQ_HANDLED;
+}
+
+static int ath10k_pci_request_early_irq(struct ath10k *ar)
+{
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	int ret;
+
+	/* Regardless of whether MSI-X, MSI or legacy irqs have been set up,
+	 * the first interrupt from the irq vector is triggered in all cases
+	 * for FW indication/errors. */
+	ret = request_irq(ar_pci->pdev->irq, ath10k_pci_early_irq_handler,
+			  IRQF_SHARED, "ath10k_pci (early)", ar);
+	if (ret) {
+		ath10k_warn("failed to request early irq: %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static void ath10k_pci_free_early_irq(struct ath10k *ar)
+{
+	free_irq(ath10k_pci_priv(ar)->pdev->irq, ar);
+}
+
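
The early-interrupt helpers above follow the usual shared-IRQ etiquette for legacy INTx: read the interrupt cause register to decide whether the line actually fired for this device, disable and clear the interrupt before scheduling the tasklet, and follow each enable/clear write with a dummy read so posted PCI writes are flushed. Reduced to its essentials (sketch mirroring ath10k_pci_early_irq_handler()):

	if (ar_pci->num_msi_intrs == 0) {		/* legacy INTx */
		if (!ath10k_pci_irq_pending(ar))
			return IRQ_NONE;	/* not ours - let other devices on the line run */
		ath10k_pci_disable_and_clear_legacy_irq(ar);
	}
	tasklet_schedule(&ar_pci->early_irq_tasklet);
	return IRQ_HANDLED;
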
 /*
  * Diagnostic read/write access is provided for startup/config/debug usage.
  * Caller must guarantee proper alignment, when applicable, and single user
@@ -526,17 +625,6 @@ static bool ath10k_pci_target_is_awake(struct ath10k *ar)
 	return (RTC_STATE_V_GET(val) == RTC_STATE_V_ON);
 }
 
-static void ath10k_pci_wait(struct ath10k *ar)
-{
-	int n = 100;
-
-	while (n-- && !ath10k_pci_target_is_awake(ar))
-		msleep(10);
-
-	if (n < 0)
-		ath10k_warn("Unable to wakeup target\n");
-}
-
 int ath10k_do_pci_wake(struct ath10k *ar)
 {
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
@@ -723,7 +811,7 @@ static int ath10k_pci_hif_send_head(struct ath10k *ar, u8 pipe_id,
 	ret = ath10k_ce_send(ce_hdl, nbuf, skb_cb->paddr, len, transfer_id,
 			     flags);
 	if (ret)
-		ath10k_warn("CE send failed: %p\n", nbuf);
+		ath10k_warn("failed to send sk_buff to CE: %p\n", nbuf);
 
 	return ret;
 }
@@ -750,9 +838,10 @@ static void ath10k_pci_hif_dump_area(struct ath10k *ar)
 		   ar->fw_version_build);
 
 	host_addr = host_interest_item_address(HI_ITEM(hi_failure_state));
-	if (ath10k_pci_diag_read_mem(ar, host_addr,
-				     &reg_dump_area, sizeof(u32)) != 0) {
-		ath10k_warn("could not read hi_failure_state\n");
+	ret = ath10k_pci_diag_read_mem(ar, host_addr,
+				       &reg_dump_area, sizeof(u32));
+	if (ret) {
+		ath10k_err("failed to read FW dump area address: %d\n", ret);
 		return;
 	}
 
@@ -762,7 +851,7 @@ static void ath10k_pci_hif_dump_area(struct ath10k *ar)
 				       &reg_dump_values[0],
 				       REG_DUMP_COUNT_QCA988X * sizeof(u32));
 	if (ret != 0) {
-		ath10k_err("could not dump FW Dump Area\n");
+		ath10k_err("failed to read FW dump area: %d\n", ret);
 		return;
 	}
 
@@ -777,7 +866,7 @@ static void ath10k_pci_hif_dump_area(struct ath10k *ar)
 			   reg_dump_values[i + 2],
 			   reg_dump_values[i + 3]);
 
-	ieee80211_queue_work(ar->hw, &ar->restart_work);
+	queue_work(ar->workqueue, &ar->restart_work);
 }
 
 static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
@@ -815,53 +904,41 @@ static void ath10k_pci_hif_set_callbacks(struct ath10k *ar,
 	       sizeof(ar_pci->msg_callbacks_current));
 }
 
-static int ath10k_pci_start_ce(struct ath10k *ar)
+static int ath10k_pci_alloc_compl(struct ath10k *ar)
 {
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-	struct ath10k_ce_pipe *ce_diag = ar_pci->ce_diag;
 	const struct ce_attr *attr;
 	struct ath10k_pci_pipe *pipe_info;
 	struct ath10k_pci_compl *compl;
-	int i, pipe_num, completions, disable_interrupts;
+	int i, pipe_num, completions;
 
 	spin_lock_init(&ar_pci->compl_lock);
 	INIT_LIST_HEAD(&ar_pci->compl_process);
 
-	for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
+	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
 		pipe_info = &ar_pci->pipe_info[pipe_num];
 
 		spin_lock_init(&pipe_info->pipe_lock);
 		INIT_LIST_HEAD(&pipe_info->compl_free);
 
 		/* Handle Diagnostic CE specially */
-		if (pipe_info->ce_hdl == ce_diag)
+		if (pipe_info->ce_hdl == ar_pci->ce_diag)
 			continue;
 
 		attr = &host_ce_config_wlan[pipe_num];
 		completions = 0;
 
-		if (attr->src_nentries) {
-			disable_interrupts = attr->flags & CE_ATTR_DIS_INTR;
-			ath10k_ce_send_cb_register(pipe_info->ce_hdl,
-						   ath10k_pci_ce_send_done,
-						   disable_interrupts);
+		if (attr->src_nentries)
 			completions += attr->src_nentries;
-		}
 
-		if (attr->dest_nentries) {
-			ath10k_ce_recv_cb_register(pipe_info->ce_hdl,
-						   ath10k_pci_ce_recv_data);
+		if (attr->dest_nentries)
 			completions += attr->dest_nentries;
-		}
-
-		if (completions == 0)
-			continue;
 
 		for (i = 0; i < completions; i++) {
 			compl = kmalloc(sizeof(*compl), GFP_KERNEL);
 			if (!compl) {
 				ath10k_warn("No memory for completion state\n");
-				ath10k_pci_stop_ce(ar);
+				ath10k_pci_cleanup_ce(ar);
 				return -ENOMEM;
 			}
 
@@ -873,20 +950,55 @@ static int ath10k_pci_start_ce(struct ath10k *ar)
 	return 0;
 }
 
-static void ath10k_pci_stop_ce(struct ath10k *ar)
+static int ath10k_pci_setup_ce_irq(struct ath10k *ar)
 {
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-	struct ath10k_pci_compl *compl;
-	struct sk_buff *skb;
-	int i;
+	const struct ce_attr *attr;
+	struct ath10k_pci_pipe *pipe_info;
+	int pipe_num, disable_interrupts;
 
-	ath10k_ce_disable_interrupts(ar);
+	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
+		pipe_info = &ar_pci->pipe_info[pipe_num];
+
+		/* Handle Diagnostic CE specially */
+		if (pipe_info->ce_hdl == ar_pci->ce_diag)
+			continue;
+
+		attr = &host_ce_config_wlan[pipe_num];
+
+		if (attr->src_nentries) {
+			disable_interrupts = attr->flags & CE_ATTR_DIS_INTR;
+			ath10k_ce_send_cb_register(pipe_info->ce_hdl,
+						   ath10k_pci_ce_send_done,
+						   disable_interrupts);
+		}
+
+		if (attr->dest_nentries)
+			ath10k_ce_recv_cb_register(pipe_info->ce_hdl,
+						   ath10k_pci_ce_recv_data);
+	}
+
+	return 0;
+}
+
+static void ath10k_pci_kill_tasklet(struct ath10k *ar)
+{
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	int i;
 
-	/* Cancel the pending tasklet */
 	tasklet_kill(&ar_pci->intr_tq);
+	tasklet_kill(&ar_pci->msi_fw_err);
+	tasklet_kill(&ar_pci->early_irq_tasklet);
 
 	for (i = 0; i < CE_COUNT; i++)
 		tasklet_kill(&ar_pci->pipe_info[i].intr);
+}
+
+static void ath10k_pci_stop_ce(struct ath10k *ar)
+{
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	struct ath10k_pci_compl *compl;
+	struct sk_buff *skb;
 
 	/* Mark pending completions as aborted, so that upper layers free up
 	 * their associated resources */
@@ -920,7 +1032,7 @@ static void ath10k_pci_cleanup_ce(struct ath10k *ar)
 	spin_unlock_bh(&ar_pci->compl_lock);
 
 	/* Free unused completions for each pipe. */
-	for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
+	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
 		pipe_info = &ar_pci->pipe_info[pipe_num];
 
 		spin_lock_bh(&pipe_info->pipe_lock);
@@ -974,8 +1086,8 @@ static void ath10k_pci_process_ce(struct ath10k *ar)
 		case ATH10K_PCI_COMPL_RECV:
 			ret = ath10k_pci_post_rx_pipe(compl->pipe_info, 1);
 			if (ret) {
-				ath10k_warn("Unable to post recv buffer for pipe: %d\n",
-					    compl->pipe_info->pipe_num);
+				ath10k_warn("failed to post RX buffer for pipe %d: %d\n",
+					    compl->pipe_info->pipe_num, ret);
 				break;
 			}
 
@@ -1114,7 +1226,7 @@ static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
 	for (i = 0; i < num; i++) {
 		skb = dev_alloc_skb(pipe_info->buf_sz);
 		if (!skb) {
-			ath10k_warn("could not allocate skbuff for pipe %d\n",
+			ath10k_warn("failed to allocate skbuff for pipe %d\n",
 				    num);
 			ret = -ENOMEM;
 			goto err;
@@ -1127,7 +1239,7 @@ static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
 					 DMA_FROM_DEVICE);
 
 		if (unlikely(dma_mapping_error(ar->dev, ce_data))) {
-			ath10k_warn("could not dma map skbuff\n");
+			ath10k_warn("failed to DMA map sk_buff\n");
 			dev_kfree_skb_any(skb);
 			ret = -EIO;
 			goto err;
@@ -1142,7 +1254,7 @@ static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
 		ret = ath10k_ce_recv_buf_enqueue(ce_state, (void *)skb,
 						 ce_data);
 		if (ret) {
-			ath10k_warn("could not enqueue to pipe %d (%d)\n",
+			ath10k_warn("failed to enqueue to pipe %d: %d\n",
 				    num, ret);
 			goto err;
 		}
@@ -1162,7 +1274,7 @@ static int ath10k_pci_post_rx(struct ath10k *ar)
 	const struct ce_attr *attr;
 	int pipe_num, ret = 0;
 
-	for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
+	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
 		pipe_info = &ar_pci->pipe_info[pipe_num];
 		attr = &host_ce_config_wlan[pipe_num];
 
@@ -1172,8 +1284,8 @@ static int ath10k_pci_post_rx(struct ath10k *ar)
 		ret = ath10k_pci_post_rx_pipe(pipe_info,
 					      attr->dest_nentries - 1);
 		if (ret) {
-			ath10k_warn("Unable to replenish recv buffers for pipe: %d\n",
-				    pipe_num);
+			ath10k_warn("failed to post RX buffer for pipe %d: %d\n",
+				    pipe_num, ret);
 
 			for (; pipe_num >= 0; pipe_num--) {
 				pipe_info = &ar_pci->pipe_info[pipe_num];
@@ -1189,23 +1301,58 @@ static int ath10k_pci_post_rx(struct ath10k *ar)
 static int ath10k_pci_hif_start(struct ath10k *ar)
 {
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-	int ret;
+	int ret, ret_early;
 
-	ret = ath10k_pci_start_ce(ar);
+	ath10k_pci_free_early_irq(ar);
+	ath10k_pci_kill_tasklet(ar);
+
+	ret = ath10k_pci_alloc_compl(ar);
 	if (ret) {
-		ath10k_warn("could not start CE (%d)\n", ret);
-		return ret;
+		ath10k_warn("failed to allocate CE completions: %d\n", ret);
+		goto err_early_irq;
+	}
+
+	ret = ath10k_pci_request_irq(ar);
+	if (ret) {
+		ath10k_warn("failed to request irqs: %d\n", ret);
+		goto err_free_compl;
+	}
+
+	ret = ath10k_pci_setup_ce_irq(ar);
+	if (ret) {
+		ath10k_warn("failed to setup CE interrupts: %d\n", ret);
+		goto err_stop;
 	}
 
 	/* Post buffers once to start things off. */
 	ret = ath10k_pci_post_rx(ar);
 	if (ret) {
-		ath10k_warn("could not post rx pipes (%d)\n", ret);
-		return ret;
+		ath10k_warn("failed to post RX buffers for all pipes: %d\n",
+			    ret);
+		goto err_stop;
 	}
 
 	ar_pci->started = 1;
 	return 0;
+
+err_stop:
+	ath10k_ce_disable_interrupts(ar);
+	ath10k_pci_free_irq(ar);
+	ath10k_pci_kill_tasklet(ar);
+	ath10k_pci_stop_ce(ar);
+	ath10k_pci_process_ce(ar);
+err_free_compl:
+	ath10k_pci_cleanup_ce(ar);
+err_early_irq:
+	/* Though there should be no interrupts (device was reset)
+	 * power_down() expects the early IRQ to be installed as per the
+	 * driver lifecycle. */
+	ret_early = ath10k_pci_request_early_irq(ar);
+	if (ret_early)
+		ath10k_warn("failed to re-enable early irq: %d\n", ret_early);
+
+	return ret;
 }
 
 static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
@@ -1271,6 +1418,13 @@ static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
 		 * Indicate the completion to the higher layer to free
 		 * the buffer
 		 */
+
+		if (!netbuf) {
+			ath10k_warn("invalid sk_buff on CE %d - NULL pointer. firmware crashed?\n",
+				    ce_hdl->id);
+			continue;
+		}
+
 		ATH10K_SKB_CB(netbuf)->is_aborted = true;
 		ar_pci->msg_callbacks_current.tx_completion(ar,
 							    netbuf,
@@ -1291,7 +1445,7 @@ static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 	int pipe_num;
 
-	for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
+	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
 		struct ath10k_pci_pipe *pipe_info;
 
 		pipe_info = &ar_pci->pipe_info[pipe_num];
@@ -1306,7 +1460,7 @@ static void ath10k_pci_ce_deinit(struct ath10k *ar)
 	struct ath10k_pci_pipe *pipe_info;
 	int pipe_num;
 
-	for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
+	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
 		pipe_info = &ar_pci->pipe_info[pipe_num];
 		if (pipe_info->ce_hdl) {
 			ath10k_ce_deinit(pipe_info->ce_hdl);
@@ -1316,27 +1470,25 @@ static void ath10k_pci_ce_deinit(struct ath10k *ar)
 	}
 }
 
-static void ath10k_pci_disable_irqs(struct ath10k *ar)
-{
-	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-	int i;
-
-	for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
-		disable_irq(ar_pci->pdev->irq + i);
-}
-
 static void ath10k_pci_hif_stop(struct ath10k *ar)
 {
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	int ret;
 
 	ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
 
-	/* Irqs are never explicitly re-enabled. They are implicitly re-enabled
-	 * by ath10k_pci_start_intr(). */
-	ath10k_pci_disable_irqs(ar);
+	ret = ath10k_ce_disable_interrupts(ar);
+	if (ret)
+		ath10k_warn("failed to disable CE interrupts: %d\n", ret);
 
+	ath10k_pci_free_irq(ar);
+	ath10k_pci_kill_tasklet(ar);
 	ath10k_pci_stop_ce(ar);
 
+	ret = ath10k_pci_request_early_irq(ar);
+	if (ret)
+		ath10k_warn("failed to re-enable early irq: %d\n", ret);
+
 	/* At this point, asynchronous threads are stopped, the target should
 	 * not DMA nor interrupt. We process the leftovers and then free
 	 * everything else up. */
@@ -1345,6 +1497,13 @@ static void ath10k_pci_hif_stop(struct ath10k *ar)
 	ath10k_pci_cleanup_ce(ar);
 	ath10k_pci_buffer_cleanup(ar);
 
+	/* Make sure the device won't access any structures on the host by
+	 * resetting it. The device was fed with PCI CE ringbuffer
+	 * configuration during init. If ringbuffers are freed and the device
+	 * were to access them this could lead to memory corruption on the
+	 * host. */
+	ath10k_pci_device_reset(ar);
+
 	ar_pci->started = 0;
 }
 
@@ -1363,6 +1522,8 @@ static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
 	void *treq, *tresp = NULL;
 	int ret = 0;
 
+	might_sleep();
+
 	if (resp && !resp_len)
 		return -EINVAL;
 
@@ -1403,14 +1564,12 @@ static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
 	if (ret)
 		goto err_resp;
 
-	ret = wait_for_completion_timeout(&xfer.done,
-					  BMI_COMMUNICATION_TIMEOUT_HZ);
-	if (ret <= 0) {
+	ret = ath10k_pci_bmi_wait(ce_tx, ce_rx, &xfer);
+	if (ret) {
 		u32 unused_buffer;
 		unsigned int unused_nbytes;
 		unsigned int unused_id;
 
-		ret = -ETIMEDOUT;
 		ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer,
 					   &unused_nbytes, &unused_id);
 	} else {
@@ -1478,6 +1637,25 @@ static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state)
 	complete(&xfer->done);
 }
 
+static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
+			       struct ath10k_ce_pipe *rx_pipe,
+			       struct bmi_xfer *xfer)
+{
+	unsigned long timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ;
+
+	while (time_before_eq(jiffies, timeout)) {
+		ath10k_pci_bmi_send_done(tx_pipe);
+		ath10k_pci_bmi_recv_data(rx_pipe);
+
+		if (completion_done(&xfer->done))
+			return 0;
+
+		schedule();
+	}
+
+	return -ETIMEDOUT;
+}
+
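
BMI messages are exchanged before the regular interrupt path is fully up, so completion is now polled: the CE send/recv handlers are driven directly in a loop until completion_done() reports the transfer finished or BMI_COMMUNICATION_TIMEOUT_HZ elapses, with schedule() yielding the CPU in between (hence the might_sleep() added to ath10k_pci_hif_exchange_bmi_msg()). The general shape of such a poll-with-timeout loop, as a standalone sketch (poll_hardware() is a placeholder for the CE handler calls):

	unsigned long timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ;

	while (time_before_eq(jiffies, timeout)) {
		poll_hardware();
		if (completion_done(&xfer->done))
			return 0;
		schedule();
	}
	return -ETIMEDOUT;
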
 /*
  * Map from service/endpoint to Copy Engine.
  * This table is derived from the CE_PCI TABLE, above.
@@ -1587,7 +1765,7 @@ static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
 					      CORE_CTRL_ADDRESS,
 					  &core_ctrl);
 	if (ret) {
-		ath10k_warn("Unable to read core ctrl\n");
+		ath10k_warn("failed to read core_ctrl: %d\n", ret);
 		return ret;
 	}
 
@@ -1597,10 +1775,13 @@ static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
 	ret = ath10k_pci_diag_write_access(ar, SOC_CORE_BASE_ADDRESS |
 					       CORE_CTRL_ADDRESS,
 					   core_ctrl);
-	if (ret)
-		ath10k_warn("Unable to set interrupt mask\n");
+	if (ret) {
+		ath10k_warn("failed to set target CPU interrupt mask: %d\n",
+			    ret);
+		return ret;
+	}
 
-	return ret;
+	return 0;
 }
 
 static int ath10k_pci_init_config(struct ath10k *ar)
@@ -1751,7 +1932,7 @@ static int ath10k_pci_ce_init(struct ath10k *ar)
 	const struct ce_attr *attr;
 	int pipe_num;
 
-	for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
+	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
 		pipe_info = &ar_pci->pipe_info[pipe_num];
 		pipe_info->pipe_num = pipe_num;
 		pipe_info->hif_ce_state = ar;
@@ -1759,7 +1940,7 @@ static int ath10k_pci_ce_init(struct ath10k *ar)
 
 		pipe_info->ce_hdl = ath10k_ce_init(ar, pipe_num, attr);
 		if (pipe_info->ce_hdl == NULL) {
-			ath10k_err("Unable to initialize CE for pipe: %d\n",
+			ath10k_err("failed to initialize CE for pipe: %d\n",
 				   pipe_num);
 
 			/* It is safe to call it here. It checks if ce_hdl is
@@ -1768,31 +1949,18 @@ static int ath10k_pci_ce_init(struct ath10k *ar)
 			return -1;
 		}
 
-		if (pipe_num == ar_pci->ce_count - 1) {
+		if (pipe_num == CE_COUNT - 1) {
 			/*
 			 * Reserve the ultimate CE for
 			 * diagnostic Window support
 			 */
-			ar_pci->ce_diag =
-			ar_pci->pipe_info[ar_pci->ce_count - 1].ce_hdl;
+			ar_pci->ce_diag = pipe_info->ce_hdl;
 			continue;
 		}
 
 		pipe_info->buf_sz = (size_t) (attr->src_sz_max);
 	}
 
-	/*
-	 * Initially, establish CE completion handlers for use with BMI.
-	 * These are overwritten with generic handlers after we exit BMI phase.
-	 */
-	pipe_info = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
-	ath10k_ce_send_cb_register(pipe_info->ce_hdl,
-				   ath10k_pci_bmi_send_done, 0);
-
-	pipe_info = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
-	ath10k_ce_recv_cb_register(pipe_info->ce_hdl,
-				   ath10k_pci_bmi_recv_data);
-
 	return 0;
 }
 
@@ -1828,14 +1996,9 @@ static void ath10k_pci_fw_interrupt_handler(struct ath10k *ar)
 static int ath10k_pci_hif_power_up(struct ath10k *ar)
 {
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	const char *irq_mode;
 	int ret;
 
-	ret = ath10k_pci_start_intr(ar);
-	if (ret) {
-		ath10k_err("could not start interrupt handling (%d)\n", ret);
-		goto err;
-	}
-
 	/*
 	 * Bring the target up cleanly.
 	 *
@@ -1846,39 +2009,80 @@ static int ath10k_pci_hif_power_up(struct ath10k *ar)
 	 * is in an unexpected state. We try to catch that here in order to
 	 * reset the Target and retry the probe.
 	 */
-	ath10k_pci_device_reset(ar);
-
-	ret = ath10k_pci_reset_target(ar);
-	if (ret)
-		goto err_irq;
+	ret = ath10k_pci_device_reset(ar);
+	if (ret) {
+		ath10k_err("failed to reset target: %d\n", ret);
+		goto err;
+	}
 
 	if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
 		/* Force AWAKE forever */
 		ath10k_do_pci_wake(ar);
 
 	ret = ath10k_pci_ce_init(ar);
-	if (ret)
+	if (ret) {
+		ath10k_err("failed to initialize CE: %d\n", ret);
 		goto err_ps;
+	}
 
-	ret = ath10k_pci_init_config(ar);
-	if (ret)
+	ret = ath10k_ce_disable_interrupts(ar);
+	if (ret) {
+		ath10k_err("failed to disable CE interrupts: %d\n", ret);
 		goto err_ce;
+	}
 
-	ret = ath10k_pci_wake_target_cpu(ar);
+	ret = ath10k_pci_init_irq(ar);
 	if (ret) {
-		ath10k_err("could not wake up target CPU (%d)\n", ret);
+		ath10k_err("failed to init irqs: %d\n", ret);
 		goto err_ce;
 	}
 
+	ret = ath10k_pci_request_early_irq(ar);
+	if (ret) {
+		ath10k_err("failed to request early irq: %d\n", ret);
+		goto err_deinit_irq;
+	}
+
+	ret = ath10k_pci_wait_for_target_init(ar);
+	if (ret) {
+		ath10k_err("failed to wait for target to init: %d\n", ret);
+		goto err_free_early_irq;
+	}
+
+	ret = ath10k_pci_init_config(ar);
+	if (ret) {
+		ath10k_err("failed to setup init config: %d\n", ret);
+		goto err_free_early_irq;
+	}
+
+	ret = ath10k_pci_wake_target_cpu(ar);
+	if (ret) {
+		ath10k_err("could not wake up target CPU: %d\n", ret);
+		goto err_free_early_irq;
+	}
+
+	if (ar_pci->num_msi_intrs > 1)
+		irq_mode = "MSI-X";
+	else if (ar_pci->num_msi_intrs == 1)
+		irq_mode = "MSI";
+	else
+		irq_mode = "legacy";
+
+	if (!test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags))
+		ath10k_info("pci irq %s\n", irq_mode);
+
 	return 0;
 
+err_free_early_irq:
+	ath10k_pci_free_early_irq(ar);
+err_deinit_irq:
+	ath10k_pci_deinit_irq(ar);
 err_ce:
 	ath10k_pci_ce_deinit(ar);
+	ath10k_pci_device_reset(ar);
 err_ps:
 	if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
 		ath10k_do_pci_sleep(ar);
-err_irq:
-	ath10k_pci_stop_intr(ar);
 err:
 	return ret;
 }
@@ -1887,7 +2091,10 @@ static void ath10k_pci_hif_power_down(struct ath10k *ar)
 {
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 
-	ath10k_pci_stop_intr(ar);
+	ath10k_pci_free_early_irq(ar);
+	ath10k_pci_kill_tasklet(ar);
+	ath10k_pci_deinit_irq(ar);
+	ath10k_pci_device_reset(ar);
 
 	ath10k_pci_ce_deinit(ar);
 	if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
@@ -2023,25 +2230,10 @@ static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 
 	if (ar_pci->num_msi_intrs == 0) {
-		/*
-		 * IMPORTANT: INTR_CLR regiser has to be set after
-		 * INTR_ENABLE is set to 0, otherwise interrupt can not be
-		 * really cleared.
-		 */
-		iowrite32(0, ar_pci->mem +
-			  (SOC_CORE_BASE_ADDRESS |
-			   PCIE_INTR_ENABLE_ADDRESS));
-		iowrite32(PCIE_INTR_FIRMWARE_MASK |
-			  PCIE_INTR_CE_MASK_ALL,
-			  ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
-					 PCIE_INTR_CLR_ADDRESS));
-		/*
-		 * IMPORTANT: this extra read transaction is required to
-		 * flush the posted write buffer.
-		 */
-		(void) ioread32(ar_pci->mem +
-				(SOC_CORE_BASE_ADDRESS |
-				 PCIE_INTR_ENABLE_ADDRESS));
+		if (!ath10k_pci_irq_pending(ar))
+			return IRQ_NONE;
+
+		ath10k_pci_disable_and_clear_legacy_irq(ar);
 	}
 
 	tasklet_schedule(&ar_pci->intr_tq);
@@ -2049,6 +2241,34 @@ static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
 	return IRQ_HANDLED;
 }
 
+static void ath10k_pci_early_irq_tasklet(unsigned long data)
+{
+	struct ath10k *ar = (struct ath10k *)data;
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	u32 fw_ind;
+	int ret;
+
+	ret = ath10k_pci_wake(ar);
+	if (ret) {
+		ath10k_warn("failed to wake target in early irq tasklet: %d\n",
+			    ret);
+		return;
+	}
+
+	fw_ind = ath10k_pci_read32(ar, ar_pci->fw_indicator_address);
+	if (fw_ind & FW_IND_EVENT_PENDING) {
+		ath10k_pci_write32(ar, ar_pci->fw_indicator_address,
+				   fw_ind & ~FW_IND_EVENT_PENDING);
+
+		/* Some structures are unavailable during early boot or at
+		 * driver teardown so just print that the device has crashed. */
+		ath10k_warn("device crashed - no diagnostics available\n");
+	}
+
+	ath10k_pci_sleep(ar);
+	ath10k_pci_enable_legacy_irq(ar);
+}
+
 static void ath10k_pci_tasklet(unsigned long data)
 {
 	struct ath10k *ar = (struct ath10k *)data;
@@ -2057,40 +2277,22 @@ static void ath10k_pci_tasklet(unsigned long data)
 	ath10k_pci_fw_interrupt_handler(ar); /* FIXME: Handle FW error */
 	ath10k_ce_per_engine_service_any(ar);
 
-	if (ar_pci->num_msi_intrs == 0) {
-		/* Enable Legacy PCI line interrupts */
-		iowrite32(PCIE_INTR_FIRMWARE_MASK |
-			  PCIE_INTR_CE_MASK_ALL,
-			  ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
-					 PCIE_INTR_ENABLE_ADDRESS));
-		/*
-		 * IMPORTANT: this extra read transaction is required to
-		 * flush the posted write buffer
-		 */
-		(void) ioread32(ar_pci->mem +
-				(SOC_CORE_BASE_ADDRESS |
-				 PCIE_INTR_ENABLE_ADDRESS));
-	}
+	/* Re-enable legacy irq that was disabled in the irq handler */
+	if (ar_pci->num_msi_intrs == 0)
+		ath10k_pci_enable_legacy_irq(ar);
 }
 
-static int ath10k_pci_start_intr_msix(struct ath10k *ar, int num)
+static int ath10k_pci_request_irq_msix(struct ath10k *ar)
 {
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-	int ret;
-	int i;
-
-	ret = pci_enable_msi_block(ar_pci->pdev, num);
-	if (ret)
-		return ret;
+	int ret, i;
 
 	ret = request_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW,
 			  ath10k_pci_msi_fw_handler,
 			  IRQF_SHARED, "ath10k_pci", ar);
 	if (ret) {
-		ath10k_warn("request_irq(%d) failed %d\n",
+		ath10k_warn("failed to request MSI-X fw irq %d: %d\n",
 			    ar_pci->pdev->irq + MSI_ASSIGN_FW, ret);
-
-		pci_disable_msi(ar_pci->pdev);
 		return ret;
 	}
 
@@ -2099,44 +2301,38 @@ static int ath10k_pci_start_intr_msix(struct ath10k *ar, int num)
 				  ath10k_pci_per_engine_handler,
 				  IRQF_SHARED, "ath10k_pci", ar);
 		if (ret) {
-			ath10k_warn("request_irq(%d) failed %d\n",
+			ath10k_warn("failed to request MSI-X ce irq %d: %d\n",
 				    ar_pci->pdev->irq + i, ret);
 
 			for (i--; i >= MSI_ASSIGN_CE_INITIAL; i--)
 				free_irq(ar_pci->pdev->irq + i, ar);
 
 			free_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW, ar);
-			pci_disable_msi(ar_pci->pdev);
 			return ret;
 		}
 	}
 
-	ath10k_info("MSI-X interrupt handling (%d intrs)\n", num);
 	return 0;
 }
 
-static int ath10k_pci_start_intr_msi(struct ath10k *ar)
+static int ath10k_pci_request_irq_msi(struct ath10k *ar)
 {
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 	int ret;
 
-	ret = pci_enable_msi(ar_pci->pdev);
-	if (ret < 0)
-		return ret;
-
 	ret = request_irq(ar_pci->pdev->irq,
 			  ath10k_pci_interrupt_handler,
 			  IRQF_SHARED, "ath10k_pci", ar);
-	if (ret < 0) {
-		pci_disable_msi(ar_pci->pdev);
+	if (ret) {
+		ath10k_warn("failed to request MSI irq %d: %d\n",
+			    ar_pci->pdev->irq, ret);
 		return ret;
 	}
 
-	ath10k_info("MSI interrupt handling\n");
 	return 0;
 }
 
-static int ath10k_pci_start_intr_legacy(struct ath10k *ar)
+static int ath10k_pci_request_irq_legacy(struct ath10k *ar)
 {
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 	int ret;
@@ -2144,112 +2340,165 @@ static int ath10k_pci_start_intr_legacy(struct ath10k *ar)
 	ret = request_irq(ar_pci->pdev->irq,
 			  ath10k_pci_interrupt_handler,
 			  IRQF_SHARED, "ath10k_pci", ar);
-	if (ret < 0)
+	if (ret) {
+		ath10k_warn("failed to request legacy irq %d: %d\n",
+			    ar_pci->pdev->irq, ret);
 		return ret;
+	}
 
-	/*
-	 * Make sure to wake the Target before enabling Legacy
-	 * Interrupt.
-	 */
-	iowrite32(PCIE_SOC_WAKE_V_MASK,
-		  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
-		  PCIE_SOC_WAKE_ADDRESS);
+	return 0;
+}
+
+static int ath10k_pci_request_irq(struct ath10k *ar)
+{
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 
-	ath10k_pci_wait(ar);
+	switch (ar_pci->num_msi_intrs) {
+	case 0:
+		return ath10k_pci_request_irq_legacy(ar);
+	case 1:
+		return ath10k_pci_request_irq_msi(ar);
+	case MSI_NUM_REQUEST:
+		return ath10k_pci_request_irq_msix(ar);
+	}
 
-	/*
-	 * A potential race occurs here: The CORE_BASE write
-	 * depends on target correctly decoding AXI address but
-	 * host won't know when target writes BAR to CORE_CTRL.
-	 * This write might get lost if target has NOT written BAR.
-	 * For now, fix the race by repeating the write in below
-	 * synchronization checking.
-	 */
-	iowrite32(PCIE_INTR_FIRMWARE_MASK |
-		  PCIE_INTR_CE_MASK_ALL,
-		  ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
-				 PCIE_INTR_ENABLE_ADDRESS));
-	iowrite32(PCIE_SOC_WAKE_RESET,
-		  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
-		  PCIE_SOC_WAKE_ADDRESS);
-
-	ath10k_info("legacy interrupt handling\n");
-	return 0;
+	ath10k_warn("unknown irq configuration upon request\n");
+	return -EINVAL;
 }
 
-static int ath10k_pci_start_intr(struct ath10k *ar)
+static void ath10k_pci_free_irq(struct ath10k *ar)
+{
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	int i;
+
+	/* There's at least one interrupt regardless of whether it's legacy
+	 * INTR, MSI or MSI-X */
+	for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
+		free_irq(ar_pci->pdev->irq + i, ar);
+}
+
+static void ath10k_pci_init_irq_tasklets(struct ath10k *ar)
 {
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-	int num = MSI_NUM_REQUEST;
-	int ret;
 	int i;
 
-	tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long) ar);
+	tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long)ar);
 	tasklet_init(&ar_pci->msi_fw_err, ath10k_msi_err_tasklet,
-		     (unsigned long) ar);
+		     (unsigned long)ar);
+	tasklet_init(&ar_pci->early_irq_tasklet, ath10k_pci_early_irq_tasklet,
+		     (unsigned long)ar);
 
 	for (i = 0; i < CE_COUNT; i++) {
 		ar_pci->pipe_info[i].ar_pci = ar_pci;
-		tasklet_init(&ar_pci->pipe_info[i].intr,
-			     ath10k_pci_ce_tasklet,
+		tasklet_init(&ar_pci->pipe_info[i].intr, ath10k_pci_ce_tasklet,
 			     (unsigned long)&ar_pci->pipe_info[i]);
 	}
+}
+
+static int ath10k_pci_init_irq(struct ath10k *ar)
+{
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	bool msix_supported = test_bit(ATH10K_PCI_FEATURE_MSI_X,
+				       ar_pci->features);
+	int ret;
 
-	if (!test_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features))
-		num = 1;
+	ath10k_pci_init_irq_tasklets(ar);
 
-	if (num > 1) {
-		ret = ath10k_pci_start_intr_msix(ar, num);
+	if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_AUTO &&
+	    !test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags))
+		ath10k_info("limiting irq mode to: %d\n", ath10k_pci_irq_mode);
+
+	/* Try MSI-X */
+	if (ath10k_pci_irq_mode == ATH10K_PCI_IRQ_AUTO && msix_supported) {
+		ar_pci->num_msi_intrs = MSI_NUM_REQUEST;
+		ret = pci_enable_msi_block(ar_pci->pdev, ar_pci->num_msi_intrs);
 		if (ret == 0)
-			goto exit;
+			return 0;
+		if (ret > 0)
+			pci_disable_msi(ar_pci->pdev);
 
-		ath10k_warn("MSI-X didn't succeed (%d), trying MSI\n", ret);
-		num = 1;
+		/* fall-through */
 	}
 
-	if (num == 1) {
-		ret = ath10k_pci_start_intr_msi(ar);
+	/* Try MSI */
+	if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_LEGACY) {
+		ar_pci->num_msi_intrs = 1;
+		ret = pci_enable_msi(ar_pci->pdev);
 		if (ret == 0)
-			goto exit;
+			return 0;
 
-		ath10k_warn("MSI didn't succeed (%d), trying legacy INTR\n",
-			    ret);
-		num = 0;
+		/* fall-through */
 	}
 
-	ret = ath10k_pci_start_intr_legacy(ar);
+	/* Try legacy irq
+	 *
+	 * A potential race occurs here: the CORE_BASE write
+	 * depends on the target correctly decoding the AXI address, but
+	 * the host won't know when the target writes BAR to CORE_CTRL.
+	 * This write might get lost if the target has NOT written BAR.
+	 * For now, fix the race by repeating the write in the
+	 * synchronization check below. */
+	ar_pci->num_msi_intrs = 0;
 
-exit:
-	ar_pci->num_msi_intrs = num;
-	ar_pci->ce_count = CE_COUNT;
-	return ret;
+	ret = ath10k_pci_wake(ar);
+	if (ret) {
+		ath10k_warn("failed to wake target: %d\n", ret);
+		return ret;
+	}
+
+	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
+			   PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
+	ath10k_pci_sleep(ar);
+
+	return 0;
 }
 
-static void ath10k_pci_stop_intr(struct ath10k *ar)
+static int ath10k_pci_deinit_irq_legacy(struct ath10k *ar)
 {
-	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-	int i;
+	int ret;
 
-	/* There's at least one interrupt irregardless whether its legacy INTR
-	 * or MSI or MSI-X */
-	for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
-		free_irq(ar_pci->pdev->irq + i, ar);
+	ret = ath10k_pci_wake(ar);
+	if (ret) {
+		ath10k_warn("failed to wake target: %d\n", ret);
+		return ret;
+	}
 
-	if (ar_pci->num_msi_intrs > 0)
+	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
+			   0);
+	ath10k_pci_sleep(ar);
+
+	return 0;
+}
+
+static int ath10k_pci_deinit_irq(struct ath10k *ar)
+{
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+	switch (ar_pci->num_msi_intrs) {
+	case 0:
+		return ath10k_pci_deinit_irq_legacy(ar);
+	case 1:
+		/* fall-through */
+	case MSI_NUM_REQUEST:
 		pci_disable_msi(ar_pci->pdev);
+		return 0;
+	}
+
+	ath10k_warn("unknown irq configuration upon deinit\n");
+	return -EINVAL;
 }
 
-static int ath10k_pci_reset_target(struct ath10k *ar)
+static int ath10k_pci_wait_for_target_init(struct ath10k *ar)
 {
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 	int wait_limit = 300; /* 3 sec */
+	int ret;
 
-	/* Wait for Target to finish initialization before we proceed. */
-	iowrite32(PCIE_SOC_WAKE_V_MASK,
-		  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
-		  PCIE_SOC_WAKE_ADDRESS);
-
-	ath10k_pci_wait(ar);
+	ret = ath10k_pci_wake(ar);
+	if (ret) {
+		ath10k_err("failed to wake up target: %d\n", ret);
+		return ret;
+	}
 
 	while (wait_limit-- &&
 	       !(ioread32(ar_pci->mem + FW_INDICATOR_ADDRESS) &
@@ -2264,34 +2513,26 @@ static int ath10k_pci_reset_target(struct ath10k *ar)
 	}
 
 	if (wait_limit < 0) {
-		ath10k_err("Target stalled\n");
-		iowrite32(PCIE_SOC_WAKE_RESET,
-			  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
-			  PCIE_SOC_WAKE_ADDRESS);
-		return -EIO;
+		ath10k_err("target stalled\n");
+		ret = -EIO;
+		goto out;
 	}
 
-	iowrite32(PCIE_SOC_WAKE_RESET,
-		  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
-		  PCIE_SOC_WAKE_ADDRESS);
-
-	return 0;
+out:
+	ath10k_pci_sleep(ar);
+	return ret;
 }
 
-static void ath10k_pci_device_reset(struct ath10k *ar)
+static int ath10k_pci_device_reset(struct ath10k *ar)
 {
-	int i;
+	int i, ret;
 	u32 val;
 
-	if (!SOC_GLOBAL_RESET_ADDRESS)
-		return;
-
-	ath10k_pci_reg_write32(ar, PCIE_SOC_WAKE_ADDRESS,
-			       PCIE_SOC_WAKE_V_MASK);
-	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
-		if (ath10k_pci_target_is_awake(ar))
-			break;
-		msleep(1);
+	ret = ath10k_do_pci_wake(ar);
+	if (ret) {
+		ath10k_err("failed to wake up target: %d\n",
+			   ret);
+		return ret;
 	}
 
 	/* Put Target, including PCIe, into RESET. */
@@ -2317,7 +2558,8 @@ static void ath10k_pci_device_reset(struct ath10k *ar)
 		msleep(1);
 	}
 
-	ath10k_pci_reg_write32(ar, PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
+	ath10k_do_pci_sleep(ar);
+	return 0;
 }
 
 static void ath10k_pci_dump_features(struct ath10k_pci *ar_pci)
@@ -2374,7 +2616,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
 
 	ar = ath10k_core_create(ar_pci, ar_pci->dev, &ath10k_pci_hif_ops);
 	if (!ar) {
-		ath10k_err("ath10k_core_create failed!\n");
+		ath10k_err("failed to create driver core\n");
 		ret = -EINVAL;
 		goto err_ar_pci;
 	}
@@ -2393,20 +2635,20 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
 	 */
 	ret = pci_assign_resource(pdev, BAR_NUM);
 	if (ret) {
-		ath10k_err("cannot assign PCI space: %d\n", ret);
+		ath10k_err("failed to assign PCI space: %d\n", ret);
 		goto err_ar;
 	}
 
 	ret = pci_enable_device(pdev);
 	if (ret) {
-		ath10k_err("cannot enable PCI device: %d\n", ret);
+		ath10k_err("failed to enable PCI device: %d\n", ret);
 		goto err_ar;
 	}
 
 	/* Request MMIO resources */
 	ret = pci_request_region(pdev, BAR_NUM, "ath");
 	if (ret) {
-		ath10k_err("PCI MMIO reservation error: %d\n", ret);
+		ath10k_err("failed to request MMIO region: %d\n", ret);
 		goto err_device;
 	}
 
@@ -2416,13 +2658,13 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
 	 */
 	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
 	if (ret) {
-		ath10k_err("32-bit DMA not available: %d\n", ret);
+		ath10k_err("failed to set DMA mask to 32-bit: %d\n", ret);
 		goto err_region;
 	}
 
 	ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
 	if (ret) {
-		ath10k_err("cannot enable 32-bit consistent DMA\n");
+		ath10k_err("failed to set consistent DMA mask to 32-bit\n");
 		goto err_region;
 	}
 
@@ -2439,7 +2681,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
 	/* Arrange for access to Target SoC registers. */
 	mem = pci_iomap(pdev, BAR_NUM, 0);
 	if (!mem) {
-		ath10k_err("PCI iomap error\n");
+		ath10k_err("failed to perform IOMAP for BAR%d\n", BAR_NUM);
 		ret = -EIO;
 		goto err_master;
 	}
@@ -2451,11 +2693,10 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
 	ret = ath10k_do_pci_wake(ar);
 	if (ret) {
 		ath10k_err("Failed to get chip id: %d\n", ret);
-		return ret;
+		goto err_iomap;
 	}
 
-	chip_id = ath10k_pci_read32(ar,
-				    RTC_SOC_BASE_ADDRESS + SOC_CHIP_ID_ADDRESS);
+	chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
 
 	ath10k_do_pci_sleep(ar);
 
@@ -2463,7 +2704,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
 
 	ret = ath10k_core_register(ar, chip_id);
 	if (ret) {
-		ath10k_err("could not register driver core (%d)\n", ret);
+		ath10k_err("failed to register driver core: %d\n", ret);
 		goto err_iomap;
 	}
 
@@ -2529,7 +2770,7 @@ static int __init ath10k_pci_init(void)
 
 	ret = pci_register_driver(&ath10k_pci_driver);
 	if (ret)
-		ath10k_err("pci_register_driver failed [%d]\n", ret);
+		ath10k_err("failed to register PCI driver: %d\n", ret);
 
 	return ret;
 }
diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h
index 52fb7b973571..a4f32038c440 100644
--- a/drivers/net/wireless/ath/ath10k/pci.h
+++ b/drivers/net/wireless/ath/ath10k/pci.h
@@ -198,9 +198,7 @@ struct ath10k_pci {
 
 	struct tasklet_struct intr_tq;
 	struct tasklet_struct msi_fw_err;
-
-	/* Number of Copy Engines supported */
-	unsigned int ce_count;
+	struct tasklet_struct early_irq_tasklet;
 
 	int started;
 
@@ -318,6 +316,16 @@ static inline u32 ath10k_pci_read32(struct ath10k *ar, u32 offset)
 	return ioread32(ar_pci->mem + offset);
 }
 
+static inline u32 ath10k_pci_soc_read32(struct ath10k *ar, u32 addr)
+{
+	return ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + addr);
+}
+
+static inline void ath10k_pci_soc_write32(struct ath10k *ar, u32 addr, u32 val)
+{
+	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + addr, val);
+}
+
 int ath10k_do_pci_wake(struct ath10k *ar);
 void ath10k_do_pci_sleep(struct ath10k *ar);
 
diff --git a/drivers/net/wireless/ath/ath10k/trace.h b/drivers/net/wireless/ath/ath10k/trace.h
index 90817ddc92ba..4eb2ecbc06ef 100644
--- a/drivers/net/wireless/ath/ath10k/trace.h
+++ b/drivers/net/wireless/ath/ath10k/trace.h
@@ -182,6 +182,27 @@ TRACE_EVENT(ath10k_htt_stats,
 	)
 );
 
+TRACE_EVENT(ath10k_wmi_dbglog,
+	TP_PROTO(void *buf, size_t buf_len),
+
+	TP_ARGS(buf, buf_len),
+
+	TP_STRUCT__entry(
+		__field(size_t, buf_len)
+		__dynamic_array(u8, buf, buf_len)
+	),
+
+	TP_fast_assign(
+		__entry->buf_len = buf_len;
+		memcpy(__get_dynamic_array(buf), buf, buf_len);
+	),
+
+	TP_printk(
+		"len %zu",
+		__entry->buf_len
+	)
+);
+
 #endif /* _TRACE_H_ || TRACE_HEADER_MULTI_READ*/
 
 /* we don't want to use include/trace/events */
diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c
index 5ae373a1e294..74f45fa6f428 100644
--- a/drivers/net/wireless/ath/ath10k/txrx.c
+++ b/drivers/net/wireless/ath/ath10k/txrx.c
@@ -75,6 +75,7 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
 	ath10k_report_offchan_tx(htt->ar, msdu);
 
 	info = IEEE80211_SKB_CB(msdu);
+	memset(&info->status, 0, sizeof(info->status));
 
 	if (tx_done->discard) {
 		ieee80211_free_txskb(htt->ar->hw, msdu);
@@ -183,7 +184,7 @@ static void process_rx_rates(struct ath10k *ar, struct htt_rx_info *info,
 		/* VHT-SIG-A1 in info 1, VHT-SIG-A2 in info2
 		   TODO check this */
 		mcs = (info2 >> 4) & 0x0F;
-		nss = (info1 >> 10) & 0x07;
+		nss = ((info1 >> 10) & 0x07) + 1;
 		bw = info1 & 3;
 		sgi = info2 & 1;
 
@@ -230,12 +231,15 @@ void ath10k_process_rx(struct ath10k *ar, struct htt_rx_info *info)
 				~IEEE80211_FCTL_PROTECTED);
 	}
 
-	if (info->status == HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR)
+	if (info->mic_err)
 		status->flag |= RX_FLAG_MMIC_ERROR;
 
 	if (info->fcs_err)
 		status->flag |= RX_FLAG_FAILED_FCS_CRC;
 
+	if (info->amsdu_more)
+		status->flag |= RX_FLAG_AMSDU_MORE;
+
 	status->signal = info->signal;
 
 	spin_lock_bh(&ar->data_lock);
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index ccf3597fd9e2..712a606a080a 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -16,6 +16,7 @@
  */
 
 #include <linux/skbuff.h>
+#include <linux/ctype.h>
 
 #include "core.h"
 #include "htc.h"
@@ -674,10 +675,8 @@ int ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *skb)
 
 	/* Send the management frame buffer to the target */
 	ret = ath10k_wmi_cmd_send(ar, wmi_skb, ar->wmi.cmd->mgmt_tx_cmdid);
-	if (ret) {
-		dev_kfree_skb_any(skb);
+	if (ret)
 		return ret;
-	}
 
 	/* TODO: report tx status to mac80211 - temporary just ACK */
 	info->flags |= IEEE80211_TX_STAT_ACK;
@@ -877,6 +876,7 @@ static int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
 	struct wmi_mgmt_rx_event_v2 *ev_v2;
 	struct wmi_mgmt_rx_hdr_v1 *ev_hdr;
 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
+	struct ieee80211_channel *ch;
 	struct ieee80211_hdr *hdr;
 	u32 rx_status;
 	u32 channel;
@@ -909,6 +909,11 @@ static int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
 	ath10k_dbg(ATH10K_DBG_MGMT,
 		   "event mgmt rx status %08x\n", rx_status);
 
+	if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) {
+		dev_kfree_skb(skb);
+		return 0;
+	}
+
 	if (rx_status & WMI_RX_STATUS_ERR_DECRYPT) {
 		dev_kfree_skb(skb);
 		return 0;
@@ -924,7 +929,25 @@ static int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
 	if (rx_status & WMI_RX_STATUS_ERR_MIC)
 		status->flag |= RX_FLAG_MMIC_ERROR;
 
-	status->band = phy_mode_to_band(phy_mode);
+	/* HW can Rx CCK rates on 5GHz. In that case phy_mode is set to
+	 * MODE_11B. This means phy_mode is not a reliable source for the band
+	 * of mgmt rx. */
+
+	ch = ar->scan_channel;
+	if (!ch)
+		ch = ar->rx_channel;
+
+	if (ch) {
+		status->band = ch->band;
+
+		if (phy_mode == MODE_11B &&
+		    status->band == IEEE80211_BAND_5GHZ)
+			ath10k_dbg(ATH10K_DBG_MGMT, "wmi mgmt rx 11b (CCK) on 5GHz\n");
+	} else {
+		ath10k_warn("using (unreliable) phy_mode to extract band for mgmt rx\n");
+		status->band = phy_mode_to_band(phy_mode);
+	}
+
 	status->freq = ieee80211_channel_to_frequency(channel, status->band);
 	status->signal = snr + ATH10K_DEFAULT_NOISE_FLOOR;
 	status->rate_idx = get_rate_idx(rate, status->band);
@@ -934,7 +957,11 @@ static int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
 	hdr = (struct ieee80211_hdr *)skb->data;
 	fc = le16_to_cpu(hdr->frame_control);
 
-	if (fc & IEEE80211_FCTL_PROTECTED) {
+	/* FW delivers WEP Shared Auth frame with Protected Bit set and
+	 * encrypted payload. However, in case of PMF it delivers decrypted
+	 * frames with Protected Bit set. */
+	if (ieee80211_has_protected(hdr->frame_control) &&
+	    !ieee80211_is_auth(hdr->frame_control)) {
 		status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_IV_STRIPPED |
 				RX_FLAG_MMIC_STRIPPED;
 		hdr->frame_control = __cpu_to_le16(fc &
@@ -1044,9 +1071,14 @@ static void ath10k_wmi_event_echo(struct ath10k *ar, struct sk_buff *skb)
 	ath10k_dbg(ATH10K_DBG_WMI, "WMI_ECHO_EVENTID\n");
 }
 
-static void ath10k_wmi_event_debug_mesg(struct ath10k *ar, struct sk_buff *skb)
+static int ath10k_wmi_event_debug_mesg(struct ath10k *ar, struct sk_buff *skb)
 {
-	ath10k_dbg(ATH10K_DBG_WMI, "WMI_DEBUG_MESG_EVENTID\n");
+	ath10k_dbg(ATH10K_DBG_WMI, "wmi event debug mesg len %d\n",
+		   skb->len);
+
+	trace_ath10k_wmi_dbglog(skb->data, skb->len);
+
+	return 0;
 }
 
 static void ath10k_wmi_event_update_stats(struct ath10k *ar,
@@ -1383,9 +1415,259 @@ static void ath10k_wmi_event_tbttoffset_update(struct ath10k *ar,
 	ath10k_dbg(ATH10K_DBG_WMI, "WMI_TBTTOFFSET_UPDATE_EVENTID\n");
 }
 
+static void ath10k_dfs_radar_report(struct ath10k *ar,
+				    struct wmi_single_phyerr_rx_event *event,
+				    struct phyerr_radar_report *rr,
+				    u64 tsf)
+{
+	u32 reg0, reg1, tsf32l;
+	struct pulse_event pe;
+	u64 tsf64;
+	u8 rssi, width;
+
+	reg0 = __le32_to_cpu(rr->reg0);
+	reg1 = __le32_to_cpu(rr->reg1);
+
+	ath10k_dbg(ATH10K_DBG_REGULATORY,
+		   "wmi phyerr radar report chirp %d max_width %d agc_total_gain %d pulse_delta_diff %d\n",
+		   MS(reg0, RADAR_REPORT_REG0_PULSE_IS_CHIRP),
+		   MS(reg0, RADAR_REPORT_REG0_PULSE_IS_MAX_WIDTH),
+		   MS(reg0, RADAR_REPORT_REG0_AGC_TOTAL_GAIN),
+		   MS(reg0, RADAR_REPORT_REG0_PULSE_DELTA_DIFF));
+	ath10k_dbg(ATH10K_DBG_REGULATORY,
+		   "wmi phyerr radar report pulse_delta_pean %d pulse_sidx %d fft_valid %d agc_mb_gain %d subchan_mask %d\n",
+		   MS(reg0, RADAR_REPORT_REG0_PULSE_DELTA_PEAK),
+		   MS(reg0, RADAR_REPORT_REG0_PULSE_SIDX),
+		   MS(reg1, RADAR_REPORT_REG1_PULSE_SRCH_FFT_VALID),
+		   MS(reg1, RADAR_REPORT_REG1_PULSE_AGC_MB_GAIN),
+		   MS(reg1, RADAR_REPORT_REG1_PULSE_SUBCHAN_MASK));
+	ath10k_dbg(ATH10K_DBG_REGULATORY,
+		   "wmi phyerr radar report pulse_tsf_offset 0x%X pulse_dur: %d\n",
+		   MS(reg1, RADAR_REPORT_REG1_PULSE_TSF_OFFSET),
+		   MS(reg1, RADAR_REPORT_REG1_PULSE_DUR));
+
+	if (!ar->dfs_detector)
+		return;
+
+	/* report event to DFS pattern detector */
+	tsf32l = __le32_to_cpu(event->hdr.tsf_timestamp);
+	tsf64 = tsf & (~0xFFFFFFFFULL);
+	tsf64 |= tsf32l;
+
+	width = MS(reg1, RADAR_REPORT_REG1_PULSE_DUR);
+	rssi = event->hdr.rssi_combined;
+
+	/* hardware stores this as an 8 bit signed value;
+	 * set it to zero if negative
+	 */
+	if (rssi & 0x80)
+		rssi = 0;
+
+	pe.ts = tsf64;
+	pe.freq = ar->hw->conf.chandef.chan->center_freq;
+	pe.width = width;
+	pe.rssi = rssi;
+
+	ath10k_dbg(ATH10K_DBG_REGULATORY,
+		   "dfs add pulse freq: %d, width: %d, rssi %d, tsf: %llX\n",
+		   pe.freq, pe.width, pe.rssi, pe.ts);
+
+	ATH10K_DFS_STAT_INC(ar, pulses_detected);
+
+	if (!ar->dfs_detector->add_pulse(ar->dfs_detector, &pe)) {
+		ath10k_dbg(ATH10K_DBG_REGULATORY,
+			   "dfs no pulse pattern detected, yet\n");
+		return;
+	}
+
+	ath10k_dbg(ATH10K_DBG_REGULATORY, "dfs radar detected\n");
+	ATH10K_DFS_STAT_INC(ar, radar_detected);
+
+	/* Radar event reporting can be suppressed via the debugfs file
+	   dfs_block_radar_events */
+	if (ar->dfs_block_radar_events) {
+		ath10k_info("DFS Radar detected, but ignored as requested\n");
+		return;
+	}
+
+	ieee80211_radar_detected(ar->hw);
+}
+
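The 64-bit pulse timestamp handed to the pattern detector above is stitched
together from two sources: the upper 32 bits of the combined-event TSF and the
lower 32 bits from the per-event tsf_timestamp. A worked example of that
masking, with illustrative values only:

	/* illustrative values, not taken from real hardware */
	u64 tsf = 0x0000000500000000ULL;	/* from the combined phyerr header */
	u32 tsf32l = 0x1234abcd;		/* per-event tsf_timestamp */
	u64 tsf64 = (tsf & ~0xFFFFFFFFULL) | tsf32l;	/* 0x000000051234abcd */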
+static int ath10k_dfs_fft_report(struct ath10k *ar,
+				 struct wmi_single_phyerr_rx_event *event,
+				 struct phyerr_fft_report *fftr,
+				 u64 tsf)
+{
+	u32 reg0, reg1;
+	u8 rssi, peak_mag;
+
+	reg0 = __le32_to_cpu(fftr->reg0);
+	reg1 = __le32_to_cpu(fftr->reg1);
+	rssi = event->hdr.rssi_combined;
+
+	ath10k_dbg(ATH10K_DBG_REGULATORY,
+		   "wmi phyerr fft report total_gain_db %d base_pwr_db %d fft_chn_idx %d peak_sidx %d\n",
+		   MS(reg0, SEARCH_FFT_REPORT_REG0_TOTAL_GAIN_DB),
+		   MS(reg0, SEARCH_FFT_REPORT_REG0_BASE_PWR_DB),
+		   MS(reg0, SEARCH_FFT_REPORT_REG0_FFT_CHN_IDX),
+		   MS(reg0, SEARCH_FFT_REPORT_REG0_PEAK_SIDX));
+	ath10k_dbg(ATH10K_DBG_REGULATORY,
+		   "wmi phyerr fft report rel_pwr_db %d avgpwr_db %d peak_mag %d num_store_bin %d\n",
+		   MS(reg1, SEARCH_FFT_REPORT_REG1_RELPWR_DB),
+		   MS(reg1, SEARCH_FFT_REPORT_REG1_AVGPWR_DB),
+		   MS(reg1, SEARCH_FFT_REPORT_REG1_PEAK_MAG),
+		   MS(reg1, SEARCH_FFT_REPORT_REG1_NUM_STR_BINS_IB));
+
+	peak_mag = MS(reg1, SEARCH_FFT_REPORT_REG1_PEAK_MAG);
+
+	/* false event detection */
+	if (rssi == DFS_RSSI_POSSIBLY_FALSE &&
+	    peak_mag < 2 * DFS_PEAK_MAG_THOLD_POSSIBLY_FALSE) {
+		ath10k_dbg(ATH10K_DBG_REGULATORY, "dfs false pulse detected\n");
+		ATH10K_DFS_STAT_INC(ar, pulses_discarded);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void ath10k_wmi_event_dfs(struct ath10k *ar,
+				 struct wmi_single_phyerr_rx_event *event,
+				 u64 tsf)
+{
+	int buf_len, tlv_len, res, i = 0;
+	struct phyerr_tlv *tlv;
+	struct phyerr_radar_report *rr;
+	struct phyerr_fft_report *fftr;
+	u8 *tlv_buf;
+
+	buf_len = __le32_to_cpu(event->hdr.buf_len);
+	ath10k_dbg(ATH10K_DBG_REGULATORY,
+		   "wmi event dfs err_code %d rssi %d tsfl 0x%X tsf64 0x%llX len %d\n",
+		   event->hdr.phy_err_code, event->hdr.rssi_combined,
+		   __le32_to_cpu(event->hdr.tsf_timestamp), tsf, buf_len);
+
+	/* Skip event if DFS disabled */
+	if (!config_enabled(CONFIG_ATH10K_DFS_CERTIFIED))
+		return;
+
+	ATH10K_DFS_STAT_INC(ar, pulses_total);
+
+	while (i < buf_len) {
+		if (i + sizeof(*tlv) > buf_len) {
+			ath10k_warn("too short buf for tlv header (%d)\n", i);
+			return;
+		}
+
+		tlv = (struct phyerr_tlv *)&event->bufp[i];
+		tlv_len = __le16_to_cpu(tlv->len);
+		tlv_buf = &event->bufp[i + sizeof(*tlv)];
+		ath10k_dbg(ATH10K_DBG_REGULATORY,
+			   "wmi event dfs tlv_len %d tlv_tag 0x%02X tlv_sig 0x%02X\n",
+			   tlv_len, tlv->tag, tlv->sig);
+
+		switch (tlv->tag) {
+		case PHYERR_TLV_TAG_RADAR_PULSE_SUMMARY:
+			if (i + sizeof(*tlv) + sizeof(*rr) > buf_len) {
+				ath10k_warn("too short radar pulse summary (%d)\n",
+					    i);
+				return;
+			}
+
+			rr = (struct phyerr_radar_report *)tlv_buf;
+			ath10k_dfs_radar_report(ar, event, rr, tsf);
+			break;
+		case PHYERR_TLV_TAG_SEARCH_FFT_REPORT:
+			if (i + sizeof(*tlv) + sizeof(*fftr) > buf_len) {
+				ath10k_warn("too short fft report (%d)\n", i);
+				return;
+			}
+
+			fftr = (struct phyerr_fft_report *)tlv_buf;
+			res = ath10k_dfs_fft_report(ar, event, fftr, tsf);
+			if (res)
+				return;
+			break;
+		}
+
+		i += sizeof(*tlv) + tlv_len;
+	}
+}
+
+static void ath10k_wmi_event_spectral_scan(struct ath10k *ar,
+				struct wmi_single_phyerr_rx_event *event,
+				u64 tsf)
+{
+	ath10k_dbg(ATH10K_DBG_WMI, "wmi event spectral scan\n");
+}
+
 static void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb)
 {
-	ath10k_dbg(ATH10K_DBG_WMI, "WMI_PHYERR_EVENTID\n");
+	struct wmi_comb_phyerr_rx_event *comb_event;
+	struct wmi_single_phyerr_rx_event *event;
+	u32 count, i, buf_len, phy_err_code;
+	u64 tsf;
+	int left_len = skb->len;
+
+	ATH10K_DFS_STAT_INC(ar, phy_errors);
+
+	/* Check if combined event available */
+	if (left_len < sizeof(*comb_event)) {
+		ath10k_warn("wmi phyerr combined event wrong len\n");
+		return;
+	}
+
+	left_len -= sizeof(*comb_event);
+
+	/* Check number of included events */
+	comb_event = (struct wmi_comb_phyerr_rx_event *)skb->data;
+	count = __le32_to_cpu(comb_event->hdr.num_phyerr_events);
+
+	tsf = __le32_to_cpu(comb_event->hdr.tsf_u32);
+	tsf <<= 32;
+	tsf |= __le32_to_cpu(comb_event->hdr.tsf_l32);
+
+	ath10k_dbg(ATH10K_DBG_WMI,
+		   "wmi event phyerr count %d tsf64 0x%llX\n",
+		   count, tsf);
+
+	event = (struct wmi_single_phyerr_rx_event *)comb_event->bufp;
+	for (i = 0; i < count; i++) {
+		/* Check if we can read event header */
+		if (left_len < sizeof(*event)) {
+			ath10k_warn("single event (%d) wrong head len\n", i);
+			return;
+		}
+
+		left_len -= sizeof(*event);
+
+		buf_len = __le32_to_cpu(event->hdr.buf_len);
+		phy_err_code = event->hdr.phy_err_code;
+
+		if (left_len < buf_len) {
+			ath10k_warn("single event (%d) wrong buf len\n", i);
+			return;
+		}
+
+		left_len -= buf_len;
+
+		switch (phy_err_code) {
+		case PHY_ERROR_RADAR:
+			ath10k_wmi_event_dfs(ar, event, tsf);
+			break;
+		case PHY_ERROR_SPECTRAL_SCAN:
+			ath10k_wmi_event_spectral_scan(ar, event, tsf);
+			break;
+		case PHY_ERROR_FALSE_RADAR_EXT:
+			ath10k_wmi_event_dfs(ar, event, tsf);
+			ath10k_wmi_event_spectral_scan(ar, event, tsf);
+			break;
+		default:
+			break;
+		}
+
+		event += sizeof(*event) + buf_len;
+	}
 }
 
 static void ath10k_wmi_event_roam(struct ath10k *ar, struct sk_buff *skb)
@@ -1400,9 +1682,37 @@ static void ath10k_wmi_event_profile_match(struct ath10k *ar,
 }
 
 static void ath10k_wmi_event_debug_print(struct ath10k *ar,
-				  struct sk_buff *skb)
+					 struct sk_buff *skb)
 {
-	ath10k_dbg(ATH10K_DBG_WMI, "WMI_DEBUG_PRINT_EVENTID\n");
+	char buf[101], c;
+	int i;
+
+	for (i = 0; i < sizeof(buf) - 1; i++) {
+		if (i >= skb->len)
+			break;
+
+		c = skb->data[i];
+
+		if (c == '\0')
+			break;
+
+		if (isascii(c) && isprint(c))
+			buf[i] = c;
+		else
+			buf[i] = '.';
+	}
+
+	if (i == sizeof(buf) - 1)
+		ath10k_warn("wmi debug print truncated: %d\n", skb->len);
+
+	/* for some reason the debug prints end with \n, remove that */
+	if (skb->data[i - 1] == '\n')
+		i--;
+
+	/* the last byte is always reserved for the null character */
+	buf[i] = '\0';
+
+	ath10k_dbg(ATH10K_DBG_WMI, "wmi event debug print '%s'\n", buf);
 }
 
 static void ath10k_wmi_event_pdev_qvit(struct ath10k *ar, struct sk_buff *skb)
@@ -2062,6 +2372,7 @@ int ath10k_wmi_pdev_set_channel(struct ath10k *ar,
 {
 	struct wmi_set_channel_cmd *cmd;
 	struct sk_buff *skb;
+	u32 ch_flags = 0;
 
 	if (arg->passive)
 		return -EINVAL;
@@ -2070,10 +2381,14 @@ int ath10k_wmi_pdev_set_channel(struct ath10k *ar,
 	if (!skb)
 		return -ENOMEM;
 
+	if (arg->chan_radar)
+		ch_flags |= WMI_CHAN_FLAG_DFS;
+
 	cmd = (struct wmi_set_channel_cmd *)skb->data;
 	cmd->chan.mhz               = __cpu_to_le32(arg->freq);
 	cmd->chan.band_center_freq1 = __cpu_to_le32(arg->freq);
 	cmd->chan.mode              = arg->mode;
+	cmd->chan.flags		   |= __cpu_to_le32(ch_flags);
 	cmd->chan.min_power         = arg->min_power;
 	cmd->chan.max_power         = arg->max_power;
 	cmd->chan.reg_power         = arg->max_reg_power;
@@ -2211,7 +2526,7 @@ static int ath10k_wmi_main_cmd_init(struct ath10k *ar)
 	}
 
 	ath10k_dbg(ATH10K_DBG_WMI, "wmi sending %d memory chunks info.\n",
-		   __cpu_to_le32(ar->wmi.num_mem_chunks));
+		   ar->wmi.num_mem_chunks);
 
 	cmd->num_host_mem_chunks = __cpu_to_le32(ar->wmi.num_mem_chunks);
 
@@ -2224,10 +2539,10 @@ static int ath10k_wmi_main_cmd_init(struct ath10k *ar)
 			__cpu_to_le32(ar->wmi.mem_chunks[i].req_id);
 
 		ath10k_dbg(ATH10K_DBG_WMI,
-			   "wmi chunk %d len %d requested, addr 0x%x\n",
+			   "wmi chunk %d len %d requested, addr 0x%llx\n",
 			   i,
-			   cmd->host_mem_chunks[i].size,
-			   cmd->host_mem_chunks[i].ptr);
+			   ar->wmi.mem_chunks[i].len,
+			   (unsigned long long)ar->wmi.mem_chunks[i].paddr);
 	}
 out:
 	memcpy(&cmd->resource_config, &config, sizeof(config));
@@ -2302,7 +2617,7 @@ static int ath10k_wmi_10x_cmd_init(struct ath10k *ar)
 	}
 
 	ath10k_dbg(ATH10K_DBG_WMI, "wmi sending %d memory chunks info.\n",
-		   __cpu_to_le32(ar->wmi.num_mem_chunks));
+		   ar->wmi.num_mem_chunks);
 
 	cmd->num_host_mem_chunks = __cpu_to_le32(ar->wmi.num_mem_chunks);
 
@@ -2315,10 +2630,10 @@ static int ath10k_wmi_10x_cmd_init(struct ath10k *ar)
 			__cpu_to_le32(ar->wmi.mem_chunks[i].req_id);
 
 		ath10k_dbg(ATH10K_DBG_WMI,
-			   "wmi chunk %d len %d requested, addr 0x%x\n",
+			   "wmi chunk %d len %d requested, addr 0x%llx\n",
 			   i,
-			   cmd->host_mem_chunks[i].size,
-			   cmd->host_mem_chunks[i].ptr);
+			   ar->wmi.mem_chunks[i].len,
+			   (unsigned long long)ar->wmi.mem_chunks[i].paddr);
 	}
 out:
 	memcpy(&cmd->resource_config, &config, sizeof(config));
@@ -2622,6 +2937,7 @@ static int ath10k_wmi_vdev_start_restart(struct ath10k *ar,
 	struct sk_buff *skb;
 	const char *cmdname;
 	u32 flags = 0;
+	u32 ch_flags = 0;
 
 	if (cmd_id != ar->wmi.cmd->vdev_start_request_cmdid &&
 	    cmd_id != ar->wmi.cmd->vdev_restart_request_cmdid)
@@ -2648,6 +2964,8 @@ static int ath10k_wmi_vdev_start_restart(struct ath10k *ar,
 		flags |= WMI_VDEV_START_HIDDEN_SSID;
 	if (arg->pmf_enabled)
 		flags |= WMI_VDEV_START_PMF_ENABLED;
+	if (arg->channel.chan_radar)
+		ch_flags |= WMI_CHAN_FLAG_DFS;
 
 	cmd = (struct wmi_vdev_start_request_cmd *)skb->data;
 	cmd->vdev_id         = __cpu_to_le32(arg->vdev_id);
@@ -2669,6 +2987,7 @@ static int ath10k_wmi_vdev_start_restart(struct ath10k *ar,
 		__cpu_to_le32(arg->channel.band_center_freq1);
 
 	cmd->chan.mode = arg->channel.mode;
+	cmd->chan.flags |= __cpu_to_le32(ch_flags);
 	cmd->chan.min_power = arg->channel.min_power;
 	cmd->chan.max_power = arg->channel.max_power;
 	cmd->chan.reg_power = arg->channel.max_reg_power;
@@ -2676,9 +2995,10 @@ static int ath10k_wmi_vdev_start_restart(struct ath10k *ar,
 	cmd->chan.antenna_max = arg->channel.max_antenna_gain;
 
 	ath10k_dbg(ATH10K_DBG_WMI,
-		   "wmi vdev %s id 0x%x freq %d, mode %d, ch_flags: 0x%0X,"
-		   "max_power: %d\n", cmdname, arg->vdev_id, arg->channel.freq,
-		   arg->channel.mode, flags, arg->channel.max_power);
+		   "wmi vdev %s id 0x%x flags: 0x%0X, freq %d, mode %d, "
+		   "ch_flags: 0x%0X, max_power: %d\n", cmdname, arg->vdev_id,
+		   flags, arg->channel.freq, arg->channel.mode,
+		   cmd->chan.flags, arg->channel.max_power);
 
 	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
 }
@@ -3012,6 +3332,8 @@ int ath10k_wmi_scan_chan_list(struct ath10k *ar,
 			flags |= WMI_CHAN_FLAG_ALLOW_VHT;
 		if (ch->ht40plus)
 			flags |= WMI_CHAN_FLAG_HT40_PLUS;
+		if (ch->chan_radar)
+			flags |= WMI_CHAN_FLAG_DFS;
 
 		ci->mhz               = __cpu_to_le32(ch->freq);
 		ci->band_center_freq1 = __cpu_to_le32(ch->freq);
@@ -3094,6 +3416,7 @@ int ath10k_wmi_beacon_send_nowait(struct ath10k *ar,
 {
 	struct wmi_bcn_tx_cmd *cmd;
 	struct sk_buff *skb;
+	int ret;
 
 	skb = ath10k_wmi_alloc_skb(sizeof(*cmd) + arg->bcn_len);
 	if (!skb)
@@ -3106,7 +3429,11 @@ int ath10k_wmi_beacon_send_nowait(struct ath10k *ar,
 	cmd->hdr.bcn_len  = __cpu_to_le32(arg->bcn_len);
 	memcpy(cmd->bcn, arg->bcn, arg->bcn_len);
 
-	return ath10k_wmi_cmd_send_nowait(ar, skb, ar->wmi.cmd->bcn_tx_cmdid);
+	ret = ath10k_wmi_cmd_send_nowait(ar, skb, ar->wmi.cmd->bcn_tx_cmdid);
+	if (ret)
+		dev_kfree_skb(skb);
+
+	return ret;
 }
 
 static void ath10k_wmi_pdev_set_wmm_param(struct wmi_wmm_params *params,
@@ -3175,3 +3502,40 @@ int ath10k_wmi_force_fw_hang(struct ath10k *ar,
 		   type, delay_ms);
 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->force_fw_hang_cmdid);
 }
+
+int ath10k_wmi_dbglog_cfg(struct ath10k *ar, u32 module_enable)
+{
+	struct wmi_dbglog_cfg_cmd *cmd;
+	struct sk_buff *skb;
+	u32 cfg;
+
+	skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
+	if (!skb)
+		return -ENOMEM;
+
+	cmd = (struct wmi_dbglog_cfg_cmd *)skb->data;
+
+	if (module_enable) {
+		cfg = SM(ATH10K_DBGLOG_LEVEL_VERBOSE,
+			 ATH10K_DBGLOG_CFG_LOG_LVL);
+	} else {
+		/* set back defaults, all modules with WARN level */
+		cfg = SM(ATH10K_DBGLOG_LEVEL_WARN,
+			 ATH10K_DBGLOG_CFG_LOG_LVL);
+		module_enable = ~0;
+	}
+
+	cmd->module_enable = __cpu_to_le32(module_enable);
+	cmd->module_valid = __cpu_to_le32(~0);
+	cmd->config_enable = __cpu_to_le32(cfg);
+	cmd->config_valid = __cpu_to_le32(ATH10K_DBGLOG_CFG_LOG_LVL_MASK);
+
+	ath10k_dbg(ATH10K_DBG_WMI,
+		   "wmi dbglog cfg modules %08x %08x config %08x %08x\n",
+		   __le32_to_cpu(cmd->module_enable),
+		   __le32_to_cpu(cmd->module_valid),
+		   __le32_to_cpu(cmd->config_enable),
+		   __le32_to_cpu(cmd->config_valid));
+
+	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->dbglog_cfg_cmdid);
+}
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index 78c991aec7f9..4b5e7d3d32b6 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -893,6 +893,7 @@ struct wmi_channel {
 	union {
 		__le32 reginfo0;
 		struct {
+			/* note: power unit is 0.5 dBm */
 			u8 min_power;
 			u8 max_power;
 			u8 reg_power;
@@ -915,7 +916,8 @@ struct wmi_channel_arg {
 	bool allow_ht;
 	bool allow_vht;
 	bool ht40plus;
-	/* note: power unit is 1/4th of dBm */
+	bool chan_radar;
+	/* note: power unit is 0.5 dBm */
 	u32 min_power;
 	u32 max_power;
 	u32 max_reg_power;
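Given the 0.5 dBm unit noted above, a value of 40 in these power fields
corresponds to 20 dBm; converting from whole dBm is a simple doubling. A
minimal illustration, assuming a local struct wmi_channel_arg named arg:

	/* request a 20 dBm limit, expressed in 0.5 dBm units */
	arg.max_power = 2 * 20;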
@@ -1977,6 +1979,10 @@ struct wmi_mgmt_rx_event_v2 {
 #define WMI_RX_STATUS_ERR_MIC			0x10
 #define WMI_RX_STATUS_ERR_KEY_CACHE_MISS	0x20
 
+#define PHY_ERROR_SPECTRAL_SCAN		0x26
+#define PHY_ERROR_FALSE_RADAR_EXT		0x24
+#define PHY_ERROR_RADAR				0x05
+
 struct wmi_single_phyerr_rx_hdr {
 	/* TSF timestamp */
 	__le32 tsf_timestamp;
@@ -2068,6 +2074,87 @@ struct wmi_comb_phyerr_rx_event {
 	u8 bufp[0];
 } __packed;
 
+#define PHYERR_TLV_SIG				0xBB
+#define PHYERR_TLV_TAG_SEARCH_FFT_REPORT	0xFB
+#define PHYERR_TLV_TAG_RADAR_PULSE_SUMMARY	0xF8
+
+struct phyerr_radar_report {
+	__le32 reg0; /* RADAR_REPORT_REG0_* */
+	__le32 reg1; /* RADAR_REPORT_REG1_* */
+} __packed;
+
+#define RADAR_REPORT_REG0_PULSE_IS_CHIRP_MASK		0x80000000
+#define RADAR_REPORT_REG0_PULSE_IS_CHIRP_LSB		31
+
+#define RADAR_REPORT_REG0_PULSE_IS_MAX_WIDTH_MASK	0x40000000
+#define RADAR_REPORT_REG0_PULSE_IS_MAX_WIDTH_LSB	30
+
+#define RADAR_REPORT_REG0_AGC_TOTAL_GAIN_MASK		0x3FF00000
+#define RADAR_REPORT_REG0_AGC_TOTAL_GAIN_LSB		20
+
+#define RADAR_REPORT_REG0_PULSE_DELTA_DIFF_MASK		0x000F0000
+#define RADAR_REPORT_REG0_PULSE_DELTA_DIFF_LSB		16
+
+#define RADAR_REPORT_REG0_PULSE_DELTA_PEAK_MASK		0x0000FC00
+#define RADAR_REPORT_REG0_PULSE_DELTA_PEAK_LSB		10
+
+#define RADAR_REPORT_REG0_PULSE_SIDX_MASK		0x000003FF
+#define RADAR_REPORT_REG0_PULSE_SIDX_LSB		0
+
+#define RADAR_REPORT_REG1_PULSE_SRCH_FFT_VALID_MASK	0x80000000
+#define RADAR_REPORT_REG1_PULSE_SRCH_FFT_VALID_LSB	31
+
+#define RADAR_REPORT_REG1_PULSE_AGC_MB_GAIN_MASK	0x7F000000
+#define RADAR_REPORT_REG1_PULSE_AGC_MB_GAIN_LSB		24
+
+#define RADAR_REPORT_REG1_PULSE_SUBCHAN_MASK_MASK	0x00FF0000
+#define RADAR_REPORT_REG1_PULSE_SUBCHAN_MASK_LSB	16
+
+#define RADAR_REPORT_REG1_PULSE_TSF_OFFSET_MASK		0x0000FF00
+#define RADAR_REPORT_REG1_PULSE_TSF_OFFSET_LSB		8
+
+#define RADAR_REPORT_REG1_PULSE_DUR_MASK		0x000000FF
+#define RADAR_REPORT_REG1_PULSE_DUR_LSB			0
+
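These _MASK/_LSB pairs are consumed through the driver's MS() field-extraction
helper, as in ath10k_dfs_radar_report() above. A minimal sketch of the pattern,
assuming the usual ath10k definition of MS(val, FIELD) as
((val) & FIELD_MASK) >> FIELD_LSB:

	u32 reg1 = __le32_to_cpu(rr->reg1);
	u8 pulse_dur = MS(reg1, RADAR_REPORT_REG1_PULSE_DUR);	/* bits 7:0 */
	u8 fft_valid = MS(reg1, RADAR_REPORT_REG1_PULSE_SRCH_FFT_VALID); /* bit 31 */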
+struct phyerr_fft_report {
+	__le32 reg0; /* SEARCH_FFT_REPORT_REG0_* */
+	__le32 reg1; /* SEARCH_FFT_REPORT_REG1_* */
+} __packed;
+
+#define SEARCH_FFT_REPORT_REG0_TOTAL_GAIN_DB_MASK	0xFF800000
+#define SEARCH_FFT_REPORT_REG0_TOTAL_GAIN_DB_LSB	23
+
+#define SEARCH_FFT_REPORT_REG0_BASE_PWR_DB_MASK		0x007FC000
+#define SEARCH_FFT_REPORT_REG0_BASE_PWR_DB_LSB		14
+
+#define SEARCH_FFT_REPORT_REG0_FFT_CHN_IDX_MASK		0x00003000
+#define SEARCH_FFT_REPORT_REG0_FFT_CHN_IDX_LSB		12
+
+#define SEARCH_FFT_REPORT_REG0_PEAK_SIDX_MASK		0x00000FFF
+#define SEARCH_FFT_REPORT_REG0_PEAK_SIDX_LSB		0
+
+#define SEARCH_FFT_REPORT_REG1_RELPWR_DB_MASK		0xFC000000
+#define SEARCH_FFT_REPORT_REG1_RELPWR_DB_LSB		26
+
+#define SEARCH_FFT_REPORT_REG1_AVGPWR_DB_MASK		0x03FC0000
+#define SEARCH_FFT_REPORT_REG1_AVGPWR_DB_LSB		18
+
+#define SEARCH_FFT_REPORT_REG1_PEAK_MAG_MASK		0x0003FF00
+#define SEARCH_FFT_REPORT_REG1_PEAK_MAG_LSB		8
+
+#define SEARCH_FFT_REPORT_REG1_NUM_STR_BINS_IB_MASK	0x000000FF
+#define SEARCH_FFT_REPORT_REG1_NUM_STR_BINS_IB_LSB	0
+
+struct phyerr_tlv {
+	__le16 len;
+	u8 tag;
+	u8 sig;
+} __packed;
+
+#define DFS_RSSI_POSSIBLY_FALSE			50
+#define DFS_PEAK_MAG_THOLD_POSSIBLY_FALSE	40
+
 struct wmi_mgmt_tx_hdr {
 	__le32 vdev_id;
 	struct wmi_mac_addr peer_macaddr;
@@ -2233,7 +2320,12 @@ enum wmi_pdev_param {
 	 * 0: no protection 1:use CTS-to-self 2: use RTS/CTS
 	 */
 	WMI_PDEV_PARAM_PROTECTION_MODE,
-	/* Dynamic bandwidth 0: disable 1: enable */
+	/*
+	 * Dynamic bandwidth - 0: disable, 1: enable
+	 *
+	 * When enabled, HW rate control tries different bandwidths when
+	 * retransmitting frames.
+	 */
 	WMI_PDEV_PARAM_DYNAMIC_BW,
 	/* Non aggregrate/ 11g sw retry threshold.0-disable */
 	WMI_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
@@ -2911,6 +3003,18 @@ struct wmi_vdev_install_key_arg {
 	const void *key_data;
 };
 
+/*
+ * vdev fixed rate format:
+ * - preamble - b7:b6 - see WMI_RATE_PREAMBLE_
+ * - nss      - b5:b4 - ss number (0 means 1ss)
+ * - rate_mcs - b3:b0 - as below
+ *    CCK:  0 - 11Mbps, 1 - 5.5Mbps, 2 - 2Mbps, 3 - 1Mbps,
+ *          4 - 11Mbps (s), 5 - 5.5Mbps (s), 6 - 2Mbps (s)
+ *    OFDM: 0 - 48Mbps, 1 - 24Mbps, 2 - 12Mbps, 3 - 6Mbps,
+ *          4 - 54Mbps, 5 - 36Mbps, 6 - 18Mbps, 7 - 9Mbps
+ *    HT/VHT: MCS index
+ */
+
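A minimal sketch of how a value in this format could be assembled from the
three fields; the helper below is hypothetical and only mirrors the bit layout
described in the comment above:

	/* hypothetical helper, not defined by this patch */
	static inline u8 wmi_rate_code(u8 preamble, u8 nss, u8 rate_mcs)
	{
		return ((preamble & 0x3) << 6) | ((nss & 0x3) << 4) | (rate_mcs & 0xf);
	}

	/* e.g. OFDM 6 Mbps, single spatial stream:
	 * wmi_rate_code(WMI_RATE_PREAMBLE_OFDM, 0, 3)
	 */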
 /* Preamble types to be used with VDEV fixed rate configuration */
 enum wmi_rate_preamble {
 	WMI_RATE_PREAMBLE_OFDM,
@@ -3998,6 +4102,54 @@ struct wmi_force_fw_hang_cmd {
 	__le32 delay_ms;
 } __packed;
 
+enum ath10k_dbglog_level {
+	ATH10K_DBGLOG_LEVEL_VERBOSE = 0,
+	ATH10K_DBGLOG_LEVEL_INFO = 1,
+	ATH10K_DBGLOG_LEVEL_WARN = 2,
+	ATH10K_DBGLOG_LEVEL_ERR = 3,
+};
+
+/* VAP ids to enable dbglog */
+#define ATH10K_DBGLOG_CFG_VAP_LOG_LSB		0
+#define ATH10K_DBGLOG_CFG_VAP_LOG_MASK		0x0000ffff
+
+/* to enable dbglog in the firmware */
+#define ATH10K_DBGLOG_CFG_REPORTING_ENABLE_LSB	16
+#define ATH10K_DBGLOG_CFG_REPORTING_ENABLE_MASK	0x00010000
+
+/* timestamp resolution */
+#define ATH10K_DBGLOG_CFG_RESOLUTION_LSB	17
+#define ATH10K_DBGLOG_CFG_RESOLUTION_MASK	0x000E0000
+
+/* number of queued messages before sending them to the host */
+#define ATH10K_DBGLOG_CFG_REPORT_SIZE_LSB	20
+#define ATH10K_DBGLOG_CFG_REPORT_SIZE_MASK	0x0ff00000
+
+/*
+ * Log levels to enable. This defines the minimum level to enable; it is
+ * not a bitmask. See enum ath10k_dbglog_level for the values.
+ */
+#define ATH10K_DBGLOG_CFG_LOG_LVL_LSB		28
+#define ATH10K_DBGLOG_CFG_LOG_LVL_MASK		0x70000000
+
+/*
+ * Note: this is a cleaned-up version of a struct the firmware uses. For
+ * example, config_valid was hidden inside an array.
+ */
+struct wmi_dbglog_cfg_cmd {
+	/* bitmask to hold module id config */
+	__le32 module_enable;
+
+	/* see ATH10K_DBGLOG_CFG_ */
+	__le32 config_enable;
+
+	/* mask of module id bits to be changed */
+	__le32 module_valid;
+
+	/* mask of config bits to be changed, see ATH10K_DBGLOG_CFG_ */
+	__le32 config_valid;
+} __packed;
+
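The *_enable/*_valid members form value/mask pairs, matching how
ath10k_wmi_dbglog_cfg() above fills the command. A rough sketch of updating
only the log-level field, assuming the driver's SM() shift-and-mask helper
(the chosen level is illustrative):

	cmd->config_enable = __cpu_to_le32(SM(ATH10K_DBGLOG_LEVEL_INFO,
					      ATH10K_DBGLOG_CFG_LOG_LVL));
	cmd->config_valid = __cpu_to_le32(ATH10K_DBGLOG_CFG_LOG_LVL_MASK);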
 #define ATH10K_RTS_MAX		2347
 #define ATH10K_FRAGMT_THRESHOLD_MIN	540
 #define ATH10K_FRAGMT_THRESHOLD_MAX	2346
@@ -4075,5 +4227,6 @@ int ath10k_wmi_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id);
 int ath10k_wmi_force_fw_hang(struct ath10k *ar,
 			     enum wmi_force_fw_hang_type type, u32 delay_ms);
 int ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *skb);
+int ath10k_wmi_dbglog_cfg(struct ath10k *ar, u32 module_enable);
 
 #endif /* _WMI_H_ */
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 69f58b073e85..ef35da84f63b 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -1238,14 +1238,11 @@ static void
 ath5k_check_ibss_tsf(struct ath5k_hw *ah, struct sk_buff *skb,
 		     struct ieee80211_rx_status *rxs)
 {
-	struct ath_common *common = ath5k_hw_common(ah);
 	u64 tsf, bc_tstamp;
 	u32 hw_tu;
 	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
 
-	if (ieee80211_is_beacon(mgmt->frame_control) &&
-	    le16_to_cpu(mgmt->u.beacon.capab_info) & WLAN_CAPABILITY_IBSS &&
-	    ether_addr_equal(mgmt->bssid, common->curbssid)) {
+	if (le16_to_cpu(mgmt->u.beacon.capab_info) & WLAN_CAPABILITY_IBSS) {
 		/*
 		 * Received an IBSS beacon with the same BSSID. Hardware *must*
 		 * have updated the local TSF. We have to work around various
@@ -1301,23 +1298,6 @@ ath5k_check_ibss_tsf(struct ath5k_hw *ah, struct sk_buff *skb,
 	}
 }
 
-static void
-ath5k_update_beacon_rssi(struct ath5k_hw *ah, struct sk_buff *skb, int rssi)
-{
-	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
-	struct ath_common *common = ath5k_hw_common(ah);
-
-	/* only beacons from our BSSID */
-	if (!ieee80211_is_beacon(mgmt->frame_control) ||
-	    !ether_addr_equal(mgmt->bssid, common->curbssid))
-		return;
-
-	ewma_add(&ah->ah_beacon_rssi_avg, rssi);
-
-	/* in IBSS mode we should keep RSSI statistics per neighbour */
-	/* le16_to_cpu(mgmt->u.beacon.capab_info) & WLAN_CAPABILITY_IBSS */
-}
-
 /*
  * Compute padding position. skb must contain an IEEE 802.11 frame
  */
@@ -1390,6 +1370,7 @@ ath5k_receive_frame(struct ath5k_hw *ah, struct sk_buff *skb,
 		    struct ath5k_rx_status *rs)
 {
 	struct ieee80211_rx_status *rxs;
+	struct ath_common *common = ath5k_hw_common(ah);
 
 	ath5k_remove_padding(skb);
 
@@ -1442,11 +1423,13 @@ ath5k_receive_frame(struct ath5k_hw *ah, struct sk_buff *skb,
 
 	trace_ath5k_rx(ah, skb);
 
-	ath5k_update_beacon_rssi(ah, skb, rs->rs_rssi);
+	if (ath_is_mybeacon(common, (struct ieee80211_hdr *)skb->data)) {
+		ewma_add(&ah->ah_beacon_rssi_avg, rs->rs_rssi);
 
-	/* check beacons in IBSS mode */
-	if (ah->opmode == NL80211_IFTYPE_ADHOC)
-		ath5k_check_ibss_tsf(ah, skb, rxs);
+		/* check beacons in IBSS mode */
+		if (ah->opmode == NL80211_IFTYPE_ADHOC)
+			ath5k_check_ibss_tsf(ah, skb, rxs);
+	}
 
 	ieee80211_rx(ah->hw, skb);
 }
@@ -2549,7 +2532,6 @@ ath5k_init_ah(struct ath5k_hw *ah, const struct ath_bus_ops *bus_ops)
 	hw->wiphy->available_antennas_rx = 0x3;
 
 	hw->extra_tx_headroom = 2;
-	hw->channel_change_time = 5000;
 
 	/*
 	 * Mark the device as detached to avoid processing
diff --git a/drivers/net/wireless/ath/ath5k/dma.c b/drivers/net/wireless/ath/ath5k/dma.c
index ba200b24be64..e6c52f7c26e7 100644
--- a/drivers/net/wireless/ath/ath5k/dma.c
+++ b/drivers/net/wireless/ath/ath5k/dma.c
@@ -616,7 +616,16 @@ ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask)
 		 * SISRs will also clear PISR so no need to worry here.
 		 */
 
-		pisr_clear = pisr & ~AR5K_ISR_BITS_FROM_SISRS;
+		/* XXX: There seems to be an issue on some cards
+		 *	with tx interrupt flags not being updated
+		 *	on PISR even though all Tx interrupt bits
+		 *	are cleared on SISRs. Since we handle all
+		 *	Tx queues together, it shouldn't be an
+		 *	issue if we also clear the Tx interrupt
+		 *	flags on PISR to avoid that.
+		 */
+		pisr_clear = (pisr & ~AR5K_ISR_BITS_FROM_SISRS) |
+					(pisr & AR5K_INT_TX_ALL);
 
 		/*
 		 * Write to clear them...
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
index 2437ad26949d..fd4c89df67e1 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.c
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
@@ -1109,7 +1109,9 @@ void ath6kl_cfg80211_ch_switch_notify(struct ath6kl_vif *vif, int freq,
 				(mode == WMI_11G_HT20) ?
 					NL80211_CHAN_HT20 : NL80211_CHAN_NO_HT);
 
+	mutex_lock(&vif->wdev.mtx);
 	cfg80211_ch_switch_notify(vif->ndev, &chandef);
+	mutex_unlock(&vif->wdev.mtx);
 }
 
 static int ath6kl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
@@ -3169,12 +3171,15 @@ static bool ath6kl_is_p2p_go_ssid(const u8 *buf, size_t len)
 }
 
 static int ath6kl_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
-			  struct ieee80211_channel *chan, bool offchan,
-			  unsigned int wait, const u8 *buf, size_t len,
-			  bool no_cck, bool dont_wait_for_ack, u64 *cookie)
+			  struct cfg80211_mgmt_tx_params *params, u64 *cookie)
 {
 	struct ath6kl_vif *vif = ath6kl_vif_from_wdev(wdev);
 	struct ath6kl *ar = ath6kl_priv(vif->ndev);
+	struct ieee80211_channel *chan = params->chan;
+	const u8 *buf = params->buf;
+	size_t len = params->len;
+	unsigned int wait = params->wait;
+	bool no_cck = params->no_cck;
 	u32 id, freq;
 	const struct ieee80211_mgmt *mgmt;
 	bool more_data, queued;
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c
index 546d5da0b894..4f16d79c9eb1 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.c
+++ b/drivers/net/wireless/ath/ath6kl/wmi.c
@@ -2754,9 +2754,9 @@ static int ath6kl_set_bitrate_mask64(struct wmi *wmi, u8 if_idx,
 				mask->control[band].legacy << 4;
 
 		/* copy mcs rate mask */
-		mcsrate = mask->control[band].mcs[1];
+		mcsrate = mask->control[band].ht_mcs[1];
 		mcsrate <<= 8;
-		mcsrate |= mask->control[band].mcs[0];
+		mcsrate |= mask->control[band].ht_mcs[0];
 		ratemask[band] |= mcsrate << 12;
 		ratemask[band] |= mcsrate << 28;
 	}
@@ -2806,7 +2806,7 @@ static int ath6kl_set_bitrate_mask32(struct wmi *wmi, u8 if_idx,
 				mask->control[band].legacy << 4;
 
 		/* copy mcs rate mask */
-		mcsrate = mask->control[band].mcs[0];
+		mcsrate = mask->control[band].ht_mcs[0];
 		ratemask[band] |= mcsrate << 12;
 		ratemask[band] |= mcsrate << 20;
 	}
diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig
index 32f139e2e897..7b96b3e5712d 100644
--- a/drivers/net/wireless/ath/ath9k/Kconfig
+++ b/drivers/net/wireless/ath/ath9k/Kconfig
@@ -65,6 +65,14 @@ config ATH9K_DEBUGFS
 
 	  Also required for changing debug message flags at run time.
 
+config ATH9K_STATION_STATISTICS
+	bool "Detailed station statistics"
+	depends on ATH9K && ATH9K_DEBUGFS && DEBUG_FS
+	select MAC80211_DEBUGFS
+	default n
+	---help---
+	  This option enables detailed statistics for associated stations.
+
 config ATH9K_DFS_CERTIFIED
 	bool "Atheros DFS support for certified platforms"
 	depends on ATH9K && CFG80211_CERTIFICATION_ONUS
@@ -86,7 +94,7 @@ config ATH9K_DFS_CERTIFIED
 
 config ATH9K_TX99
 	bool "Atheros ath9k TX99 testing support"
-	depends on CFG80211_CERTIFICATION_ONUS
+	depends on ATH9K_DEBUGFS && CFG80211_CERTIFICATION_ONUS
 	default n
 	---help---
 	  Say N. This should only be enabled on systems undergoing
@@ -104,6 +112,14 @@ config ATH9K_TX99
 	  be evaluated to meet the RF exposure limits set forth in the
 	  governmental SAR regulations.
 
+config ATH9K_WOW
+	bool "Wake on Wireless LAN support (EXPERIMENTAL)"
+	depends on ATH9K && PM
+	default n
+	---help---
+	  This option enables Wake on Wireless LAN support for certain cards.
+	  Currently, AR9462 is supported.
+
 config ATH9K_LEGACY_RATE_CONTROL
 	bool "Atheros ath9k rate control"
 	depends on ATH9K
diff --git a/drivers/net/wireless/ath/ath9k/Makefile b/drivers/net/wireless/ath/ath9k/Makefile
index 6205ef5a9321..a40e5c5d7418 100644
--- a/drivers/net/wireless/ath/ath9k/Makefile
+++ b/drivers/net/wireless/ath/ath9k/Makefile
@@ -11,11 +11,15 @@ ath9k-$(CONFIG_ATH9K_BTCOEX_SUPPORT) += mci.o
 ath9k-$(CONFIG_ATH9K_LEGACY_RATE_CONTROL) += rc.o
 ath9k-$(CONFIG_ATH9K_PCI) += pci.o
 ath9k-$(CONFIG_ATH9K_AHB) += ahb.o
-ath9k-$(CONFIG_ATH9K_DEBUGFS) += debug.o
 ath9k-$(CONFIG_ATH9K_DFS_DEBUGFS) += dfs_debug.o
-ath9k-$(CONFIG_ATH9K_DFS_CERTIFIED) += \
-		dfs.o
-ath9k-$(CONFIG_PM_SLEEP) += wow.o
+ath9k-$(CONFIG_ATH9K_DFS_CERTIFIED) += dfs.o
+ath9k-$(CONFIG_ATH9K_TX99) += tx99.o
+ath9k-$(CONFIG_ATH9K_WOW) += wow.o
+
+ath9k-$(CONFIG_ATH9K_DEBUGFS) += debug.o \
+				 spectral.o
+
+ath9k-$(CONFIG_ATH9K_STATION_STATISTICS) += debug_sta.o
 
 obj-$(CONFIG_ATH9K) += ath9k.o
 
@@ -41,6 +45,8 @@ ath9k_hw-y:=	\
 		ar9003_eeprom.o \
 		ar9003_paprd.o
 
+ath9k_hw-$(CONFIG_ATH9K_WOW) += ar9003_wow.o
+
 ath9k_hw-$(CONFIG_ATH9K_BTCOEX_SUPPORT) += btcoex.o \
 					   ar9003_mci.o
 obj-$(CONFIG_ATH9K_HW) += ath9k_hw.o
diff --git a/drivers/net/wireless/ath/ath9k/antenna.c b/drivers/net/wireless/ath/ath9k/antenna.c
index bd048cc69a33..a3668433dc02 100644
--- a/drivers/net/wireless/ath/ath9k/antenna.c
+++ b/drivers/net/wireless/ath/ath9k/antenna.c
@@ -724,14 +724,14 @@ void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs)
 	struct ath_ant_comb *antcomb = &sc->ant_comb;
 	int alt_ratio = 0, alt_rssi_avg = 0, main_rssi_avg = 0, curr_alt_set;
 	int curr_main_set;
-	int main_rssi = rs->rs_rssi_ctl0;
-	int alt_rssi = rs->rs_rssi_ctl1;
+	int main_rssi = rs->rs_rssi_ctl[0];
+	int alt_rssi = rs->rs_rssi_ctl[1];
 	int rx_ant_conf,  main_ant_conf;
 	bool short_scan = false, ret;
 
-	rx_ant_conf = (rs->rs_rssi_ctl2 >> ATH_ANT_RX_CURRENT_SHIFT) &
+	rx_ant_conf = (rs->rs_rssi_ctl[2] >> ATH_ANT_RX_CURRENT_SHIFT) &
 		       ATH_ANT_RX_MASK;
-	main_ant_conf = (rs->rs_rssi_ctl2 >> ATH_ANT_RX_MAIN_SHIFT) &
+	main_ant_conf = (rs->rs_rssi_ctl[2] >> ATH_ANT_RX_MAIN_SHIFT) &
 			 ATH_ANT_RX_MASK;
 
 	if (alt_rssi >= antcomb->low_rssi_thresh) {
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_hw.c b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
index 5c95fd9e9c9e..d480d2f3e185 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
@@ -32,12 +32,8 @@ static int ar9002_hw_init_mode_regs(struct ath_hw *ah)
 		return 0;
 	}
 
-	if (ah->config.pcie_clock_req)
-		INIT_INI_ARRAY(&ah->iniPcieSerdes,
-			   ar9280PciePhy_clkreq_off_L1_9280);
-	else
-		INIT_INI_ARRAY(&ah->iniPcieSerdes,
-			   ar9280PciePhy_clkreq_always_on_L1_9280);
+	INIT_INI_ARRAY(&ah->iniPcieSerdes,
+		       ar9280PciePhy_clkreq_always_on_L1_9280);
 
 	if (AR_SREV_9287_11_OR_LATER(ah)) {
 		INIT_INI_ARRAY(&ah->iniModes, ar9287Modes_9287_1_1);
@@ -387,6 +383,20 @@ void ar9002_hw_enable_async_fifo(struct ath_hw *ah)
 	}
 }
 
+static void ar9002_hw_init_hang_checks(struct ath_hw *ah)
+{
+	if (AR_SREV_9100(ah) || AR_SREV_9160(ah)) {
+		ah->config.hw_hang_checks |= HW_BB_RIFS_HANG;
+		ah->config.hw_hang_checks |= HW_BB_DFS_HANG;
+	}
+
+	if (AR_SREV_9280(ah))
+		ah->config.hw_hang_checks |= HW_BB_RX_CLEAR_STUCK_HANG;
+
+	if (AR_SREV_5416(ah) || AR_SREV_9100(ah) || AR_SREV_9160(ah))
+		ah->config.hw_hang_checks |= HW_MAC_HANG;
+}
+
 /* Sets up the AR5008/AR9001/AR9002 hardware familiy callbacks */
 int ar9002_hw_attach_ops(struct ath_hw *ah)
 {
@@ -399,6 +409,7 @@ int ar9002_hw_attach_ops(struct ath_hw *ah)
 		return ret;
 
 	priv_ops->init_mode_gain_regs = ar9002_hw_init_mode_gain_regs;
+	priv_ops->init_hang_checks = ar9002_hw_init_hang_checks;
 
 	ops->config_pci_powersave = ar9002_hw_configpcipowersave;
 
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
index a366d6b4626f..741b38ddcb37 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
@@ -29,7 +29,8 @@ static void ar9002_hw_set_desc_link(void *ds, u32 ds_link)
 	((struct ath_desc*) ds)->ds_link = ds_link;
 }
 
-static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
+static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked,
+			      u32 *sync_cause_p)
 {
 	u32 isr = 0;
 	u32 mask2 = 0;
@@ -170,7 +171,8 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
 		return true;
 
 	if (sync_cause) {
-		ath9k_debug_sync_cause(common, sync_cause);
+		if (sync_cause_p)
+			*sync_cause_p = sync_cause;
 		fatal_int =
 			(sync_cause &
 			 (AR_INTR_SYNC_HOST1_FATAL | AR_INTR_SYNC_HOST1_PERR))
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_phy.c b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
index f087117b2e6b..9a2afa2c690b 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
@@ -201,7 +201,6 @@ static void ar9002_hw_spur_mitigate(struct ath_hw *ah,
 	ath9k_hw_get_channel_centers(ah, chan, &centers);
 	freq = centers.synth_center;
 
-	ah->config.spurmode = SPUR_ENABLE_EEPROM;
 	for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
 		cur_bb_spur = ah->eep_ops->get_spur_channel(ah, i, is2GHz);
 
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
index 7546b9a7dcbf..0a6163e9248c 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
@@ -303,7 +303,7 @@ static const u32 ar9300_2p2_mac_postamble[][5] = {
 	{0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38},
 	{0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00},
 	{0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b},
-	{0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810},
+	{0x00008120, 0x18f04800, 0x18f04800, 0x18f04810, 0x18f04810},
 	{0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a},
 	{0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440},
 };
@@ -352,7 +352,7 @@ static const u32 ar9300_2p2_baseband_postamble[][5] = {
 	{0x0000a288, 0x00000110, 0x00000110, 0x00000110, 0x00000110},
 	{0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222},
 	{0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
-	{0x0000a2d0, 0x00041981, 0x00041981, 0x00041981, 0x00041982},
+	{0x0000a2d0, 0x00041983, 0x00041983, 0x00041981, 0x00041982},
 	{0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b},
 	{0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
 	{0x0000a830, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
@@ -378,9 +378,9 @@ static const u32 ar9300_2p2_baseband_core[][2] = {
 	{0x00009814, 0x9280c00a},
 	{0x00009818, 0x00000000},
 	{0x0000981c, 0x00020028},
-	{0x00009834, 0x6400a290},
+	{0x00009834, 0x6400a190},
 	{0x00009838, 0x0108ecff},
-	{0x0000983c, 0x0d000600},
+	{0x0000983c, 0x14000600},
 	{0x00009880, 0x201fff00},
 	{0x00009884, 0x00001042},
 	{0x000098a4, 0x00200400},
@@ -401,7 +401,7 @@ static const u32 ar9300_2p2_baseband_core[][2] = {
 	{0x00009d04, 0x40206c10},
 	{0x00009d08, 0x009c4060},
 	{0x00009d0c, 0x9883800a},
-	{0x00009d10, 0x01834061},
+	{0x00009d10, 0x01884061},
 	{0x00009d14, 0x00c0040b},
 	{0x00009d18, 0x00000000},
 	{0x00009e08, 0x0038230c},
@@ -459,7 +459,7 @@ static const u32 ar9300_2p2_baseband_core[][2] = {
 	{0x0000a3e8, 0x20202020},
 	{0x0000a3ec, 0x20202020},
 	{0x0000a3f0, 0x00000000},
-	{0x0000a3f4, 0x00000246},
+	{0x0000a3f4, 0x00000000},
 	{0x0000a3f8, 0x0c9bd380},
 	{0x0000a3fc, 0x000f0f01},
 	{0x0000a400, 0x8fa91f01},
@@ -534,107 +534,107 @@ static const u32 ar9300_2p2_baseband_core[][2] = {
 
 static const u32 ar9300Modes_high_power_tx_gain_table_2p2[][5] = {
 	/* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
-	{0x0000a2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
-	{0x0000a2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
-	{0x0000a2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
+	{0x0000a2dc, 0x000cfff0, 0x000cfff0, 0x03aaa352, 0x03aaa352},
+	{0x0000a2e0, 0x000f0000, 0x000f0000, 0x03ccc584, 0x03ccc584},
+	{0x0000a2e4, 0x03f00000, 0x03f00000, 0x03f0f800, 0x03f0f800},
 	{0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
-	{0x0000a410, 0x000050d8, 0x000050d8, 0x000050d9, 0x000050d9},
-	{0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
-	{0x0000a504, 0x04002222, 0x04002222, 0x04000002, 0x04000002},
-	{0x0000a508, 0x09002421, 0x09002421, 0x08000004, 0x08000004},
-	{0x0000a50c, 0x0d002621, 0x0d002621, 0x0b000200, 0x0b000200},
-	{0x0000a510, 0x13004620, 0x13004620, 0x0f000202, 0x0f000202},
-	{0x0000a514, 0x19004a20, 0x19004a20, 0x11000400, 0x11000400},
-	{0x0000a518, 0x1d004e20, 0x1d004e20, 0x15000402, 0x15000402},
-	{0x0000a51c, 0x21005420, 0x21005420, 0x19000404, 0x19000404},
-	{0x0000a520, 0x26005e20, 0x26005e20, 0x1b000603, 0x1b000603},
-	{0x0000a524, 0x2b005e40, 0x2b005e40, 0x1f000a02, 0x1f000a02},
-	{0x0000a528, 0x2f005e42, 0x2f005e42, 0x23000a04, 0x23000a04},
-	{0x0000a52c, 0x33005e44, 0x33005e44, 0x26000a20, 0x26000a20},
-	{0x0000a530, 0x38005e65, 0x38005e65, 0x2a000e20, 0x2a000e20},
-	{0x0000a534, 0x3c005e69, 0x3c005e69, 0x2e000e22, 0x2e000e22},
-	{0x0000a538, 0x40005e6b, 0x40005e6b, 0x31000e24, 0x31000e24},
-	{0x0000a53c, 0x44005e6d, 0x44005e6d, 0x34001640, 0x34001640},
-	{0x0000a540, 0x49005e72, 0x49005e72, 0x38001660, 0x38001660},
-	{0x0000a544, 0x4e005eb2, 0x4e005eb2, 0x3b001861, 0x3b001861},
-	{0x0000a548, 0x53005f12, 0x53005f12, 0x3e001a81, 0x3e001a81},
-	{0x0000a54c, 0x59025eb2, 0x59025eb2, 0x42001a83, 0x42001a83},
-	{0x0000a550, 0x5e025f12, 0x5e025f12, 0x44001c84, 0x44001c84},
-	{0x0000a554, 0x61027f12, 0x61027f12, 0x48001ce3, 0x48001ce3},
-	{0x0000a558, 0x6702bf12, 0x6702bf12, 0x4c001ce5, 0x4c001ce5},
-	{0x0000a55c, 0x6b02bf14, 0x6b02bf14, 0x50001ce9, 0x50001ce9},
-	{0x0000a560, 0x6f02bf16, 0x6f02bf16, 0x54001ceb, 0x54001ceb},
-	{0x0000a564, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
-	{0x0000a568, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
-	{0x0000a56c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
-	{0x0000a570, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
-	{0x0000a574, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
-	{0x0000a578, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
-	{0x0000a57c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
-	{0x0000a580, 0x00802220, 0x00802220, 0x00800000, 0x00800000},
-	{0x0000a584, 0x04802222, 0x04802222, 0x04800002, 0x04800002},
-	{0x0000a588, 0x09802421, 0x09802421, 0x08800004, 0x08800004},
-	{0x0000a58c, 0x0d802621, 0x0d802621, 0x0b800200, 0x0b800200},
-	{0x0000a590, 0x13804620, 0x13804620, 0x0f800202, 0x0f800202},
-	{0x0000a594, 0x19804a20, 0x19804a20, 0x11800400, 0x11800400},
-	{0x0000a598, 0x1d804e20, 0x1d804e20, 0x15800402, 0x15800402},
-	{0x0000a59c, 0x21805420, 0x21805420, 0x19800404, 0x19800404},
-	{0x0000a5a0, 0x26805e20, 0x26805e20, 0x1b800603, 0x1b800603},
-	{0x0000a5a4, 0x2b805e40, 0x2b805e40, 0x1f800a02, 0x1f800a02},
-	{0x0000a5a8, 0x2f805e42, 0x2f805e42, 0x23800a04, 0x23800a04},
-	{0x0000a5ac, 0x33805e44, 0x33805e44, 0x26800a20, 0x26800a20},
-	{0x0000a5b0, 0x38805e65, 0x38805e65, 0x2a800e20, 0x2a800e20},
-	{0x0000a5b4, 0x3c805e69, 0x3c805e69, 0x2e800e22, 0x2e800e22},
-	{0x0000a5b8, 0x40805e6b, 0x40805e6b, 0x31800e24, 0x31800e24},
-	{0x0000a5bc, 0x44805e6d, 0x44805e6d, 0x34801640, 0x34801640},
-	{0x0000a5c0, 0x49805e72, 0x49805e72, 0x38801660, 0x38801660},
-	{0x0000a5c4, 0x4e805eb2, 0x4e805eb2, 0x3b801861, 0x3b801861},
-	{0x0000a5c8, 0x53805f12, 0x53805f12, 0x3e801a81, 0x3e801a81},
-	{0x0000a5cc, 0x59825eb2, 0x59825eb2, 0x42801a83, 0x42801a83},
-	{0x0000a5d0, 0x5e825f12, 0x5e825f12, 0x44801c84, 0x44801c84},
-	{0x0000a5d4, 0x61827f12, 0x61827f12, 0x48801ce3, 0x48801ce3},
-	{0x0000a5d8, 0x6782bf12, 0x6782bf12, 0x4c801ce5, 0x4c801ce5},
-	{0x0000a5dc, 0x6b82bf14, 0x6b82bf14, 0x50801ce9, 0x50801ce9},
-	{0x0000a5e0, 0x6f82bf16, 0x6f82bf16, 0x54801ceb, 0x54801ceb},
-	{0x0000a5e4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
-	{0x0000a5e8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
-	{0x0000a5ec, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
-	{0x0000a5f0, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
-	{0x0000a5f4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
-	{0x0000a5f8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
-	{0x0000a5fc, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+	{0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
+	{0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
+	{0x0000a508, 0x0a000020, 0x0a000020, 0x08000004, 0x08000004},
+	{0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200},
+	{0x0000a510, 0x15000028, 0x15000028, 0x0f000202, 0x0f000202},
+	{0x0000a514, 0x1b00002b, 0x1b00002b, 0x12000400, 0x12000400},
+	{0x0000a518, 0x1f020028, 0x1f020028, 0x16000402, 0x16000402},
+	{0x0000a51c, 0x2502002b, 0x2502002b, 0x19000404, 0x19000404},
+	{0x0000a520, 0x2a04002a, 0x2a04002a, 0x1c000603, 0x1c000603},
+	{0x0000a524, 0x2e06002a, 0x2e06002a, 0x21000a02, 0x21000a02},
+	{0x0000a528, 0x3302202d, 0x3302202d, 0x25000a04, 0x25000a04},
+	{0x0000a52c, 0x3804202c, 0x3804202c, 0x28000a20, 0x28000a20},
+	{0x0000a530, 0x3c06202c, 0x3c06202c, 0x2c000e20, 0x2c000e20},
+	{0x0000a534, 0x4108202d, 0x4108202d, 0x30000e22, 0x30000e22},
+	{0x0000a538, 0x4506402d, 0x4506402d, 0x34000e24, 0x34000e24},
+	{0x0000a53c, 0x4906222d, 0x4906222d, 0x38001640, 0x38001640},
+	{0x0000a540, 0x4d062231, 0x4d062231, 0x3c001660, 0x3c001660},
+	{0x0000a544, 0x50082231, 0x50082231, 0x3f001861, 0x3f001861},
+	{0x0000a548, 0x5608422e, 0x5608422e, 0x43001a81, 0x43001a81},
+	{0x0000a54c, 0x5e08442e, 0x5e08442e, 0x47001a83, 0x47001a83},
+	{0x0000a550, 0x620a4431, 0x620a4431, 0x4a001c84, 0x4a001c84},
+	{0x0000a554, 0x640a4432, 0x640a4432, 0x4e001ce3, 0x4e001ce3},
+	{0x0000a558, 0x680a4434, 0x680a4434, 0x52001ce5, 0x52001ce5},
+	{0x0000a55c, 0x6c0a6434, 0x6c0a6434, 0x56001ce9, 0x56001ce9},
+	{0x0000a560, 0x6f0a6633, 0x6f0a6633, 0x5a001ceb, 0x5a001ceb},
+	{0x0000a564, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+	{0x0000a568, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+	{0x0000a56c, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+	{0x0000a570, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+	{0x0000a574, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+	{0x0000a578, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+	{0x0000a57c, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+	{0x0000a580, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
+	{0x0000a584, 0x06800003, 0x06800003, 0x04800002, 0x04800002},
+	{0x0000a588, 0x0a800020, 0x0a800020, 0x08800004, 0x08800004},
+	{0x0000a58c, 0x10800023, 0x10800023, 0x0b800200, 0x0b800200},
+	{0x0000a590, 0x15800028, 0x15800028, 0x0f800202, 0x0f800202},
+	{0x0000a594, 0x1b80002b, 0x1b80002b, 0x12800400, 0x12800400},
+	{0x0000a598, 0x1f820028, 0x1f820028, 0x16800402, 0x16800402},
+	{0x0000a59c, 0x2582002b, 0x2582002b, 0x19800404, 0x19800404},
+	{0x0000a5a0, 0x2a84002a, 0x2a84002a, 0x1c800603, 0x1c800603},
+	{0x0000a5a4, 0x2e86002a, 0x2e86002a, 0x21800a02, 0x21800a02},
+	{0x0000a5a8, 0x3382202d, 0x3382202d, 0x25800a04, 0x25800a04},
+	{0x0000a5ac, 0x3884202c, 0x3884202c, 0x28800a20, 0x28800a20},
+	{0x0000a5b0, 0x3c86202c, 0x3c86202c, 0x2c800e20, 0x2c800e20},
+	{0x0000a5b4, 0x4188202d, 0x4188202d, 0x30800e22, 0x30800e22},
+	{0x0000a5b8, 0x4586402d, 0x4586402d, 0x34800e24, 0x34800e24},
+	{0x0000a5bc, 0x4986222d, 0x4986222d, 0x38801640, 0x38801640},
+	{0x0000a5c0, 0x4d862231, 0x4d862231, 0x3c801660, 0x3c801660},
+	{0x0000a5c4, 0x50882231, 0x50882231, 0x3f801861, 0x3f801861},
+	{0x0000a5c8, 0x5688422e, 0x5688422e, 0x43801a81, 0x43801a81},
+	{0x0000a5cc, 0x5e88442e, 0x5e88442e, 0x47801a83, 0x47801a83},
+	{0x0000a5d0, 0x628a4431, 0x628a4431, 0x4a801c84, 0x4a801c84},
+	{0x0000a5d4, 0x648a4432, 0x648a4432, 0x4e801ce3, 0x4e801ce3},
+	{0x0000a5d8, 0x688a4434, 0x688a4434, 0x52801ce5, 0x52801ce5},
+	{0x0000a5dc, 0x6c8a6434, 0x6c8a6434, 0x56801ce9, 0x56801ce9},
+	{0x0000a5e0, 0x6f8a6633, 0x6f8a6633, 0x5a801ceb, 0x5a801ceb},
+	{0x0000a5e4, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec},
+	{0x0000a5e8, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec},
+	{0x0000a5ec, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec},
+	{0x0000a5f0, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec},
+	{0x0000a5f4, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec},
+	{0x0000a5f8, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec},
+	{0x0000a5fc, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec},
 	{0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
 	{0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
-	{0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
-	{0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
-	{0x0000a610, 0x00804000, 0x00804000, 0x00000000, 0x00000000},
-	{0x0000a614, 0x00804201, 0x00804201, 0x01404000, 0x01404000},
-	{0x0000a618, 0x0280c802, 0x0280c802, 0x01404501, 0x01404501},
-	{0x0000a61c, 0x0280ca03, 0x0280ca03, 0x02008501, 0x02008501},
-	{0x0000a620, 0x04c15104, 0x04c15104, 0x0280ca03, 0x0280ca03},
-	{0x0000a624, 0x04c15305, 0x04c15305, 0x03010c04, 0x03010c04},
-	{0x0000a628, 0x04c15305, 0x04c15305, 0x04014c04, 0x04014c04},
-	{0x0000a62c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
-	{0x0000a630, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
-	{0x0000a634, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
-	{0x0000a638, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
-	{0x0000a63c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
-	{0x0000b2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
-	{0x0000b2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
-	{0x0000b2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
+	{0x0000a608, 0x01804601, 0x01804601, 0x00000000, 0x00000000},
+	{0x0000a60c, 0x01804601, 0x01804601, 0x00000000, 0x00000000},
+	{0x0000a610, 0x01804601, 0x01804601, 0x00000000, 0x00000000},
+	{0x0000a614, 0x01804601, 0x01804601, 0x01404000, 0x01404000},
+	{0x0000a618, 0x01804601, 0x01804601, 0x01404501, 0x01404501},
+	{0x0000a61c, 0x01804601, 0x01804601, 0x02008501, 0x02008501},
+	{0x0000a620, 0x03408d02, 0x03408d02, 0x0280ca03, 0x0280ca03},
+	{0x0000a624, 0x0300cc03, 0x0300cc03, 0x03010c04, 0x03010c04},
+	{0x0000a628, 0x03410d04, 0x03410d04, 0x04014c04, 0x04014c04},
+	{0x0000a62c, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005},
+	{0x0000a630, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005},
+	{0x0000a634, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005},
+	{0x0000a638, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005},
+	{0x0000a63c, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005},
+	{0x0000b2dc, 0x000cfff0, 0x000cfff0, 0x03aaa352, 0x03aaa352},
+	{0x0000b2e0, 0x000f0000, 0x000f0000, 0x03ccc584, 0x03ccc584},
+	{0x0000b2e4, 0x03f00000, 0x03f00000, 0x03f0f800, 0x03f0f800},
 	{0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
-	{0x0000c2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
-	{0x0000c2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
-	{0x0000c2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
+	{0x0000c2dc, 0x000cfff0, 0x000cfff0, 0x03aaa352, 0x03aaa352},
+	{0x0000c2e0, 0x000f0000, 0x000f0000, 0x03ccc584, 0x03ccc584},
+	{0x0000c2e4, 0x03f00000, 0x03f00000, 0x03f0f800, 0x03f0f800},
 	{0x0000c2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
 	{0x00016044, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
-	{0x00016048, 0x66480001, 0x66480001, 0x66480001, 0x66480001},
+	{0x00016048, 0x61200001, 0x61200001, 0x66480001, 0x66480001},
 	{0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
 	{0x00016444, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
-	{0x00016448, 0x66480001, 0x66480001, 0x66480001, 0x66480001},
+	{0x00016448, 0x61200001, 0x61200001, 0x66480001, 0x66480001},
 	{0x00016468, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
 	{0x00016844, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
-	{0x00016848, 0x66480001, 0x66480001, 0x66480001, 0x66480001},
+	{0x00016848, 0x61200001, 0x61200001, 0x66480001, 0x66480001},
 	{0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
 };
 
@@ -644,7 +644,7 @@ static const u32 ar9300Modes_high_ob_db_tx_gain_table_2p2[][5] = {
 	{0x0000a2e0, 0x0000f000, 0x0000f000, 0x03ccc584, 0x03ccc584},
 	{0x0000a2e4, 0x01ff0000, 0x01ff0000, 0x03f0f800, 0x03f0f800},
 	{0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
-	{0x0000a410, 0x000050d8, 0x000050d8, 0x000050d9, 0x000050d9},
+	{0x0000a410, 0x000050d4, 0x000050d4, 0x000050d9, 0x000050d9},
 	{0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
 	{0x0000a504, 0x04002222, 0x04002222, 0x04000002, 0x04000002},
 	{0x0000a508, 0x09002421, 0x09002421, 0x08000004, 0x08000004},
@@ -1086,8 +1086,8 @@ static const u32 ar9300Common_rx_gain_table_2p2[][2] = {
 	{0x0000b074, 0x00000000},
 	{0x0000b078, 0x00000000},
 	{0x0000b07c, 0x00000000},
-	{0x0000b080, 0x2a2d2f32},
-	{0x0000b084, 0x21232328},
+	{0x0000b080, 0x23232323},
+	{0x0000b084, 0x21232323},
 	{0x0000b088, 0x19191c1e},
 	{0x0000b08c, 0x12141417},
 	{0x0000b090, 0x07070e0e},
@@ -1385,9 +1385,9 @@ static const u32 ar9300_2p2_mac_core[][2] = {
 	{0x000081f8, 0x00000000},
 	{0x000081fc, 0x00000000},
 	{0x00008240, 0x00100000},
-	{0x00008244, 0x0010f424},
+	{0x00008244, 0x0010f400},
 	{0x00008248, 0x00000800},
-	{0x0000824c, 0x0001e848},
+	{0x0000824c, 0x0001e800},
 	{0x00008250, 0x00000000},
 	{0x00008254, 0x00000000},
 	{0x00008258, 0x00000000},
@@ -1726,16 +1726,30 @@ static const u32 ar9300PciePhy_pll_on_clkreq_disable_L1_2p2[][2] = {
 
 static const u32 ar9300PciePhy_clkreq_enable_L1_2p2[][2] = {
 	/* Addr      allmodes  */
-	{0x00004040, 0x08253e5e},
+	{0x00004040, 0x0825365e},
 	{0x00004040, 0x0008003b},
 	{0x00004044, 0x00000000},
 };
 
 static const u32 ar9300PciePhy_clkreq_disable_L1_2p2[][2] = {
 	/* Addr      allmodes  */
-	{0x00004040, 0x08213e5e},
+	{0x00004040, 0x0821365e},
 	{0x00004040, 0x0008003b},
 	{0x00004044, 0x00000000},
 };
 
+static const u32 ar9300_2p2_baseband_core_txfir_coeff_japan_2484[][2] = {
+	/* Addr      allmodes  */
+	{0x0000a398, 0x00000000},
+	{0x0000a39c, 0x6f7f0301},
+	{0x0000a3a0, 0xca9228ee},
+};
+
+static const u32 ar9300_2p2_baseband_postamble_dfs_channel[][3] = {
+	/* Addr      5G          2G        */
+	{0x00009824, 0x5ac668d0, 0x5ac668d0},
+	{0x00009e0c, 0x6d4000e2, 0x6d4000e2},
+	{0x00009e14, 0x37b9625e, 0x37b9625e},
+};
+
 #endif /* INITVALS_9003_2P2_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_buffalo_initvals.h b/drivers/net/wireless/ath/ath9k/ar9003_buffalo_initvals.h
new file mode 100644
index 000000000000..59cf738f70df
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9003_buffalo_initvals.h
@@ -0,0 +1,126 @@
+/*
+ * Copyright (c) 2013 Qualcomm Atheros Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef INITVALS_9003_BUFFALO_H
+#define INITVALS_9003_BUFFALO_H
+
+static const u32 ar9300Modes_high_power_tx_gain_table_buffalo[][5] = {
+	/* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
+	{0x0000a2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
+	{0x0000a2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
+	{0x0000a2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
+	{0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+	{0x0000a410, 0x000050d8, 0x000050d8, 0x000050d9, 0x000050d9},
+	{0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
+	{0x0000a504, 0x04002222, 0x04002222, 0x04000002, 0x04000002},
+	{0x0000a508, 0x09002421, 0x09002421, 0x08000004, 0x08000004},
+	{0x0000a50c, 0x0d002621, 0x0d002621, 0x0b000200, 0x0b000200},
+	{0x0000a510, 0x13004620, 0x13004620, 0x0f000202, 0x0f000202},
+	{0x0000a514, 0x19004a20, 0x19004a20, 0x11000400, 0x11000400},
+	{0x0000a518, 0x1d004e20, 0x1d004e20, 0x15000402, 0x15000402},
+	{0x0000a51c, 0x21005420, 0x21005420, 0x19000404, 0x19000404},
+	{0x0000a520, 0x26005e20, 0x26005e20, 0x1b000603, 0x1b000603},
+	{0x0000a524, 0x2b005e40, 0x2b005e40, 0x1f000a02, 0x1f000a02},
+	{0x0000a528, 0x2f005e42, 0x2f005e42, 0x23000a04, 0x23000a04},
+	{0x0000a52c, 0x33005e44, 0x33005e44, 0x26000a20, 0x26000a20},
+	{0x0000a530, 0x38005e65, 0x38005e65, 0x2a000e20, 0x2a000e20},
+	{0x0000a534, 0x3c005e69, 0x3c005e69, 0x2e000e22, 0x2e000e22},
+	{0x0000a538, 0x40005e6b, 0x40005e6b, 0x31000e24, 0x31000e24},
+	{0x0000a53c, 0x44005e6d, 0x44005e6d, 0x34001640, 0x34001640},
+	{0x0000a540, 0x49005e72, 0x49005e72, 0x38001660, 0x38001660},
+	{0x0000a544, 0x4e005eb2, 0x4e005eb2, 0x3b001861, 0x3b001861},
+	{0x0000a548, 0x53005f12, 0x53005f12, 0x3e001a81, 0x3e001a81},
+	{0x0000a54c, 0x59025eb2, 0x59025eb2, 0x42001a83, 0x42001a83},
+	{0x0000a550, 0x5e025f12, 0x5e025f12, 0x44001c84, 0x44001c84},
+	{0x0000a554, 0x61027f12, 0x61027f12, 0x48001ce3, 0x48001ce3},
+	{0x0000a558, 0x6702bf12, 0x6702bf12, 0x4c001ce5, 0x4c001ce5},
+	{0x0000a55c, 0x6b02bf14, 0x6b02bf14, 0x50001ce9, 0x50001ce9},
+	{0x0000a560, 0x6f02bf16, 0x6f02bf16, 0x54001ceb, 0x54001ceb},
+	{0x0000a564, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+	{0x0000a568, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+	{0x0000a56c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+	{0x0000a570, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+	{0x0000a574, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+	{0x0000a578, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+	{0x0000a57c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+	{0x0000a580, 0x00802220, 0x00802220, 0x00800000, 0x00800000},
+	{0x0000a584, 0x04802222, 0x04802222, 0x04800002, 0x04800002},
+	{0x0000a588, 0x09802421, 0x09802421, 0x08800004, 0x08800004},
+	{0x0000a58c, 0x0d802621, 0x0d802621, 0x0b800200, 0x0b800200},
+	{0x0000a590, 0x13804620, 0x13804620, 0x0f800202, 0x0f800202},
+	{0x0000a594, 0x19804a20, 0x19804a20, 0x11800400, 0x11800400},
+	{0x0000a598, 0x1d804e20, 0x1d804e20, 0x15800402, 0x15800402},
+	{0x0000a59c, 0x21805420, 0x21805420, 0x19800404, 0x19800404},
+	{0x0000a5a0, 0x26805e20, 0x26805e20, 0x1b800603, 0x1b800603},
+	{0x0000a5a4, 0x2b805e40, 0x2b805e40, 0x1f800a02, 0x1f800a02},
+	{0x0000a5a8, 0x2f805e42, 0x2f805e42, 0x23800a04, 0x23800a04},
+	{0x0000a5ac, 0x33805e44, 0x33805e44, 0x26800a20, 0x26800a20},
+	{0x0000a5b0, 0x38805e65, 0x38805e65, 0x2a800e20, 0x2a800e20},
+	{0x0000a5b4, 0x3c805e69, 0x3c805e69, 0x2e800e22, 0x2e800e22},
+	{0x0000a5b8, 0x40805e6b, 0x40805e6b, 0x31800e24, 0x31800e24},
+	{0x0000a5bc, 0x44805e6d, 0x44805e6d, 0x34801640, 0x34801640},
+	{0x0000a5c0, 0x49805e72, 0x49805e72, 0x38801660, 0x38801660},
+	{0x0000a5c4, 0x4e805eb2, 0x4e805eb2, 0x3b801861, 0x3b801861},
+	{0x0000a5c8, 0x53805f12, 0x53805f12, 0x3e801a81, 0x3e801a81},
+	{0x0000a5cc, 0x59825eb2, 0x59825eb2, 0x42801a83, 0x42801a83},
+	{0x0000a5d0, 0x5e825f12, 0x5e825f12, 0x44801c84, 0x44801c84},
+	{0x0000a5d4, 0x61827f12, 0x61827f12, 0x48801ce3, 0x48801ce3},
+	{0x0000a5d8, 0x6782bf12, 0x6782bf12, 0x4c801ce5, 0x4c801ce5},
+	{0x0000a5dc, 0x6b82bf14, 0x6b82bf14, 0x50801ce9, 0x50801ce9},
+	{0x0000a5e0, 0x6f82bf16, 0x6f82bf16, 0x54801ceb, 0x54801ceb},
+	{0x0000a5e4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+	{0x0000a5e8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+	{0x0000a5ec, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+	{0x0000a5f0, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+	{0x0000a5f4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+	{0x0000a5f8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+	{0x0000a5fc, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+	{0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000a610, 0x00804000, 0x00804000, 0x00000000, 0x00000000},
+	{0x0000a614, 0x00804201, 0x00804201, 0x01404000, 0x01404000},
+	{0x0000a618, 0x0280c802, 0x0280c802, 0x01404501, 0x01404501},
+	{0x0000a61c, 0x0280ca03, 0x0280ca03, 0x02008501, 0x02008501},
+	{0x0000a620, 0x04c15104, 0x04c15104, 0x0280ca03, 0x0280ca03},
+	{0x0000a624, 0x04c15305, 0x04c15305, 0x03010c04, 0x03010c04},
+	{0x0000a628, 0x04c15305, 0x04c15305, 0x04014c04, 0x04014c04},
+	{0x0000a62c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+	{0x0000a630, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+	{0x0000a634, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+	{0x0000a638, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+	{0x0000a63c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+	{0x0000b2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
+	{0x0000b2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
+	{0x0000b2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
+	{0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+	{0x0000c2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
+	{0x0000c2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
+	{0x0000c2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
+	{0x0000c2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+	{0x00016044, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
+	{0x00016048, 0x66480001, 0x66480001, 0x66480001, 0x66480001},
+	{0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+	{0x00016444, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
+	{0x00016448, 0x66480001, 0x66480001, 0x66480001, 0x66480001},
+	{0x00016468, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+	{0x00016844, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
+	{0x00016848, 0x66480001, 0x66480001, 0x66480001, 0x66480001},
+	{0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+};
+
+#endif /* INITVALS_9003_BUFFALO_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
index 22934d3ca544..a352128c40ad 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
@@ -326,6 +326,224 @@ static void ar9003_hw_init_cal_settings(struct ath_hw *ah)
 	ah->supp_cals = IQ_MISMATCH_CAL;
 }
 
+#define OFF_UPPER_LT 24
+#define OFF_LOWER_LT 7
+
+static bool ar9003_hw_dynamic_osdac_selection(struct ath_hw *ah,
+					      bool txiqcal_done)
+{
+	struct ath_common *common = ath9k_hw_common(ah);
+	int ch0_done, osdac_ch0, dc_off_ch0_i1, dc_off_ch0_q1, dc_off_ch0_i2,
+		dc_off_ch0_q2, dc_off_ch0_i3, dc_off_ch0_q3;
+	int ch1_done, osdac_ch1, dc_off_ch1_i1, dc_off_ch1_q1, dc_off_ch1_i2,
+		dc_off_ch1_q2, dc_off_ch1_i3, dc_off_ch1_q3;
+	int ch2_done, osdac_ch2, dc_off_ch2_i1, dc_off_ch2_q1, dc_off_ch2_i2,
+		dc_off_ch2_q2, dc_off_ch2_i3, dc_off_ch2_q3;
+	bool status;
+	u32 temp, val;
+
+	/*
+	 * Clear offset and IQ calibration, run AGC cal.
+	 */
+	REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
+		    AR_PHY_AGC_CONTROL_OFFSET_CAL);
+	REG_CLR_BIT(ah, AR_PHY_TX_IQCAL_CONTROL_0,
+		    AR_PHY_TX_IQCAL_CONTROL_0_ENABLE_TXIQ_CAL);
+	REG_WRITE(ah, AR_PHY_AGC_CONTROL,
+		  REG_READ(ah, AR_PHY_AGC_CONTROL) | AR_PHY_AGC_CONTROL_CAL);
+
+	status = ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL,
+			       AR_PHY_AGC_CONTROL_CAL,
+			       0, AH_WAIT_TIMEOUT);
+	if (!status) {
+		ath_dbg(common, CALIBRATE,
+			"AGC cal without offset cal failed to complete in 1ms");
+		return false;
+	}
+
+	/*
+	 * Allow only offset calibration and disable the others
+	 * (Carrier Leak calibration, TX Filter calibration and
+	 *  Peak Detector offset calibration).
+	 */
+	REG_SET_BIT(ah, AR_PHY_AGC_CONTROL,
+		    AR_PHY_AGC_CONTROL_OFFSET_CAL);
+	REG_CLR_BIT(ah, AR_PHY_CL_CAL_CTL,
+		    AR_PHY_CL_CAL_ENABLE);
+	REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
+		    AR_PHY_AGC_CONTROL_FLTR_CAL);
+	REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
+		    AR_PHY_AGC_CONTROL_PKDET_CAL);
+
+	ch0_done = 0;
+	ch1_done = 0;
+	ch2_done = 0;
+
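+	/*
+	 * Sweep the OSDAC setting for each chain and re-run the offset
+	 * calibration until the DC offsets measured for the high gain,
+	 * low gain and loopback modes all fall within
+	 * [OFF_LOWER_LT, OFF_UPPER_LT], or the maximum setting (3) is
+	 * reached.
+	 */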
+	while ((ch0_done == 0) || (ch1_done == 0) || (ch2_done == 0)) {
+		osdac_ch0 = (REG_READ(ah, AR_PHY_65NM_CH0_BB1) >> 30) & 0x3;
+		osdac_ch1 = (REG_READ(ah, AR_PHY_65NM_CH1_BB1) >> 30) & 0x3;
+		osdac_ch2 = (REG_READ(ah, AR_PHY_65NM_CH2_BB1) >> 30) & 0x3;
+
+		REG_SET_BIT(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN);
+
+		REG_WRITE(ah, AR_PHY_AGC_CONTROL,
+			  REG_READ(ah, AR_PHY_AGC_CONTROL) | AR_PHY_AGC_CONTROL_CAL);
+
+		status = ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL,
+				       AR_PHY_AGC_CONTROL_CAL,
+				       0, AH_WAIT_TIMEOUT);
+		if (!status) {
+			ath_dbg(common, CALIBRATE,
+				"DC offset cal failed to complete in 1ms");
+			return false;
+		}
+
+		REG_CLR_BIT(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN);
+
+		/*
+		 * High gain.
+		 */
+		REG_WRITE(ah, AR_PHY_65NM_CH0_BB3,
+			  ((REG_READ(ah, AR_PHY_65NM_CH0_BB3) & 0xfffffcff) | (1 << 8)));
+		REG_WRITE(ah, AR_PHY_65NM_CH1_BB3,
+			  ((REG_READ(ah, AR_PHY_65NM_CH1_BB3) & 0xfffffcff) | (1 << 8)));
+		REG_WRITE(ah, AR_PHY_65NM_CH2_BB3,
+			  ((REG_READ(ah, AR_PHY_65NM_CH2_BB3) & 0xfffffcff) | (1 << 8)));
+
+		temp = REG_READ(ah, AR_PHY_65NM_CH0_BB3);
+		dc_off_ch0_i1 = (temp >> 26) & 0x1f;
+		dc_off_ch0_q1 = (temp >> 21) & 0x1f;
+
+		temp = REG_READ(ah, AR_PHY_65NM_CH1_BB3);
+		dc_off_ch1_i1 = (temp >> 26) & 0x1f;
+		dc_off_ch1_q1 = (temp >> 21) & 0x1f;
+
+		temp = REG_READ(ah, AR_PHY_65NM_CH2_BB3);
+		dc_off_ch2_i1 = (temp >> 26) & 0x1f;
+		dc_off_ch2_q1 = (temp >> 21) & 0x1f;
+
+		/*
+		 * Low gain.
+		 */
+		REG_WRITE(ah, AR_PHY_65NM_CH0_BB3,
+			  ((REG_READ(ah, AR_PHY_65NM_CH0_BB3) & 0xfffffcff) | (2 << 8)));
+		REG_WRITE(ah, AR_PHY_65NM_CH1_BB3,
+			  ((REG_READ(ah, AR_PHY_65NM_CH1_BB3) & 0xfffffcff) | (2 << 8)));
+		REG_WRITE(ah, AR_PHY_65NM_CH2_BB3,
+			  ((REG_READ(ah, AR_PHY_65NM_CH2_BB3) & 0xfffffcff) | (2 << 8)));
+
+		temp = REG_READ(ah, AR_PHY_65NM_CH0_BB3);
+		dc_off_ch0_i2 = (temp >> 26) & 0x1f;
+		dc_off_ch0_q2 = (temp >> 21) & 0x1f;
+
+		temp = REG_READ(ah, AR_PHY_65NM_CH1_BB3);
+		dc_off_ch1_i2 = (temp >> 26) & 0x1f;
+		dc_off_ch1_q2 = (temp >> 21) & 0x1f;
+
+		temp = REG_READ(ah, AR_PHY_65NM_CH2_BB3);
+		dc_off_ch2_i2 = (temp >> 26) & 0x1f;
+		dc_off_ch2_q2 = (temp >> 21) & 0x1f;
+
+		/*
+		 * Loopback.
+		 */
+		REG_WRITE(ah, AR_PHY_65NM_CH0_BB3,
+			  ((REG_READ(ah, AR_PHY_65NM_CH0_BB3) & 0xfffffcff) | (3 << 8)));
+		REG_WRITE(ah, AR_PHY_65NM_CH1_BB3,
+			  ((REG_READ(ah, AR_PHY_65NM_CH1_BB3) & 0xfffffcff) | (3 << 8)));
+		REG_WRITE(ah, AR_PHY_65NM_CH2_BB3,
+			  ((REG_READ(ah, AR_PHY_65NM_CH2_BB3) & 0xfffffcff) | (3 << 8)));
+
+		temp = REG_READ(ah, AR_PHY_65NM_CH0_BB3);
+		dc_off_ch0_i3 = (temp >> 26) & 0x1f;
+		dc_off_ch0_q3 = (temp >> 21) & 0x1f;
+
+		temp = REG_READ(ah, AR_PHY_65NM_CH1_BB3);
+		dc_off_ch1_i3 = (temp >> 26) & 0x1f;
+		dc_off_ch1_q3 = (temp >> 21) & 0x1f;
+
+		temp = REG_READ(ah, AR_PHY_65NM_CH2_BB3);
+		dc_off_ch2_i3 = (temp >> 26) & 0x1f;
+		dc_off_ch2_q3 = (temp >> 21) & 0x1f;
+
+		if ((dc_off_ch0_i1 > OFF_UPPER_LT) || (dc_off_ch0_i1 < OFF_LOWER_LT) ||
+		    (dc_off_ch0_i2 > OFF_UPPER_LT) || (dc_off_ch0_i2 < OFF_LOWER_LT) ||
+		    (dc_off_ch0_i3 > OFF_UPPER_LT) || (dc_off_ch0_i3 < OFF_LOWER_LT) ||
+		    (dc_off_ch0_q1 > OFF_UPPER_LT) || (dc_off_ch0_q1 < OFF_LOWER_LT) ||
+		    (dc_off_ch0_q2 > OFF_UPPER_LT) || (dc_off_ch0_q2 < OFF_LOWER_LT) ||
+		    (dc_off_ch0_q3 > OFF_UPPER_LT) || (dc_off_ch0_q3 < OFF_LOWER_LT)) {
+			if (osdac_ch0 == 3) {
+				ch0_done = 1;
+			} else {
+				osdac_ch0++;
+
+				val = REG_READ(ah, AR_PHY_65NM_CH0_BB1) & 0x3fffffff;
+				val |= (osdac_ch0 << 30);
+				REG_WRITE(ah, AR_PHY_65NM_CH0_BB1, val);
+
+				ch0_done = 0;
+			}
+		} else {
+			ch0_done = 1;
+		}
+
+		if ((dc_off_ch1_i1 > OFF_UPPER_LT) || (dc_off_ch1_i1 < OFF_LOWER_LT) ||
+		    (dc_off_ch1_i2 > OFF_UPPER_LT) || (dc_off_ch1_i2 < OFF_LOWER_LT) ||
+		    (dc_off_ch1_i3 > OFF_UPPER_LT) || (dc_off_ch1_i3 < OFF_LOWER_LT) ||
+		    (dc_off_ch1_q1 > OFF_UPPER_LT) || (dc_off_ch1_q1 < OFF_LOWER_LT) ||
+		    (dc_off_ch1_q2 > OFF_UPPER_LT) || (dc_off_ch1_q2 < OFF_LOWER_LT) ||
+		    (dc_off_ch1_q3 > OFF_UPPER_LT) || (dc_off_ch1_q3 < OFF_LOWER_LT)) {
+			if (osdac_ch1 == 3) {
+				ch1_done = 1;
+			} else {
+				osdac_ch1++;
+
+				val = REG_READ(ah, AR_PHY_65NM_CH1_BB1) & 0x3fffffff;
+				val |= (osdac_ch1 << 30);
+				REG_WRITE(ah, AR_PHY_65NM_CH1_BB1, val);
+
+				ch1_done = 0;
+			}
+		} else {
+			ch1_done = 1;
+		}
+
+		if ((dc_off_ch2_i1 > OFF_UPPER_LT) || (dc_off_ch2_i1 < OFF_LOWER_LT) ||
+		    (dc_off_ch2_i2 > OFF_UPPER_LT) || (dc_off_ch2_i2 < OFF_LOWER_LT) ||
+		    (dc_off_ch2_i3 > OFF_UPPER_LT) || (dc_off_ch2_i3 < OFF_LOWER_LT) ||
+		    (dc_off_ch2_q1 > OFF_UPPER_LT) || (dc_off_ch2_q1 < OFF_LOWER_LT) ||
+		    (dc_off_ch2_q2 > OFF_UPPER_LT) || (dc_off_ch2_q2 < OFF_LOWER_LT) ||
+		    (dc_off_ch2_q3 > OFF_UPPER_LT) || (dc_off_ch2_q3 < OFF_LOWER_LT)) {
+			if (osdac_ch2 == 3) {
+				ch2_done = 1;
+			} else {
+				osdac_ch2++;
+
+				val = REG_READ(ah, AR_PHY_65NM_CH2_BB1) & 0x3fffffff;
+				val |= (osdac_ch2 << 30);
+				REG_WRITE(ah, AR_PHY_65NM_CH2_BB1, val);
+
+				ch2_done = 0;
+			}
+		} else {
+			ch2_done = 1;
+		}
+	}
+
+	REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
+		    AR_PHY_AGC_CONTROL_OFFSET_CAL);
+	REG_SET_BIT(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN);
+
+	/*
+	 * We don't need to check txiqcal_done here since it is always
+	 * set for AR9550.
+	 */
+	REG_SET_BIT(ah, AR_PHY_TX_IQCAL_CONTROL_0,
+		    AR_PHY_TX_IQCAL_CONTROL_0_ENABLE_TXIQ_CAL);
+
+	return true;
+}
+
 /*
  * solve 4x4 linear equation used in loopback iq cal.
  */
@@ -347,7 +565,7 @@ static bool ar9003_hw_solve_iq_cal(struct ath_hw *ah,
 	const s32 result_shift = 1 << 15;
 	struct ath_common *common = ath9k_hw_common(ah);
 
-	f2 = (f1 * f1 + f3 * f3) / result_shift;
+	f2 = ((f1 >> 3) * (f1 >> 3) + (f3 >> 3) * (f3 >> 3)) >> 9;
 
 	if (!f2) {
 		ath_dbg(common, CALIBRATE, "Divide by 0\n");
@@ -437,8 +655,8 @@ static bool ar9003_hw_calc_iq_corr(struct ath_hw *ah,
 	if (i2_m_q2_a0_d1 > 0x800)
 		i2_m_q2_a0_d1 = -((0xfff - i2_m_q2_a0_d1) + 1);
 
-	if (i2_p_q2_a0_d1 > 0x800)
-		i2_p_q2_a0_d1 = -((0xfff - i2_p_q2_a0_d1) + 1);
+	if (i2_p_q2_a0_d1 > 0x1000)
+		i2_p_q2_a0_d1 = -((0x1fff - i2_p_q2_a0_d1) + 1);
 
 	if (iq_corr_a0_d1 > 0x800)
 		iq_corr_a0_d1 = -((0xfff - iq_corr_a0_d1) + 1);
@@ -482,6 +700,19 @@ static bool ar9003_hw_calc_iq_corr(struct ath_hw *ah,
 		return false;
 	}
 
+	if ((i2_p_q2_a0_d0 < 1024) || (i2_p_q2_a0_d0 > 2047) ||
+	    (i2_p_q2_a1_d0 < 0) || (i2_p_q2_a1_d1 < 0) ||
+	    (i2_p_q2_a0_d0 <= i2_m_q2_a0_d0) ||
+	    (i2_p_q2_a0_d0 <= iq_corr_a0_d0) ||
+	    (i2_p_q2_a0_d1 <= i2_m_q2_a0_d1) ||
+	    (i2_p_q2_a0_d1 <= iq_corr_a0_d1) ||
+	    (i2_p_q2_a1_d0 <= i2_m_q2_a1_d0) ||
+	    (i2_p_q2_a1_d0 <= iq_corr_a1_d0) ||
+	    (i2_p_q2_a1_d1 <= i2_m_q2_a1_d1) ||
+	    (i2_p_q2_a1_d1 <= iq_corr_a1_d1)) {
+		return false;
+	}
+
 	mag_a0_d0 = (i2_m_q2_a0_d0 * res_scale) / i2_p_q2_a0_d0;
 	phs_a0_d0 = (iq_corr_a0_d0 * res_scale) / i2_p_q2_a0_d0;
 
@@ -898,7 +1129,7 @@ static void ar9003_hw_tx_iq_cal_reload(struct ath_hw *ah)
 
 static void ar9003_hw_manual_peak_cal(struct ath_hw *ah, u8 chain, bool is_2g)
 {
-	int offset[8], total = 0, test;
+	int offset[8] = {0}, total = 0, test;
 	int agc_out, i;
 
 	REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_GAINSTAGES(chain),
@@ -923,12 +1154,18 @@ static void ar9003_hw_manual_peak_cal(struct ath_hw *ah, u8 chain, bool is_2g)
 		      AR_PHY_65NM_RXRF_AGC_AGC_ON_OVR, 0x1);
 	REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
 		      AR_PHY_65NM_RXRF_AGC_AGC_CAL_OVR, 0x1);
-	if (is_2g)
-		REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
-			      AR_PHY_65NM_RXRF_AGC_AGC2G_DBDAC_OVR, 0x0);
-	else
+
+	if (AR_SREV_9330_11(ah)) {
 		REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
-			      AR_PHY_65NM_RXRF_AGC_AGC5G_DBDAC_OVR, 0x0);
+			      AR_PHY_65NM_RXRF_AGC_AGC2G_CALDAC_OVR, 0x0);
+	} else {
+		if (is_2g)
+			REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
+				      AR_PHY_65NM_RXRF_AGC_AGC2G_DBDAC_OVR, 0x0);
+		else
+			REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
+				      AR_PHY_65NM_RXRF_AGC_AGC5G_DBDAC_OVR, 0x0);
+	}
 
 	for (i = 6; i > 0; i--) {
 		offset[i] = BIT(i - 1);
@@ -964,9 +1201,9 @@ static void ar9003_hw_manual_peak_cal(struct ath_hw *ah, u8 chain, bool is_2g)
 		      AR_PHY_65NM_RXRF_AGC_AGC_CAL_OVR, 0);
 }
 
-static void ar9003_hw_do_manual_peak_cal(struct ath_hw *ah,
-					 struct ath9k_channel *chan,
-					 bool run_rtt_cal)
+static void ar9003_hw_do_pcoem_manual_peak_cal(struct ath_hw *ah,
+					       struct ath9k_channel *chan,
+					       bool run_rtt_cal)
 {
 	struct ath9k_hw_cal_data *caldata = ah->caldata;
 	int i;
@@ -1040,14 +1277,14 @@ static void ar9003_hw_cl_cal_post_proc(struct ath_hw *ah, bool is_reusable)
 	}
 }
 
-static bool ar9003_hw_init_cal(struct ath_hw *ah,
-			       struct ath9k_channel *chan)
+static bool ar9003_hw_init_cal_pcoem(struct ath_hw *ah,
+				     struct ath9k_channel *chan)
 {
 	struct ath_common *common = ath9k_hw_common(ah);
 	struct ath9k_hw_cal_data *caldata = ah->caldata;
 	bool txiqcal_done = false;
 	bool is_reusable = true, status = true;
-	bool run_rtt_cal = false, run_agc_cal, sep_iq_cal = false;
+	bool run_rtt_cal = false, run_agc_cal;
 	bool rtt = !!(ah->caps.hw_caps & ATH9K_HW_CAP_RTT);
 	u32 rx_delay = 0;
 	u32 agc_ctrl = 0, agc_supp_cals = AR_PHY_AGC_CONTROL_OFFSET_CAL |
@@ -1119,22 +1356,12 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah,
 			REG_CLR_BIT(ah, AR_PHY_TX_IQCAL_CONTROL_0,
 				    AR_PHY_TX_IQCAL_CONTROL_0_ENABLE_TXIQ_CAL);
 		txiqcal_done = run_agc_cal = true;
-	} else if (caldata && !test_bit(TXIQCAL_DONE, &caldata->cal_flags)) {
-		run_agc_cal = true;
-		sep_iq_cal = true;
 	}
 
 skip_tx_iqcal:
 	if (ath9k_hw_mci_is_enabled(ah) && IS_CHAN_2GHZ(chan) && run_agc_cal)
 		ar9003_mci_init_cal_req(ah, &is_reusable);
 
-	if (sep_iq_cal) {
-		txiqcal_done = ar9003_hw_tx_iq_cal_run(ah);
-		REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_DIS);
-		udelay(5);
-		REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN);
-	}
-
 	if (REG_READ(ah, AR_PHY_CL_CAL_CTL) & AR_PHY_CL_CAL_ENABLE) {
 		rx_delay = REG_READ(ah, AR_PHY_RX_DELAY);
 		/* Disable BB_active */
@@ -1155,7 +1382,7 @@ skip_tx_iqcal:
 				       AR_PHY_AGC_CONTROL_CAL,
 				       0, AH_WAIT_TIMEOUT);
 
-		ar9003_hw_do_manual_peak_cal(ah, chan, run_rtt_cal);
+		ar9003_hw_do_pcoem_manual_peak_cal(ah, chan, run_rtt_cal);
 	}
 
 	if (REG_READ(ah, AR_PHY_CL_CAL_CTL) & AR_PHY_CL_CAL_ENABLE) {
@@ -1228,13 +1455,117 @@ skip_tx_iqcal:
 	return true;
 }
 
+static bool ar9003_hw_init_cal_soc(struct ath_hw *ah,
+				   struct ath9k_channel *chan)
+{
+	struct ath_common *common = ath9k_hw_common(ah);
+	struct ath9k_hw_cal_data *caldata = ah->caldata;
+	bool txiqcal_done = false;
+	bool is_reusable = true, status = true;
+	bool run_agc_cal = false, sep_iq_cal = false;
+
+	/* Use chip chainmask only for calibration */
+	ar9003_hw_set_chain_masks(ah, ah->caps.rx_chainmask, ah->caps.tx_chainmask);
+
+	if (ah->enabled_cals & TX_CL_CAL) {
+		REG_SET_BIT(ah, AR_PHY_CL_CAL_CTL, AR_PHY_CL_CAL_ENABLE);
+		run_agc_cal = true;
+	}
+
+	if (IS_CHAN_HALF_RATE(chan) || IS_CHAN_QUARTER_RATE(chan))
+		goto skip_tx_iqcal;
+
+	/* Do Tx IQ Calibration */
+	REG_RMW_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_1,
+		      AR_PHY_TX_IQCAL_CONTROL_1_IQCORR_I_Q_COFF_DELPT,
+		      DELPT);
+
+	/*
+	 * For AR9485 or later chips, TxIQ cal runs as part of
+	 * AGC calibration. Specifically, AR9550 in SoC chips.
+	 */
+	if (ah->enabled_cals & TX_IQ_ON_AGC_CAL) {
+		txiqcal_done = true;
+		run_agc_cal = true;
+	} else {
+		sep_iq_cal = true;
+		run_agc_cal = true;
+	}
+
+	/*
+	 * In the SoC family, this will run for AR9300, AR9331 and AR9340.
+	 */
+	if (sep_iq_cal) {
+		txiqcal_done = ar9003_hw_tx_iq_cal_run(ah);
+		REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_DIS);
+		udelay(5);
+		REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN);
+	}
+
+	if (AR_SREV_9550(ah) && IS_CHAN_2GHZ(chan)) {
+		if (!ar9003_hw_dynamic_osdac_selection(ah, txiqcal_done))
+			return false;
+	}
+
+skip_tx_iqcal:
+	if (run_agc_cal || !(ah->ah_flags & AH_FASTCC)) {
+		if (AR_SREV_9330_11(ah))
+			ar9003_hw_manual_peak_cal(ah, 0, IS_CHAN_2GHZ(chan));
+
+		/* Calibrate the AGC */
+		REG_WRITE(ah, AR_PHY_AGC_CONTROL,
+			  REG_READ(ah, AR_PHY_AGC_CONTROL) |
+			  AR_PHY_AGC_CONTROL_CAL);
+
+		/* Poll for offset calibration complete */
+		status = ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL,
+				       AR_PHY_AGC_CONTROL_CAL,
+				       0, AH_WAIT_TIMEOUT);
+	}
+
+	if (!status) {
+		ath_dbg(common, CALIBRATE,
+			"offset calibration failed to complete in %d ms; noisy environment?\n",
+			AH_WAIT_TIMEOUT / 1000);
+		return false;
+	}
+
+	if (txiqcal_done)
+		ar9003_hw_tx_iq_cal_post_proc(ah, is_reusable);
+
+	/* Revert chainmask to runtime parameters */
+	ar9003_hw_set_chain_masks(ah, ah->rxchainmask, ah->txchainmask);
+
+	/* Initialize list pointers */
+	ah->cal_list = ah->cal_list_last = ah->cal_list_curr = NULL;
+
+	INIT_CAL(&ah->iq_caldata);
+	INSERT_CAL(ah, &ah->iq_caldata);
+	ath_dbg(common, CALIBRATE, "enabling IQ Calibration\n");
+
+	/* Initialize current pointer to first element in list */
+	ah->cal_list_curr = ah->cal_list;
+
+	if (ah->cal_list_curr)
+		ath9k_hw_reset_calibration(ah, ah->cal_list_curr);
+
+	if (caldata)
+		caldata->CalValid = 0;
+
+	return true;
+}
+
 void ar9003_hw_attach_calib_ops(struct ath_hw *ah)
 {
 	struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
 	struct ath_hw_ops *ops = ath9k_hw_ops(ah);
 
+	if (AR_SREV_9485(ah) || AR_SREV_9462(ah) || AR_SREV_9565(ah))
+		priv_ops->init_cal = ar9003_hw_init_cal_pcoem;
+	else
+		priv_ops->init_cal = ar9003_hw_init_cal_soc;
+
 	priv_ops->init_cal_settings = ar9003_hw_init_cal_settings;
-	priv_ops->init_cal = ar9003_hw_init_cal;
 	priv_ops->setup_calibration = ar9003_hw_setup_calibration;
 
 	ops->calibrate = ar9003_hw_calibrate;
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index 130657db5c43..25243cbc07f0 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -131,6 +131,7 @@ static const struct ar9300_eeprom ar9300_default = {
 		.thresh62 = 28,
 		.papdRateMaskHt20 = LE32(0x0cf0e0e0),
 		.papdRateMaskHt40 = LE32(0x6cf0e0e0),
+		.switchcomspdt = 0,
 		.xlna_bias_strength = 0,
 		.futureModal = {
 			0, 0, 0, 0, 0, 0, 0,
@@ -138,7 +139,7 @@ static const struct ar9300_eeprom ar9300_default = {
 	 },
 	.base_ext1 = {
 		.ant_div_control = 0,
-		.future = {0, 0, 0},
+		.future = {0, 0},
 		.tempslopextension = {0, 0, 0, 0, 0, 0, 0, 0}
 	},
 	.calFreqPier2G = {
@@ -333,6 +334,7 @@ static const struct ar9300_eeprom ar9300_default = {
 		.thresh62 = 28,
 		.papdRateMaskHt20 = LE32(0x0c80c080),
 		.papdRateMaskHt40 = LE32(0x0080c080),
+		.switchcomspdt = 0,
 		.xlna_bias_strength = 0,
 		.futureModal = {
 			0, 0, 0, 0, 0, 0, 0,
@@ -707,6 +709,7 @@ static const struct ar9300_eeprom ar9300_x113 = {
 		.thresh62 = 28,
 		.papdRateMaskHt20 = LE32(0x0c80c080),
 		.papdRateMaskHt40 = LE32(0x0080c080),
+		.switchcomspdt = 0,
 		.xlna_bias_strength = 0,
 		.futureModal = {
 			0, 0, 0, 0, 0, 0, 0,
@@ -714,7 +717,7 @@ static const struct ar9300_eeprom ar9300_x113 = {
 	 },
 	 .base_ext1 = {
 		.ant_div_control = 0,
-		.future = {0, 0, 0},
+		.future = {0, 0},
 		.tempslopextension = {0, 0, 0, 0, 0, 0, 0, 0}
 	 },
 	.calFreqPier2G = {
@@ -909,6 +912,7 @@ static const struct ar9300_eeprom ar9300_x113 = {
 		.thresh62 = 28,
 		.papdRateMaskHt20 = LE32(0x0cf0e0e0),
 		.papdRateMaskHt40 = LE32(0x6cf0e0e0),
+		.switchcomspdt = 0,
 		.xlna_bias_strength = 0,
 		.futureModal = {
 			0, 0, 0, 0, 0, 0, 0,
@@ -1284,6 +1288,7 @@ static const struct ar9300_eeprom ar9300_h112 = {
 		.thresh62 = 28,
 		.papdRateMaskHt20 = LE32(0x0c80c080),
 		.papdRateMaskHt40 = LE32(0x0080c080),
+		.switchcomspdt = 0,
 		.xlna_bias_strength = 0,
 		.futureModal = {
 			0, 0, 0, 0, 0, 0, 0,
@@ -1291,7 +1296,7 @@ static const struct ar9300_eeprom ar9300_h112 = {
 	},
 	.base_ext1 = {
 		.ant_div_control = 0,
-		.future = {0, 0, 0},
+		.future = {0, 0},
 		.tempslopextension = {0, 0, 0, 0, 0, 0, 0, 0}
 	},
 	.calFreqPier2G = {
@@ -1486,6 +1491,7 @@ static const struct ar9300_eeprom ar9300_h112 = {
 		.thresh62 = 28,
 		.papdRateMaskHt20 = LE32(0x0cf0e0e0),
 		.papdRateMaskHt40 = LE32(0x6cf0e0e0),
+		.switchcomspdt = 0,
 		.xlna_bias_strength = 0,
 		.futureModal = {
 			0, 0, 0, 0, 0, 0, 0,
@@ -1861,6 +1867,7 @@ static const struct ar9300_eeprom ar9300_x112 = {
 		.thresh62 = 28,
 		.papdRateMaskHt20 = LE32(0x0c80c080),
 		.papdRateMaskHt40 = LE32(0x0080c080),
+		.switchcomspdt = 0,
 		.xlna_bias_strength = 0,
 		.futureModal = {
 			0, 0, 0, 0, 0, 0, 0,
@@ -1868,7 +1875,7 @@ static const struct ar9300_eeprom ar9300_x112 = {
 	},
 	.base_ext1 = {
 		.ant_div_control = 0,
-		.future = {0, 0, 0},
+		.future = {0, 0},
 		.tempslopextension = {0, 0, 0, 0, 0, 0, 0, 0}
 	},
 	.calFreqPier2G = {
@@ -2063,6 +2070,7 @@ static const struct ar9300_eeprom ar9300_x112 = {
 		.thresh62 = 28,
 		.papdRateMaskHt20 = LE32(0x0cf0e0e0),
 		.papdRateMaskHt40 = LE32(0x6cf0e0e0),
+		.switchcomspdt = 0,
 		.xlna_bias_strength = 0,
 		.futureModal = {
 			0, 0, 0, 0, 0, 0, 0,
@@ -2437,6 +2445,7 @@ static const struct ar9300_eeprom ar9300_h116 = {
 		.thresh62 = 28,
 		.papdRateMaskHt20 = LE32(0x0c80C080),
 		.papdRateMaskHt40 = LE32(0x0080C080),
+		.switchcomspdt = 0,
 		.xlna_bias_strength = 0,
 		.futureModal = {
 			0, 0, 0, 0, 0, 0, 0,
@@ -2444,7 +2453,7 @@ static const struct ar9300_eeprom ar9300_h116 = {
 	 },
 	 .base_ext1 = {
 		.ant_div_control = 0,
-		.future = {0, 0, 0},
+		.future = {0, 0},
 		.tempslopextension = {0, 0, 0, 0, 0, 0, 0, 0}
 	 },
 	.calFreqPier2G = {
@@ -2639,6 +2648,7 @@ static const struct ar9300_eeprom ar9300_h116 = {
 		.thresh62 = 28,
 		.papdRateMaskHt20 = LE32(0x0cf0e0e0),
 		.papdRateMaskHt40 = LE32(0x6cf0e0e0),
+		.switchcomspdt = 0,
 		.xlna_bias_strength = 0,
 		.futureModal = {
 			0, 0, 0, 0, 0, 0, 0,
@@ -3588,7 +3598,7 @@ static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
 	if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
 		REG_RMW_FIELD(ah, AR_PHY_SWITCH_COM,
 				AR_SWITCH_TABLE_COM_AR9462_ALL, value);
-	} else if (AR_SREV_9550(ah)) {
+	} else if (AR_SREV_9550(ah) || AR_SREV_9531(ah)) {
 		REG_RMW_FIELD(ah, AR_PHY_SWITCH_COM,
 				AR_SWITCH_TABLE_COM_AR9550_ALL, value);
 	} else
@@ -3965,7 +3975,7 @@ static void ar9003_hw_apply_tuning_caps(struct ath_hw *ah)
 	struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
 	u8 tuning_caps_param = eep->baseEepHeader.params_for_tuning_caps[0];
 
-	if (AR_SREV_9485(ah) || AR_SREV_9330(ah) || AR_SREV_9340(ah))
+	if (AR_SREV_9340(ah) || AR_SREV_9531(ah))
 		return;
 
 	if (eep->baseEepHeader.featureEnable & 0x40) {
@@ -4020,7 +4030,10 @@ static void ar9003_hw_xpa_timing_control_apply(struct ath_hw *ah, bool is2ghz)
 	if (!(eep->baseEepHeader.featureEnable & 0x80))
 		return;
 
-	if (!AR_SREV_9300(ah) && !AR_SREV_9340(ah) && !AR_SREV_9580(ah))
+	if (!AR_SREV_9300(ah) &&
+	    !AR_SREV_9340(ah) &&
+	    !AR_SREV_9580(ah) &&
+	    !AR_SREV_9531(ah))
 		return;
 
 	xpa_ctl = ar9003_modal_header(ah, is2ghz)->txFrameToXpaOn;
@@ -4111,6 +4124,37 @@ static void ar9003_hw_thermo_cal_apply(struct ath_hw *ah)
 	}
 }
 
+static void ar9003_hw_apply_minccapwr_thresh(struct ath_hw *ah,
+					     bool is2ghz)
+{
+	struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+	const u32 cca_ctrl[AR9300_MAX_CHAINS] = {
+		AR_PHY_CCA_CTRL_0,
+		AR_PHY_CCA_CTRL_1,
+		AR_PHY_CCA_CTRL_2,
+	};
+	int chain;
+	u32 val;
+
+	if (is2ghz) {
+		if (!(eep->base_ext1.misc_enable & BIT(2)))
+			return;
+	} else {
+		if (!(eep->base_ext1.misc_enable & BIT(3)))
+			return;
+	}
+
+	for (chain = 0; chain < AR9300_MAX_CHAINS; chain++) {
+		if (!(ah->caps.tx_chainmask & BIT(chain)))
+			continue;
+
+		val = ar9003_modal_header(ah, is2ghz)->noiseFloorThreshCh[chain];
+		REG_RMW_FIELD(ah, cca_ctrl[chain],
+			      AR_PHY_EXT_CCA0_THRESH62_1, val);
+	}
+}
+
 static void ath9k_hw_ar9300_set_board_values(struct ath_hw *ah,
 					     struct ath9k_channel *chan)
 {
@@ -4122,9 +4166,10 @@ static void ath9k_hw_ar9300_set_board_values(struct ath_hw *ah,
 	ar9003_hw_xlna_bias_strength_apply(ah, is2ghz);
 	ar9003_hw_atten_apply(ah, chan);
 	ar9003_hw_quick_drop_apply(ah, chan->channel);
-	if (!AR_SREV_9330(ah) && !AR_SREV_9340(ah) && !AR_SREV_9550(ah))
+	if (!AR_SREV_9330(ah) && !AR_SREV_9340(ah) && !AR_SREV_9531(ah))
 		ar9003_hw_internal_regulator_apply(ah);
 	ar9003_hw_apply_tuning_caps(ah);
+	ar9003_hw_apply_minccapwr_thresh(ah, is2ghz);
 	ar9003_hw_txend_to_xpa_off_apply(ah, is2ghz);
 	ar9003_hw_thermometer_apply(ah);
 	ar9003_hw_thermo_cal_apply(ah);
@@ -4746,7 +4791,7 @@ static void ar9003_hw_power_control_override(struct ath_hw *ah,
 	}
 
 tempslope:
-	if (AR_SREV_9550(ah)) {
+	if (AR_SREV_9550(ah) || AR_SREV_9531(ah)) {
 		/*
 		 * AR955x has tempSlope register for each chain.
 		 * Check whether temp_compensation feature is enabled or not.
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
index 0e5daa58a4fc..694ca2e680e5 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
@@ -270,10 +270,20 @@ struct cal_ctl_data_5g {
 	u8 ctlEdges[AR9300_NUM_BAND_EDGES_5G];
 } __packed;
 
+#define MAX_BASE_EXTENSION_FUTURE 2
+
 struct ar9300_BaseExtension_1 {
 	u8 ant_div_control;
-	u8 future[3];
-	u8 tempslopextension[8];
+	u8 future[MAX_BASE_EXTENSION_FUTURE];
+	/*
+	 * misc_enable:
+	 *
+	 * BIT 0   - TX Gain Cap enable.
+	 * BIT 1   - Uncompressed Checksum enable.
+	 * BIT 2/3 - MinCCApwr enable 2g/5g.
+	 */
+	u8 misc_enable;
+	int8_t tempslopextension[8];
 	int8_t quick_drop_low;
 	int8_t quick_drop_high;
 } __packed;
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
index 20e49095db2a..ec1da0cc25f5 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
@@ -17,6 +17,7 @@
 #include "hw.h"
 #include "ar9003_mac.h"
 #include "ar9003_2p2_initvals.h"
+#include "ar9003_buffalo_initvals.h"
 #include "ar9485_initvals.h"
 #include "ar9340_initvals.h"
 #include "ar9330_1p1_initvals.h"
@@ -26,6 +27,8 @@
 #include "ar9462_2p0_initvals.h"
 #include "ar9462_2p1_initvals.h"
 #include "ar9565_1p0_initvals.h"
+#include "ar9565_1p1_initvals.h"
+#include "ar953x_initvals.h"
 
 /* General hardware code for the AR9003 hardware family */
 
@@ -148,7 +151,11 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
 				ar9340Modes_high_ob_db_tx_gain_table_1p0);
 
 		INIT_INI_ARRAY(&ah->iniModesFastClock,
-				ar9340Modes_fast_clock_1p0);
+			       ar9340Modes_fast_clock_1p0);
+		INIT_INI_ARRAY(&ah->iniCckfirJapan2484,
+			       ar9340_1p0_baseband_core_txfir_coeff_japan_2484);
+		INIT_INI_ARRAY(&ah->ini_dfs,
+			       ar9340_1p0_baseband_postamble_dfs_channel);
 
 		if (!ah->is_clk_25mhz)
 			INIT_INI_ARRAY(&ah->iniAdditional,
@@ -223,6 +230,10 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
 			       ar9462_2p1_modes_fast_clock);
 		INIT_INI_ARRAY(&ah->iniCckfirJapan2484,
 			       ar9462_2p1_baseband_core_txfir_coeff_japan_2484);
+		INIT_INI_ARRAY(&ah->iniPcieSerdes,
+			       ar9462_2p1_pciephy_clkreq_disable_L1);
+		INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
+			       ar9462_2p1_pciephy_clkreq_disable_L1);
 	} else if (AR_SREV_9462_20(ah)) {
 
 		INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE], ar9462_2p0_mac_core);
@@ -247,18 +258,18 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
 				ar9462_2p0_soc_postamble);
 
 		INIT_INI_ARRAY(&ah->iniModesRxGain,
-				ar9462_common_rx_gain_table_2p0);
+				ar9462_2p0_common_rx_gain);
 
 		/* Awake -> Sleep Setting */
 		INIT_INI_ARRAY(&ah->iniPcieSerdes,
-			       ar9462_pciephy_clkreq_disable_L1_2p0);
+			       ar9462_2p0_pciephy_clkreq_disable_L1);
 		/* Sleep -> Awake Setting */
 		INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
-			       ar9462_pciephy_clkreq_disable_L1_2p0);
+			       ar9462_2p0_pciephy_clkreq_disable_L1);
 
 		/* Fast clock modal settings */
 		INIT_INI_ARRAY(&ah->iniModesFastClock,
-				ar9462_modes_fast_clock_2p0);
+				ar9462_2p0_modes_fast_clock);
 
 		INIT_INI_ARRAY(&ah->iniCckfirJapan2484,
 			       ar9462_2p0_baseband_core_txfir_coeff_japan_2484);
@@ -298,6 +309,31 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
 		/* Fast clock modal settings */
 		INIT_INI_ARRAY(&ah->iniModesFastClock,
 				ar955x_1p0_modes_fast_clock);
+	} else if (AR_SREV_9531(ah)) {
+		INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
+			       qca953x_1p0_mac_core);
+		INIT_INI_ARRAY(&ah->iniMac[ATH_INI_POST],
+			       qca953x_1p0_mac_postamble);
+		INIT_INI_ARRAY(&ah->iniBB[ATH_INI_CORE],
+			       qca953x_1p0_baseband_core);
+		INIT_INI_ARRAY(&ah->iniBB[ATH_INI_POST],
+			       qca953x_1p0_baseband_postamble);
+		INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_CORE],
+			       qca953x_1p0_radio_core);
+		INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_POST],
+			       qca953x_1p0_radio_postamble);
+		INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_PRE],
+			       qca953x_1p0_soc_preamble);
+		INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_POST],
+			       qca953x_1p0_soc_postamble);
+		INIT_INI_ARRAY(&ah->iniModesRxGain,
+			       qca953x_1p0_common_wo_xlna_rx_gain_table);
+		INIT_INI_ARRAY(&ah->ini_modes_rx_gain_bounds,
+			       qca953x_1p0_common_wo_xlna_rx_gain_bounds);
+		INIT_INI_ARRAY(&ah->iniModesTxGain,
+			       qca953x_1p0_modes_no_xpa_tx_gain_table);
+		INIT_INI_ARRAY(&ah->iniModesFastClock,
+			       qca953x_1p0_modes_fast_clock);
 	} else if (AR_SREV_9580(ah)) {
 		/* mac */
 		INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
@@ -330,7 +366,46 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
 				ar9580_1p0_low_ob_db_tx_gain_table);
 
 		INIT_INI_ARRAY(&ah->iniModesFastClock,
-				ar9580_1p0_modes_fast_clock);
+			       ar9580_1p0_modes_fast_clock);
+		INIT_INI_ARRAY(&ah->iniCckfirJapan2484,
+			       ar9580_1p0_baseband_core_txfir_coeff_japan_2484);
+		INIT_INI_ARRAY(&ah->ini_dfs,
+			       ar9580_1p0_baseband_postamble_dfs_channel);
+	} else if (AR_SREV_9565_11_OR_LATER(ah)) {
+		INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
+			       ar9565_1p1_mac_core);
+		INIT_INI_ARRAY(&ah->iniMac[ATH_INI_POST],
+			       ar9565_1p1_mac_postamble);
+
+		INIT_INI_ARRAY(&ah->iniBB[ATH_INI_CORE],
+			       ar9565_1p1_baseband_core);
+		INIT_INI_ARRAY(&ah->iniBB[ATH_INI_POST],
+			       ar9565_1p1_baseband_postamble);
+
+		INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_CORE],
+			       ar9565_1p1_radio_core);
+		INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_POST],
+			       ar9565_1p1_radio_postamble);
+
+		INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_PRE],
+			       ar9565_1p1_soc_preamble);
+		INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_POST],
+			       ar9565_1p1_soc_postamble);
+
+		INIT_INI_ARRAY(&ah->iniModesRxGain,
+			       ar9565_1p1_Common_rx_gain_table);
+		INIT_INI_ARRAY(&ah->iniModesTxGain,
+			       ar9565_1p1_Modes_lowest_ob_db_tx_gain_table);
+
+		INIT_INI_ARRAY(&ah->iniPcieSerdes,
+			       ar9565_1p1_pciephy_clkreq_disable_L1);
+		INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
+			       ar9565_1p1_pciephy_clkreq_disable_L1);
+
+		INIT_INI_ARRAY(&ah->iniModesFastClock,
+				ar9565_1p1_modes_fast_clock);
+		INIT_INI_ARRAY(&ah->iniCckfirJapan2484,
+			       ar9565_1p1_baseband_core_txfir_coeff_japan_2484);
 	} else if (AR_SREV_9565(ah)) {
 		INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
 			       ar9565_1p0_mac_core);
@@ -411,7 +486,11 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
 
 		/* Fast clock modal settings */
 		INIT_INI_ARRAY(&ah->iniModesFastClock,
-				ar9300Modes_fast_clock_2p2);
+			       ar9300Modes_fast_clock_2p2);
+		INIT_INI_ARRAY(&ah->iniCckfirJapan2484,
+			       ar9300_2p2_baseband_core_txfir_coeff_japan_2484);
+		INIT_INI_ARRAY(&ah->ini_dfs,
+			       ar9300_2p2_baseband_postamble_dfs_channel);
 	}
 }
 
@@ -432,6 +511,9 @@ static void ar9003_tx_gain_table_mode0(struct ath_hw *ah)
 	else if (AR_SREV_9550(ah))
 		INIT_INI_ARRAY(&ah->iniModesTxGain,
 			ar955x_1p0_modes_xpa_tx_gain_table);
+	else if (AR_SREV_9531(ah))
+		INIT_INI_ARRAY(&ah->iniModesTxGain,
+			qca953x_1p0_modes_xpa_tx_gain_table);
 	else if (AR_SREV_9580(ah))
 		INIT_INI_ARRAY(&ah->iniModesTxGain,
 			ar9580_1p0_lowest_ob_db_tx_gain_table);
@@ -440,7 +522,10 @@ static void ar9003_tx_gain_table_mode0(struct ath_hw *ah)
 			ar9462_2p1_modes_low_ob_db_tx_gain);
 	else if (AR_SREV_9462_20(ah))
 		INIT_INI_ARRAY(&ah->iniModesTxGain,
-			ar9462_modes_low_ob_db_tx_gain_table_2p0);
+			ar9462_2p0_modes_low_ob_db_tx_gain);
+	else if (AR_SREV_9565_11(ah))
+		INIT_INI_ARRAY(&ah->iniModesTxGain,
+			       ar9565_1p1_modes_low_ob_db_tx_gain_table);
 	else if (AR_SREV_9565(ah))
 		INIT_INI_ARRAY(&ah->iniModesTxGain,
 			       ar9565_1p0_modes_low_ob_db_tx_gain_table);
@@ -469,12 +554,22 @@ static void ar9003_tx_gain_table_mode1(struct ath_hw *ah)
 	else if (AR_SREV_9550(ah))
 		INIT_INI_ARRAY(&ah->iniModesTxGain,
 			ar955x_1p0_modes_no_xpa_tx_gain_table);
-	else if (AR_SREV_9462_21(ah))
+	else if (AR_SREV_9531(ah)) {
+		if (AR_SREV_9531_11(ah))
+			INIT_INI_ARRAY(&ah->iniModesTxGain,
+				       qca953x_1p1_modes_no_xpa_tx_gain_table);
+		else
+			INIT_INI_ARRAY(&ah->iniModesTxGain,
+				       qca953x_1p0_modes_no_xpa_tx_gain_table);
+	} else if (AR_SREV_9462_21(ah))
 		INIT_INI_ARRAY(&ah->iniModesTxGain,
 			ar9462_2p1_modes_high_ob_db_tx_gain);
 	else if (AR_SREV_9462_20(ah))
 		INIT_INI_ARRAY(&ah->iniModesTxGain,
-			ar9462_modes_high_ob_db_tx_gain_table_2p0);
+			ar9462_2p0_modes_high_ob_db_tx_gain);
+	else if (AR_SREV_9565_11(ah))
+		INIT_INI_ARRAY(&ah->iniModesTxGain,
+			       ar9565_1p1_modes_high_ob_db_tx_gain_table);
 	else if (AR_SREV_9565(ah))
 		INIT_INI_ARRAY(&ah->iniModesTxGain,
 			       ar9565_1p0_modes_high_ob_db_tx_gain_table);
@@ -500,6 +595,9 @@ static void ar9003_tx_gain_table_mode2(struct ath_hw *ah)
 	else if (AR_SREV_9580(ah))
 		INIT_INI_ARRAY(&ah->iniModesTxGain,
 			ar9580_1p0_low_ob_db_tx_gain_table);
+	else if (AR_SREV_9565_11(ah))
+		INIT_INI_ARRAY(&ah->iniModesTxGain,
+			       ar9565_1p1_modes_low_ob_db_tx_gain_table);
 	else if (AR_SREV_9565(ah))
 		INIT_INI_ARRAY(&ah->iniModesTxGain,
 			       ar9565_1p0_modes_low_ob_db_tx_gain_table);
@@ -525,12 +623,20 @@ static void ar9003_tx_gain_table_mode3(struct ath_hw *ah)
 	else if (AR_SREV_9580(ah))
 		INIT_INI_ARRAY(&ah->iniModesTxGain,
 			ar9580_1p0_high_power_tx_gain_table);
+	else if (AR_SREV_9565_11(ah))
+		INIT_INI_ARRAY(&ah->iniModesTxGain,
+			       ar9565_1p1_modes_high_power_tx_gain_table);
 	else if (AR_SREV_9565(ah))
 		INIT_INI_ARRAY(&ah->iniModesTxGain,
 			       ar9565_1p0_modes_high_power_tx_gain_table);
-	else
-		INIT_INI_ARRAY(&ah->iniModesTxGain,
-			ar9300Modes_high_power_tx_gain_table_2p2);
+	else if (ah->config.tx_gain_buffalo)
+		INIT_INI_ARRAY(&ah->iniModesTxGain,
+			       ar9300Modes_high_power_tx_gain_table_buffalo);
+	else
+		INIT_INI_ARRAY(&ah->iniModesTxGain,
+			       ar9300Modes_high_power_tx_gain_table_2p2);
 }
 
 static void ar9003_tx_gain_table_mode4(struct ath_hw *ah)
@@ -546,7 +652,7 @@ static void ar9003_tx_gain_table_mode4(struct ath_hw *ah)
 		       ar9462_2p1_modes_mix_ob_db_tx_gain);
 	else if (AR_SREV_9462_20(ah))
 		INIT_INI_ARRAY(&ah->iniModesTxGain,
-		       ar9462_modes_mix_ob_db_tx_gain_table_2p0);
+		       ar9462_2p0_modes_mix_ob_db_tx_gain);
 	else
 		INIT_INI_ARRAY(&ah->iniModesTxGain,
 			ar9300Modes_mixed_ob_db_tx_gain_table_2p2);
@@ -581,6 +687,13 @@ static void ar9003_tx_gain_table_mode6(struct ath_hw *ah)
 			ar9580_1p0_type6_tx_gain_table);
 }
 
+static void ar9003_tx_gain_table_mode7(struct ath_hw *ah)
+{
+	if (AR_SREV_9340(ah))
+		INIT_INI_ARRAY(&ah->iniModesTxGain,
+			       ar9340_cus227_tx_gain_table_1p0);
+}
+
 typedef void (*ath_txgain_tab)(struct ath_hw *ah);
 
 static void ar9003_tx_gain_table_apply(struct ath_hw *ah)
@@ -593,6 +706,7 @@ static void ar9003_tx_gain_table_apply(struct ath_hw *ah)
 		ar9003_tx_gain_table_mode4,
 		ar9003_tx_gain_table_mode5,
 		ar9003_tx_gain_table_mode6,
+		ar9003_tx_gain_table_mode7,
 	};
 	int idx = ar9003_hw_get_tx_gain_idx(ah);
 
@@ -621,6 +735,11 @@ static void ar9003_rx_gain_table_mode0(struct ath_hw *ah)
 				ar955x_1p0_common_rx_gain_table);
 		INIT_INI_ARRAY(&ah->ini_modes_rx_gain_bounds,
 				ar955x_1p0_common_rx_gain_bounds);
+	} else if (AR_SREV_9531(ah)) {
+		INIT_INI_ARRAY(&ah->iniModesRxGain,
+			       qca953x_1p0_common_rx_gain_table);
+		INIT_INI_ARRAY(&ah->ini_modes_rx_gain_bounds,
+			       qca953x_1p0_common_rx_gain_bounds);
 	} else if (AR_SREV_9580(ah))
 		INIT_INI_ARRAY(&ah->iniModesRxGain,
 				ar9580_1p0_rx_gain_table);
@@ -629,7 +748,10 @@ static void ar9003_rx_gain_table_mode0(struct ath_hw *ah)
 				ar9462_2p1_common_rx_gain);
 	else if (AR_SREV_9462_20(ah))
 		INIT_INI_ARRAY(&ah->iniModesRxGain,
-				ar9462_common_rx_gain_table_2p0);
+				ar9462_2p0_common_rx_gain);
+	else if (AR_SREV_9565_11(ah))
+		INIT_INI_ARRAY(&ah->iniModesRxGain,
+			       ar9565_1p1_Common_rx_gain_table);
 	else if (AR_SREV_9565(ah))
 		INIT_INI_ARRAY(&ah->iniModesRxGain,
 			       ar9565_1p0_Common_rx_gain_table);
@@ -657,15 +779,23 @@ static void ar9003_rx_gain_table_mode1(struct ath_hw *ah)
 			ar9462_2p1_common_wo_xlna_rx_gain);
 	else if (AR_SREV_9462_20(ah))
 		INIT_INI_ARRAY(&ah->iniModesRxGain,
-			ar9462_common_wo_xlna_rx_gain_table_2p0);
+			ar9462_2p0_common_wo_xlna_rx_gain);
 	else if (AR_SREV_9550(ah)) {
 		INIT_INI_ARRAY(&ah->iniModesRxGain,
 			ar955x_1p0_common_wo_xlna_rx_gain_table);
 		INIT_INI_ARRAY(&ah->ini_modes_rx_gain_bounds,
 			ar955x_1p0_common_wo_xlna_rx_gain_bounds);
+	} else if (AR_SREV_9531(ah)) {
+		INIT_INI_ARRAY(&ah->iniModesRxGain,
+			       qca953x_1p0_common_wo_xlna_rx_gain_table);
+		INIT_INI_ARRAY(&ah->ini_modes_rx_gain_bounds,
+			       qca953x_1p0_common_wo_xlna_rx_gain_bounds);
 	} else if (AR_SREV_9580(ah))
 		INIT_INI_ARRAY(&ah->iniModesRxGain,
 			ar9580_1p0_wo_xlna_rx_gain_table);
+	else if (AR_SREV_9565_11(ah))
+		INIT_INI_ARRAY(&ah->iniModesRxGain,
+			       ar9565_1p1_common_wo_xlna_rx_gain_table);
 	else if (AR_SREV_9565(ah))
 		INIT_INI_ARRAY(&ah->iniModesRxGain,
 			       ar9565_1p0_common_wo_xlna_rx_gain_table);
@@ -687,7 +817,7 @@ static void ar9003_rx_gain_table_mode2(struct ath_hw *ah)
 			       ar9462_2p1_baseband_postamble_5g_xlna);
 	} else if (AR_SREV_9462_20(ah)) {
 		INIT_INI_ARRAY(&ah->iniModesRxGain,
-			       ar9462_common_mixed_rx_gain_table_2p0);
+			       ar9462_2p0_common_mixed_rx_gain);
 		INIT_INI_ARRAY(&ah->ini_modes_rxgain_bb_core,
 			       ar9462_2p0_baseband_core_mix_rxgain);
 		INIT_INI_ARRAY(&ah->ini_modes_rxgain_bb_postamble,
@@ -701,12 +831,12 @@ static void ar9003_rx_gain_table_mode3(struct ath_hw *ah)
 {
 	if (AR_SREV_9462_21(ah)) {
 		INIT_INI_ARRAY(&ah->iniModesRxGain,
-			       ar9462_2p1_common_5g_xlna_only_rx_gain);
+			       ar9462_2p1_common_5g_xlna_only_rxgain);
 		INIT_INI_ARRAY(&ah->ini_modes_rxgain_5g_xlna,
 			       ar9462_2p1_baseband_postamble_5g_xlna);
 	} else if (AR_SREV_9462_20(ah)) {
 		INIT_INI_ARRAY(&ah->iniModesRxGain,
-			       ar9462_2p0_5g_xlna_only_rxgain);
+			       ar9462_2p0_common_5g_xlna_only_rxgain);
 		INIT_INI_ARRAY(&ah->ini_modes_rxgain_5g_xlna,
 			       ar9462_2p0_baseband_postamble_5g_xlna);
 	}
@@ -750,6 +880,9 @@ static void ar9003_hw_init_mode_gain_regs(struct ath_hw *ah)
 static void ar9003_hw_configpcipowersave(struct ath_hw *ah,
 					 bool power_off)
 {
+	unsigned int i;
+	struct ar5416IniArray *array;
+
 	/*
 	 * Increase L1 Entry Latency. Some WB222 boards don't have
 	 * this change in eeprom/OTP.
@@ -775,19 +908,125 @@ static void ar9003_hw_configpcipowersave(struct ath_hw *ah,
 	 * Configure PCIE after Ini init. SERDES values now come from ini file
 	 * This enables PCIe low power mode.
 	 */
-	if (ah->config.pcieSerDesWrite) {
-		unsigned int i;
-		struct ar5416IniArray *array;
+	array = power_off ? &ah->iniPcieSerdes :
+		&ah->iniPcieSerdesLowPower;
+
+	for (i = 0; i < array->ia_rows; i++) {
+		REG_WRITE(ah,
+			  INI_RA(array, i, 0),
+			  INI_RA(array, i, 1));
+	}
+}
+
+static void ar9003_hw_init_hang_checks(struct ath_hw *ah)
+{
+	/*
+	 * All chips support detection of BB/MAC hangs.
+	 */
+	ah->config.hw_hang_checks |= HW_BB_WATCHDOG;
+	ah->config.hw_hang_checks |= HW_MAC_HANG;
+
+	/*
+	 * This is not required for AR9580 1.0
+	 */
+	if (AR_SREV_9300_22(ah))
+		ah->config.hw_hang_checks |= HW_PHYRESTART_CLC_WAR;
+
+	if (AR_SREV_9330(ah))
+		ah->bb_watchdog_timeout_ms = 85;
+	else
+		ah->bb_watchdog_timeout_ms = 25;
+}
 
-		array = power_off ? &ah->iniPcieSerdes :
-				    &ah->iniPcieSerdesLowPower;
+/*
+ * MAC HW hang check
+ * =================
+ *
+ * Signature: dcu_chain_state is 0x6 and dcu_complete_state is 0x1.
+ *
+ * The state of each DCU chain (mapped to TX queues) is available from these
+ * DMA debug registers:
+ *
+ * Chain 0 state : Bits 4:0   of AR_DMADBG_4
+ * Chain 1 state : Bits 9:5   of AR_DMADBG_4
+ * Chain 2 state : Bits 14:10 of AR_DMADBG_4
+ * Chain 3 state : Bits 19:15 of AR_DMADBG_4
+ * Chain 4 state : Bits 24:20 of AR_DMADBG_4
+ * Chain 5 state : Bits 29:25 of AR_DMADBG_4
+ * Chain 6 state : Bits 4:0   of AR_DMADBG_5
+ * Chain 7 state : Bits 9:5   of AR_DMADBG_5
+ * Chain 8 state : Bits 14:10 of AR_DMADBG_5
+ * Chain 9 state : Bits 19:15 of AR_DMADBG_5
+ *
+ * The DCU chain state "0x6" means "WAIT_FRDONE" - wait for TX frame to be done.
+ */
+
+#define NUM_STATUS_READS 50
+
+static bool ath9k_hw_verify_hang(struct ath_hw *ah, unsigned int queue)
+{
+	u32 dma_dbg_chain, dma_dbg_complete;
+	u8 dcu_chain_state, dcu_complete_state;
+	int i;
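+	/*
+	 * Re-read the DMA debug registers NUM_STATUS_READS times; the hang
+	 * is reported only if the signature is seen on every read.
+	 */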
+
+	for (i = 0; i < NUM_STATUS_READS; i++) {
+		if (queue < 6)
+			dma_dbg_chain = REG_READ(ah, AR_DMADBG_4);
+		else
+			dma_dbg_chain = REG_READ(ah, AR_DMADBG_5);
+
+		dma_dbg_complete = REG_READ(ah, AR_DMADBG_6);
+
+		dcu_chain_state = (dma_dbg_chain >> (5 * (queue % 6))) & 0x1f;
+		dcu_complete_state = dma_dbg_complete & 0x3;
+
+		if ((dcu_chain_state != 0x6) || (dcu_complete_state != 0x1))
+			return false;
+	}
+
+	ath_dbg(ath9k_hw_common(ah), RESET,
+		"MAC Hang signature found for queue: %d\n", queue);
+
+	return true;
+}
+
+static bool ar9003_hw_detect_mac_hang(struct ath_hw *ah)
+{
+	u32 dma_dbg_4, dma_dbg_5, dma_dbg_6, chk_dbg;
+	u8 dcu_chain_state, dcu_complete_state;
+	bool dcu_wait_frdone = false;
+	unsigned long chk_dcu = 0;
+	unsigned int i = 0;
+
+	dma_dbg_4 = REG_READ(ah, AR_DMADBG_4);
+	dma_dbg_5 = REG_READ(ah, AR_DMADBG_5);
+	dma_dbg_6 = REG_READ(ah, AR_DMADBG_6);
+
+	dcu_complete_state = dma_dbg_6 & 0x3;
+	if (dcu_complete_state != 0x1)
+		goto exit;
+
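+	/* Collect all queues whose DCU chain state is WAIT_FRDONE (0x6). */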
+	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
+		if (i < 6)
+			chk_dbg = dma_dbg_4;
+		else
+			chk_dbg = dma_dbg_5;
+
+		dcu_chain_state = (chk_dbg >> (5 * (i % 6))) & 0x1f;
+		if (dcu_chain_state == 0x6) {
+			dcu_wait_frdone = true;
+			chk_dcu |= BIT(i);
+		}
+	}
 
-		for (i = 0; i < array->ia_rows; i++) {
-			REG_WRITE(ah,
-				  INI_RA(array, i, 0),
-				  INI_RA(array, i, 1));
+	if ((dcu_complete_state == 0x1) && dcu_wait_frdone) {
+		for_each_set_bit(i, &chk_dcu, ATH9K_NUM_TX_QUEUES) {
+			if (ath9k_hw_verify_hang(ah, i))
+				return true;
 		}
 	}
+exit:
+	return false;
 }
 
 /* Sets up the AR9003 hardware family callbacks */
@@ -798,6 +1037,8 @@ void ar9003_hw_attach_ops(struct ath_hw *ah)
 
 	ar9003_hw_init_mode_regs(ah);
 	priv_ops->init_mode_gain_regs = ar9003_hw_init_mode_gain_regs;
+	priv_ops->init_hang_checks = ar9003_hw_init_hang_checks;
+	priv_ops->detect_mac_hang = ar9003_hw_detect_mac_hang;
 
 	ops->config_pci_powersave = ar9003_hw_configpcipowersave;
 
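
The MAC hang check added to ar9003_hw.c above looks for a per-queue DCU chain state of 0x6 (WAIT_FRDONE) together with a DCU completion state of 0x1, read back from the AR_DMADBG_4/5/6 debug registers, and the driver re-reads those registers NUM_STATUS_READS (50) times so a transient WAIT_FRDONE state does not count as a hang. The standalone C sketch below only mirrors the bit extraction, following the register layout documented in the block comment (chains 0-5 in AR_DMADBG_4, chains 6-9 in AR_DMADBG_5, five bits per chain); the register snapshots and helper names are made up for illustration and this is not driver code.

/*
 * Illustrative model of the DCU hang signature: each chain state is a
 * 5-bit field (chains 0-5 in dbg4, chains 6-9 in dbg5) and the DCU
 * completion state is the low two bits of dbg6.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DCU_WAIT_FRDONE		0x6	/* chain waiting for TX frame done */
#define DCU_COMPLETE_BUSY	0x1

static uint8_t dcu_chain_state(uint32_t dbg4, uint32_t dbg5, unsigned int chain)
{
	uint32_t reg = (chain < 6) ? dbg4 : dbg5;
	unsigned int shift = 5 * (chain < 6 ? chain : chain - 6);

	return (reg >> shift) & 0x1f;
}

int main(void)
{
	/* Hypothetical register snapshot: chain 7 stuck in WAIT_FRDONE. */
	uint32_t dbg4 = 0;
	uint32_t dbg5 = (uint32_t)DCU_WAIT_FRDONE << 5;
	uint32_t dbg6 = DCU_COMPLETE_BUSY;
	unsigned int chain;

	for (chain = 0; chain < 10; chain++) {
		bool hung = dcu_chain_state(dbg4, dbg5, chain) == DCU_WAIT_FRDONE &&
			    (dbg6 & 0x3) == DCU_COMPLETE_BUSY;

		if (hung)
			printf("MAC hang signature on DCU chain %u\n", chain);
	}

	return 0;
}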
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
index f6c5c1b50471..729ffbf07343 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
@@ -175,7 +175,8 @@ static void ar9003_hw_set_desc_link(void *ds, u32 ds_link)
 	ads->ctl10 |= ar9003_calc_ptr_chksum(ads);
 }
 
-static bool ar9003_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
+static bool ar9003_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked,
+			      u32 *sync_cause_p)
 {
 	u32 isr = 0;
 	u32 mask2 = 0;
@@ -310,7 +311,8 @@ static bool ar9003_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
 		ar9003_mci_get_isr(ah, masked);
 
 	if (sync_cause) {
-		ath9k_debug_sync_cause(common, sync_cause);
+		if (sync_cause_p)
+			*sync_cause_p = sync_cause;
 		fatal_int =
 			(sync_cause &
 			 (AR_INTR_SYNC_HOST1_FATAL | AR_INTR_SYNC_HOST1_PERR))
@@ -476,12 +478,12 @@ int ath9k_hw_process_rxdesc_edma(struct ath_hw *ah, struct ath_rx_status *rxs,
 
 	/* XXX: Keycache */
 	rxs->rs_rssi = MS(rxsp->status5, AR_RxRSSICombined);
-	rxs->rs_rssi_ctl0 = MS(rxsp->status1, AR_RxRSSIAnt00);
-	rxs->rs_rssi_ctl1 = MS(rxsp->status1, AR_RxRSSIAnt01);
-	rxs->rs_rssi_ctl2 = MS(rxsp->status1, AR_RxRSSIAnt02);
-	rxs->rs_rssi_ext0 = MS(rxsp->status5, AR_RxRSSIAnt10);
-	rxs->rs_rssi_ext1 = MS(rxsp->status5, AR_RxRSSIAnt11);
-	rxs->rs_rssi_ext2 = MS(rxsp->status5, AR_RxRSSIAnt12);
+	rxs->rs_rssi_ctl[0] = MS(rxsp->status1, AR_RxRSSIAnt00);
+	rxs->rs_rssi_ctl[1] = MS(rxsp->status1, AR_RxRSSIAnt01);
+	rxs->rs_rssi_ctl[2] = MS(rxsp->status1, AR_RxRSSIAnt02);
+	rxs->rs_rssi_ext[0] = MS(rxsp->status5, AR_RxRSSIAnt10);
+	rxs->rs_rssi_ext[1] = MS(rxsp->status5, AR_RxRSSIAnt11);
+	rxs->rs_rssi_ext[2] = MS(rxsp->status5, AR_RxRSSIAnt12);
 
 	if (rxsp->status11 & AR_RxKeyIdxValid)
 		rxs->rs_keyix = MS(rxsp->status11, AR_KeyIdx);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
index d39b79f5e841..09facba1dc6d 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -103,7 +103,7 @@ static int ar9003_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
 			} else {
 				channelSel = CHANSEL_2G(freq) >> 1;
 			}
-		} else if (AR_SREV_9550(ah)) {
+		} else if (AR_SREV_9550(ah) || AR_SREV_9531(ah)) {
 			if (ah->is_clk_25mhz)
 				div = 75;
 			else
@@ -118,7 +118,7 @@ static int ar9003_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
 		/* Set to 2G mode */
 		bMode = 1;
 	} else {
-		if ((AR_SREV_9340(ah) || AR_SREV_9550(ah)) &&
+		if ((AR_SREV_9340(ah) || AR_SREV_9550(ah) || AR_SREV_9531(ah)) &&
 		    ah->is_clk_25mhz) {
 			channelSel = freq / 75;
 			chan_frac = ((freq % 75) * 0x20000) / 75;
@@ -641,11 +641,12 @@ static void ar9003_hw_override_ini(struct ath_hw *ah)
 		else
 			ah->enabled_cals &= ~TX_IQ_CAL;
 
-		if (REG_READ(ah, AR_PHY_CL_CAL_CTL) & AR_PHY_CL_CAL_ENABLE)
-			ah->enabled_cals |= TX_CL_CAL;
-		else
-			ah->enabled_cals &= ~TX_CL_CAL;
 	}
+
+	if (REG_READ(ah, AR_PHY_CL_CAL_CTL) & AR_PHY_CL_CAL_ENABLE)
+		ah->enabled_cals |= TX_CL_CAL;
+	else
+		ah->enabled_cals &= ~TX_CL_CAL;
 }
 
 static void ar9003_hw_prog_ini(struct ath_hw *ah,
@@ -809,10 +810,12 @@ static int ar9003_hw_process_ini(struct ath_hw *ah,
 	/*
 	 * TXGAIN initvals.
 	 */
-	if (AR_SREV_9550(ah)) {
-		int modes_txgain_index;
+	if (AR_SREV_9550(ah) || AR_SREV_9531(ah)) {
+		int modes_txgain_index = 1;
+
+		if (AR_SREV_9550(ah))
+			modes_txgain_index = ar9550_hw_get_modes_txgain_index(ah, chan);
 
-		modes_txgain_index = ar9550_hw_get_modes_txgain_index(ah, chan);
 		if (modes_txgain_index < 0)
 			return -EINVAL;
 
@@ -1331,6 +1334,7 @@ static void ar9003_hw_ani_cache_ini_regs(struct ath_hw *ah)
 static void ar9003_hw_set_radar_params(struct ath_hw *ah,
 				       struct ath_hw_radar_conf *conf)
 {
+	unsigned int regWrites = 0;
 	u32 radar_0 = 0, radar_1 = 0;
 
 	if (!conf) {
@@ -1357,6 +1361,11 @@ static void ar9003_hw_set_radar_params(struct ath_hw *ah,
 		REG_SET_BIT(ah, AR_PHY_RADAR_EXT, AR_PHY_RADAR_EXT_ENA);
 	else
 		REG_CLR_BIT(ah, AR_PHY_RADAR_EXT, AR_PHY_RADAR_EXT_ENA);
+
+	if (AR_SREV_9300(ah) || AR_SREV_9340(ah) || AR_SREV_9580(ah)) {
+		REG_WRITE_ARRAY(&ah->ini_dfs,
+				IS_CHAN_HT40(ah->curchan) ? 2 : 1, regWrites);
+	}
 }
 
 static void ar9003_hw_set_radar_conf(struct ath_hw *ah)
@@ -1807,6 +1816,68 @@ void ar9003_hw_attach_phy_ops(struct ath_hw *ah)
 	memcpy(ah->nf_regs, ar9300_cca_regs, sizeof(ah->nf_regs));
 }
 
+/*
+ * Baseband Watchdog signatures:
+ *
+ * 0x04000539: BB hang when operating in HT40 DFS Channel.
+ *             Full chip reset is not required, but a recovery
+ *             mechanism is needed.
+ *
+ * 0x1300000a: Related to CAC deafness.
+ *             Chip reset is not required.
+ *
+ * 0x0400000a: Related to CAC deafness.
+ *             Full chip reset is required.
+ *
+ * 0x04000b09: RX state machine gets into an illegal state
+ *             when a packet with unsupported rate is received.
+ *             Full chip reset is required and PHY_RESTART has
+ *             to be disabled.
+ *
+ * 0x04000409: Packet stuck on receive.
+ *             Full chip reset is required for all chips except AR9340.
+ */
+
+/*
+ * ar9003_hw_bb_watchdog_check(): Returns true if a chip reset is required.
+ */
+bool ar9003_hw_bb_watchdog_check(struct ath_hw *ah)
+{
+	u32 val;
+
+	switch (ah->bb_watchdog_last_status) {
+	case 0x04000539:
+		val = REG_READ(ah, AR_PHY_RADAR_0);
+		val &= (~AR_PHY_RADAR_0_FIRPWR);
+		val |= SM(0x7f, AR_PHY_RADAR_0_FIRPWR);
+		REG_WRITE(ah, AR_PHY_RADAR_0, val);
+		udelay(1);
+		val = REG_READ(ah, AR_PHY_RADAR_0);
+		val &= ~AR_PHY_RADAR_0_FIRPWR;
+		val |= SM(AR9300_DFS_FIRPWR, AR_PHY_RADAR_0_FIRPWR);
+		REG_WRITE(ah, AR_PHY_RADAR_0, val);
+
+		return false;
+	case 0x1300000a:
+		return false;
+	case 0x0400000a:
+	case 0x04000b09:
+		return true;
+	case 0x04000409:
+		if (AR_SREV_9340(ah) || AR_SREV_9531(ah))
+			return false;
+		else
+			return true;
+	default:
+		/*
+		 * For any other unknown signatures, do a
+		 * full chip reset.
+		 */
+		return true;
+	}
+}
+EXPORT_SYMBOL(ar9003_hw_bb_watchdog_check);
+
 void ar9003_hw_bb_watchdog_config(struct ath_hw *ah)
 {
 	struct ath_common *common = ath9k_hw_common(ah);
@@ -1923,6 +1994,7 @@ EXPORT_SYMBOL(ar9003_hw_bb_watchdog_dbg_info);
 
 void ar9003_hw_disable_phy_restart(struct ath_hw *ah)
 {
+	u8 result;
 	u32 val;
 
 	/* While receiving unsupported rate frame rx state machine
@@ -1930,15 +2002,13 @@ void ar9003_hw_disable_phy_restart(struct ath_hw *ah)
 	 * state, BB would go hang. If RXSM is in 0xb state after
 	 * first bb panic, ensure to disable the phy_restart.
 	 */
-	if (!((MS(ah->bb_watchdog_last_status,
-		  AR_PHY_WATCHDOG_RX_OFDM_SM) == 0xb) ||
-	    ah->bb_hang_rx_ofdm))
-		return;
+	result = MS(ah->bb_watchdog_last_status, AR_PHY_WATCHDOG_RX_OFDM_SM);
 
-	ah->bb_hang_rx_ofdm = true;
-	val = REG_READ(ah, AR_PHY_RESTART);
-	val &= ~AR_PHY_RESTART_ENA;
-
-	REG_WRITE(ah, AR_PHY_RESTART, val);
+	if ((result == 0xb) || ah->bb_hang_rx_ofdm) {
+		ah->bb_hang_rx_ofdm = true;
+		val = REG_READ(ah, AR_PHY_RESTART);
+		val &= ~AR_PHY_RESTART_ENA;
+		REG_WRITE(ah, AR_PHY_RESTART, val);
+	}
 }
 EXPORT_SYMBOL(ar9003_hw_disable_phy_restart);
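
The baseband watchdog signature table added to ar9003_phy.c above pairs each known status value with a recovery policy. Purely as an illustration, the sketch below restates the ar9003_hw_bb_watchdog_check() switch as a lookup table with the same reset decisions (including the AR9340/AR9531 exception for 0x04000409 and "reset on unknown signature"); it omits the FIRPWR register toggle done for 0x04000539, and the struct and helper names are invented for the example.

/*
 * Sketch only: BB watchdog signature -> "full chip reset required?".
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct bb_sig {
	uint32_t status;
	bool needs_reset;
	bool skip_on_ar9340_ar9531;	/* no reset needed on AR9340/AR9531 */
};

static const struct bb_sig bb_sigs[] = {
	{ 0x04000539, false, false },	/* HT40 DFS hang, recovery only */
	{ 0x1300000a, false, false },	/* CAC deafness, no chip reset */
	{ 0x0400000a, true,  false },	/* CAC deafness, full chip reset */
	{ 0x04000b09, true,  false },	/* RX state machine illegal state */
	{ 0x04000409, true,  true  },	/* packet stuck on receive */
};

static bool bb_watchdog_needs_reset(uint32_t status, bool is_ar9340_or_ar9531)
{
	unsigned int i;

	for (i = 0; i < sizeof(bb_sigs) / sizeof(bb_sigs[0]); i++) {
		if (bb_sigs[i].status != status)
			continue;
		if (bb_sigs[i].skip_on_ar9340_ar9531 && is_ar9340_or_ar9531)
			return false;
		return bb_sigs[i].needs_reset;
	}

	return true;	/* unknown signature: play it safe, full chip reset */
}

int main(void)
{
	printf("%d\n", bb_watchdog_needs_reset(0x04000409, true));	/* 0 */
	printf("%d\n", bb_watchdog_needs_reset(0x12345678, false));	/* 1 */
	return 0;
}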
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
index 2af667beb273..fd090b1f2d0f 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
@@ -270,7 +270,7 @@
 #define AR_PHY_AGC              (AR_AGC_BASE + 0x14)
 #define AR_PHY_EXT_ATTEN_CTL_0  (AR_AGC_BASE + 0x18)
 #define AR_PHY_CCA_0            (AR_AGC_BASE + 0x1c)
-#define AR_PHY_EXT_CCA0         (AR_AGC_BASE + 0x20)
+#define AR_PHY_CCA_CTRL_0       (AR_AGC_BASE + 0x20)
 #define AR_PHY_RESTART          (AR_AGC_BASE + 0x24)
 
 /*
@@ -338,17 +338,17 @@
 #define AR_PHY_CCA_NOM_VAL_9300_5GHZ          -115
 #define AR_PHY_CCA_MIN_GOOD_VAL_9300_2GHZ     -125
 #define AR_PHY_CCA_MIN_GOOD_VAL_9300_5GHZ     -125
-#define AR_PHY_CCA_MAX_GOOD_VAL_9300_2GHZ     -95
-#define AR_PHY_CCA_MAX_GOOD_VAL_9300_5GHZ     -100
+#define AR_PHY_CCA_MAX_GOOD_VAL_9300_2GHZ     -60
+#define AR_PHY_CCA_MAX_GOOD_VAL_9300_5GHZ     -60
+#define AR_PHY_CCA_MAX_GOOD_VAL_9300_FCC_2GHZ -95
+#define AR_PHY_CCA_MAX_GOOD_VAL_9300_FCC_5GHZ -100
 
 #define AR_PHY_CCA_NOM_VAL_9462_2GHZ          -127
 #define AR_PHY_CCA_MIN_GOOD_VAL_9462_2GHZ     -127
 #define AR_PHY_CCA_MAX_GOOD_VAL_9462_2GHZ     -60
-#define AR_PHY_CCA_MAX_GOOD_VAL_9462_FCC_2GHZ -95
 #define AR_PHY_CCA_NOM_VAL_9462_5GHZ          -127
 #define AR_PHY_CCA_MIN_GOOD_VAL_9462_5GHZ     -127
 #define AR_PHY_CCA_MAX_GOOD_VAL_9462_5GHZ     -60
-#define AR_PHY_CCA_MAX_GOOD_VAL_9462_FCC_5GHZ -100
 
 #define AR_PHY_CCA_NOM_VAL_9330_2GHZ          -118
 
@@ -397,6 +397,8 @@
 #define AR9280_PHY_CCA_THRESH62_S   12
 #define AR_PHY_EXT_CCA0_THRESH62    0x000000FF
 #define AR_PHY_EXT_CCA0_THRESH62_S  0
+#define AR_PHY_EXT_CCA0_THRESH62_1    0x000001FF
+#define AR_PHY_EXT_CCA0_THRESH62_1_S  0
 #define AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK          0x0000003F
 #define AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK_S        0
 #define AR_PHY_CCK_DETECT_ANT_SWITCH_TIME           0x00001FC0
@@ -667,6 +669,16 @@
 #define AR_PHY_65NM_CH1_RXTX4       0x1650c
 #define AR_PHY_65NM_CH2_RXTX4       0x1690c
 
+#define AR_PHY_65NM_CH0_BB1         0x16140
+#define AR_PHY_65NM_CH0_BB2         0x16144
+#define AR_PHY_65NM_CH0_BB3         0x16148
+#define AR_PHY_65NM_CH1_BB1         0x16540
+#define AR_PHY_65NM_CH1_BB2         0x16544
+#define AR_PHY_65NM_CH1_BB3         0x16548
+#define AR_PHY_65NM_CH2_BB1         0x16940
+#define AR_PHY_65NM_CH2_BB2         0x16944
+#define AR_PHY_65NM_CH2_BB3         0x16948
+
 #define AR_PHY_65NM_CH0_SYNTH12_VREFMUL3           0x00780000
 #define AR_PHY_65NM_CH0_SYNTH12_VREFMUL3_S         19
 #define AR_PHY_65NM_CH0_RXTX2_SYNTHON_MASK         0x00000004
@@ -1331,4 +1343,6 @@
 #define AR_PHY_65NM_RXRF_AGC_AGC_OUT                   0x00000004
 #define AR_PHY_65NM_RXRF_AGC_AGC_OUT_S                 2
 
+#define AR9300_DFS_FIRPWR -28
+
 #endif  /* AR9003_PHY_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_wow.c b/drivers/net/wireless/ath/ath9k/ar9003_wow.c
new file mode 100644
index 000000000000..81c88dd606dc
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9003_wow.c
@@ -0,0 +1,422 @@
+/*
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/export.h>
+#include "ath9k.h"
+#include "reg.h"
+#include "hw-ops.h"
+
+const char *ath9k_hw_wow_event_to_string(u32 wow_event)
+{
+	if (wow_event & AH_WOW_MAGIC_PATTERN_EN)
+		return "Magic pattern";
+	if (wow_event & AH_WOW_USER_PATTERN_EN)
+		return "User pattern";
+	if (wow_event & AH_WOW_LINK_CHANGE)
+		return "Link change";
+	if (wow_event & AH_WOW_BEACON_MISS)
+		return "Beacon miss";
+
+	return  "unknown reason";
+}
+EXPORT_SYMBOL(ath9k_hw_wow_event_to_string);
+
+static void ath9k_hw_set_powermode_wow_sleep(struct ath_hw *ah)
+{
+	struct ath_common *common = ath9k_hw_common(ah);
+
+	REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV);
+
+	/* set rx disable bit */
+	REG_WRITE(ah, AR_CR, AR_CR_RXD);
+
+	if (!ath9k_hw_wait(ah, AR_CR, AR_CR_RXE, 0, AH_WAIT_TIMEOUT)) {
+		ath_err(common, "Failed to stop Rx DMA in 10ms AR_CR=0x%08x AR_DIAG_SW=0x%08x\n",
+			REG_READ(ah, AR_CR), REG_READ(ah, AR_DIAG_SW));
+		return;
+	}
+
+	REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_ON_INT);
+}
+
+static void ath9k_wow_create_keep_alive_pattern(struct ath_hw *ah)
+{
+	struct ath_common *common = ath9k_hw_common(ah);
+	u8 sta_mac_addr[ETH_ALEN], ap_mac_addr[ETH_ALEN];
+	u32 ctl[13] = {0};
+	u32 data_word[KAL_NUM_DATA_WORDS];
+	u8 i;
+	u32 wow_ka_data_word0;
+
+	memcpy(sta_mac_addr, common->macaddr, ETH_ALEN);
+	memcpy(ap_mac_addr, common->curbssid, ETH_ALEN);
+
+	/* set the transmit buffer */
+	ctl[0] = (KAL_FRAME_LEN | (MAX_RATE_POWER << 16));
+	ctl[1] = 0;
+	ctl[3] = 0xb;	/* OFDM_6M hardware value for this rate */
+	ctl[4] = 0;
+	ctl[7] = (ah->txchainmask) << 2;
+	ctl[2] = 0xf << 16; /* tx_tries 0 */
+
+	for (i = 0; i < KAL_NUM_DESC_WORDS; i++)
+		REG_WRITE(ah, (AR_WOW_KA_DESC_WORD2 + i * 4), ctl[i]);
+
+	REG_WRITE(ah, (AR_WOW_KA_DESC_WORD2 + i * 4), ctl[i]);
+
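+	/*
+	 * Pack the keep-alive frame into 32-bit words: frame control and
+	 * duration first, then the AP and STA MAC addresses in little-endian
+	 * byte order, matching the keep-alive TX data buffer written below.
+	 */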
+	data_word[0] = (KAL_FRAME_TYPE << 2) | (KAL_FRAME_SUB_TYPE << 4) |
+		       (KAL_TO_DS << 8) | (KAL_DURATION_ID << 16);
+	data_word[1] = (ap_mac_addr[3] << 24) | (ap_mac_addr[2] << 16) |
+		       (ap_mac_addr[1] << 8) | (ap_mac_addr[0]);
+	data_word[2] = (sta_mac_addr[1] << 24) | (sta_mac_addr[0] << 16) |
+		       (ap_mac_addr[5] << 8) | (ap_mac_addr[4]);
+	data_word[3] = (sta_mac_addr[5] << 24) | (sta_mac_addr[4] << 16) |
+		       (sta_mac_addr[3] << 8) | (sta_mac_addr[2]);
+	data_word[4] = (ap_mac_addr[3] << 24) | (ap_mac_addr[2] << 16) |
+		       (ap_mac_addr[1] << 8) | (ap_mac_addr[0]);
+	data_word[5] = (ap_mac_addr[5] << 8) | (ap_mac_addr[4]);
+
+	if (AR_SREV_9462_20(ah)) {
+		/* AR9462 2.0 has an extra descriptor word (time based
+		 * discard) compared to other chips */
+		REG_WRITE(ah, (AR_WOW_KA_DESC_WORD2 + (12 * 4)), 0);
+		wow_ka_data_word0 = AR_WOW_TXBUF(13);
+	} else {
+		wow_ka_data_word0 = AR_WOW_TXBUF(12);
+	}
+
+	for (i = 0; i < KAL_NUM_DATA_WORDS; i++)
+		REG_WRITE(ah, (wow_ka_data_word0 + i*4), data_word[i]);
+
+}
+
+void ath9k_hw_wow_apply_pattern(struct ath_hw *ah, u8 *user_pattern,
+				u8 *user_mask, int pattern_count,
+				int pattern_len)
+{
+	int i;
+	u32 pattern_val, mask_val;
+	u32 set, clr;
+
+	/* FIXME: should check count by querying the hardware capability */
+	if (pattern_count >= MAX_NUM_PATTERN)
+		return;
+
+	REG_SET_BIT(ah, AR_WOW_PATTERN, BIT(pattern_count));
+
+	/* set the registers for pattern */
+	for (i = 0; i < MAX_PATTERN_SIZE; i += 4) {
+		memcpy(&pattern_val, user_pattern, 4);
+		REG_WRITE(ah, (AR_WOW_TB_PATTERN(pattern_count) + i),
+			  pattern_val);
+		user_pattern += 4;
+	}
+
+	/* set the registers for mask */
+	for (i = 0; i < MAX_PATTERN_MASK_SIZE; i += 4) {
+		memcpy(&mask_val, user_mask, 4);
+		REG_WRITE(ah, (AR_WOW_TB_MASK(pattern_count) + i), mask_val);
+		user_mask += 4;
+	}
+
+	/* set the pattern length to be matched
+	 *
+	 * AR_WOW_LENGTH1_REG1
+	 * bit 31:24 pattern 0 length
+	 * bit 23:16 pattern 1 length
+	 * bit 15:8 pattern 2 length
+	 * bit 7:0 pattern 3 length
+	 *
+	 * AR_WOW_LENGTH1_REG2
+	 * bit 31:24 pattern 4 length
+	 * bit 23:16 pattern 5 length
+	 * bit 15:8 pattern 6 length
+	 * bit 7:0 pattern 7 length
+	 *
+	 * the below logic writes out the new
+	 * pattern length for the corresponding
+	 * pattern_count, while masking out the
+	 * other fields
+	 */
+
+	ah->wow_event_mask |= BIT(pattern_count + AR_WOW_PAT_FOUND_SHIFT);
+
+	if (pattern_count < 4) {
+		/* Pattern 0-3 uses AR_WOW_LENGTH1 register */
+		set = (pattern_len & AR_WOW_LENGTH_MAX) <<
+		       AR_WOW_LEN1_SHIFT(pattern_count);
+		clr = AR_WOW_LENGTH1_MASK(pattern_count);
+		REG_RMW(ah, AR_WOW_LENGTH1, set, clr);
+	} else {
+		/* Pattern 4-7 uses AR_WOW_LENGTH2 register */
+		set = (pattern_len & AR_WOW_LENGTH_MAX) <<
+		       AR_WOW_LEN2_SHIFT(pattern_count);
+		clr = AR_WOW_LENGTH2_MASK(pattern_count);
+		REG_RMW(ah, AR_WOW_LENGTH2, set, clr);
+	}
+
+}
+EXPORT_SYMBOL(ath9k_hw_wow_apply_pattern);
+
+u32 ath9k_hw_wow_wakeup(struct ath_hw *ah)
+{
+	u32 wow_status = 0;
+	u32 val = 0, rval;
+
+	/*
+	 * read the WoW status register to know
+	 * the wakeup reason
+	 */
+	rval = REG_READ(ah, AR_WOW_PATTERN);
+	val = AR_WOW_STATUS(rval);
+
+	/*
+	 * mask only the WoW events that we have enabled. Sometimes
+	 * we have spurious WoW events from the AR_WOW_PATTERN
+	 * register. This mask will clean it up.
+	 */
+
+	val &= ah->wow_event_mask;
+
+	if (val) {
+		if (val & AR_WOW_MAGIC_PAT_FOUND)
+			wow_status |= AH_WOW_MAGIC_PATTERN_EN;
+		if (AR_WOW_PATTERN_FOUND(val))
+			wow_status |= AH_WOW_USER_PATTERN_EN;
+		if (val & AR_WOW_KEEP_ALIVE_FAIL)
+			wow_status |= AH_WOW_LINK_CHANGE;
+		if (val & AR_WOW_BEACON_FAIL)
+			wow_status |= AH_WOW_BEACON_MISS;
+	}
+
+	/*
+	 * set and clear WOW_PME_CLEAR registers for the chip to
+	 * generate next wow signal.
+	 * disable D3 before accessing other registers ?
+	 */
+
+	/* do we need to check the bit value 0x01000000 (7-10) ?? */
+	REG_RMW(ah, AR_PCIE_PM_CTRL, AR_PMCTRL_WOW_PME_CLR,
+		AR_PMCTRL_PWR_STATE_D1D3);
+
+	/*
+	 * clear all events
+	 */
+	REG_WRITE(ah, AR_WOW_PATTERN,
+		  AR_WOW_CLEAR_EVENTS(REG_READ(ah, AR_WOW_PATTERN)));
+
+	/*
+	 * restore the beacon threshold to init value
+	 */
+	REG_WRITE(ah, AR_RSSI_THR, INIT_RSSI_THR);
+
+	/*
+	 * Restore the way the PCI-E reset, Power-On-Reset and external
+	 * PCIE_POR_SHORT pins are tied, to their original values.
+	 * Just before WoW sleep we untied the PCI-E reset from our
+	 * chip's Power-On-Reset so that a PCI-E reset from the bus
+	 * would not reset our chip.
+	 */
+	if (ah->is_pciexpress)
+		ath9k_hw_configpcipowersave(ah, false);
+
+	ah->wow_event_mask = 0;
+
+	return wow_status;
+}
+EXPORT_SYMBOL(ath9k_hw_wow_wakeup);
+
+void ath9k_hw_wow_enable(struct ath_hw *ah, u32 pattern_enable)
+{
+	u32 wow_event_mask;
+	u32 set, clr;
+
+	/*
+	 * wow_event_mask is a mask of AR_WOW_PATTERN register bits that
+	 * indicates which WoW events we have enabled. The WoW events
+	 * come from 'pattern_enable' in this function and from the
+	 * 'pattern_count' values passed to ath9k_hw_wow_apply_pattern().
+	 */
+	wow_event_mask = ah->wow_event_mask;
+
+	/*
+	 * Untie Power-on-Reset from the PCI-E-Reset. When we are in
+	 * WOW sleep, we do not want the Reset from the PCI-E to disturb
+	 * our hw state
+	 */
+	if (ah->is_pciexpress) {
+		/*
+		 * we need to untie the internal POR (power-on-reset)
+		 * from the external PCI-E reset. We also need to tie
+		 * the PCI-E Phy reset to the PCI-E reset.
+		 */
+		set = AR_WA_RESET_EN | AR_WA_POR_SHORT;
+		clr = AR_WA_UNTIE_RESET_EN | AR_WA_D3_L1_DISABLE;
+		REG_RMW(ah, AR_WA, set, clr);
+	}
+
+	/*
+	 * set the power states appropriately and enable PME
+	 */
+	set = AR_PMCTRL_HOST_PME_EN | AR_PMCTRL_PWR_PM_CTRL_ENA |
+	      AR_PMCTRL_AUX_PWR_DET | AR_PMCTRL_WOW_PME_CLR;
+
+	/*
+	 * set and clear WOW_PME_CLEAR registers for the chip
+	 * to generate next wow signal.
+	 */
+	REG_SET_BIT(ah, AR_PCIE_PM_CTRL, set);
+	clr = AR_PMCTRL_WOW_PME_CLR;
+	REG_CLR_BIT(ah, AR_PCIE_PM_CTRL, clr);
+
+	/*
+	 * Setup for:
+	 *	- beacon misses
+	 *	- magic pattern
+	 *	- keep alive timeout
+	 *	- pattern matching
+	 */
+
+	/*
+	 * Program default values for pattern backoff, aifs/slot/KAL count,
+	 * beacon miss timeout, KAL timeout, etc.
+	 */
+	set = AR_WOW_BACK_OFF_SHIFT(AR_WOW_PAT_BACKOFF);
+	REG_SET_BIT(ah, AR_WOW_PATTERN, set);
+
+	set = AR_WOW_AIFS_CNT(AR_WOW_CNT_AIFS_CNT) |
+	      AR_WOW_SLOT_CNT(AR_WOW_CNT_SLOT_CNT) |
+	      AR_WOW_KEEP_ALIVE_CNT(AR_WOW_CNT_KA_CNT);
+	REG_SET_BIT(ah, AR_WOW_COUNT, set);
+
+	if (pattern_enable & AH_WOW_BEACON_MISS)
+		set = AR_WOW_BEACON_TIMO;
+	/* We are not using beacon miss, program a large value */
+	else
+		set = AR_WOW_BEACON_TIMO_MAX;
+
+	REG_WRITE(ah, AR_WOW_BCN_TIMO, set);
+
+	/*
+	 * Keep alive timo in ms except AR9280
+	 */
+	if (!pattern_enable)
+		set = AR_WOW_KEEP_ALIVE_NEVER;
+	else
+		set = KAL_TIMEOUT * 32;
+
+	REG_WRITE(ah, AR_WOW_KEEP_ALIVE_TIMO, set);
+
+	/*
+	 * Keep alive delay: based on the 'power on clock',
+	 * therefore in usec.
+	 */
+	set = KAL_DELAY * 1000;
+	REG_WRITE(ah, AR_WOW_KEEP_ALIVE_DELAY, set);
+
+	/*
+	 * Create keep alive pattern to respond to beacons
+	 */
+	ath9k_wow_create_keep_alive_pattern(ah);
+
+	/*
+	 * Configure MAC WoW Registers
+	 */
+	set = 0;
+	/* Send keep alive timeouts anyway */
+	clr = AR_WOW_KEEP_ALIVE_AUTO_DIS;
+
+	if (pattern_enable & AH_WOW_LINK_CHANGE)
+		wow_event_mask |= AR_WOW_KEEP_ALIVE_FAIL;
+	else
+		set = AR_WOW_KEEP_ALIVE_FAIL_DIS;
+
+	set = AR_WOW_KEEP_ALIVE_FAIL_DIS;
+	REG_RMW(ah, AR_WOW_KEEP_ALIVE, set, clr);
+
+	/*
+	 * we are relying on a bmiss failure. ensure we have
+	 * enough threshold to prevent false positives
+	 */
+	REG_RMW_FIELD(ah, AR_RSSI_THR, AR_RSSI_THR_BM_THR,
+		      AR_WOW_BMISSTHRESHOLD);
+
+	set = 0;
+	clr = 0;
+
+	if (pattern_enable & AH_WOW_BEACON_MISS) {
+		set = AR_WOW_BEACON_FAIL_EN;
+		wow_event_mask |= AR_WOW_BEACON_FAIL;
+	} else {
+		clr = AR_WOW_BEACON_FAIL_EN;
+	}
+
+	REG_RMW(ah, AR_WOW_BCN_EN, set, clr);
+
+	set = 0;
+	clr = 0;
+	/*
+	 * Enable the magic packet registers
+	 */
+	if (pattern_enable & AH_WOW_MAGIC_PATTERN_EN) {
+		set = AR_WOW_MAGIC_EN;
+		wow_event_mask |= AR_WOW_MAGIC_PAT_FOUND;
+	} else {
+		clr = AR_WOW_MAGIC_EN;
+	}
+	set |= AR_WOW_MAC_INTR_EN;
+	REG_RMW(ah, AR_WOW_PATTERN, set, clr);
+
+	REG_WRITE(ah, AR_WOW_PATTERN_MATCH_LT_256B,
+		  AR_WOW_PATTERN_SUPPORTED);
+
+	/*
+	 * Set the power states appropriately and enable PME
+	 */
+	clr = 0;
+	set = AR_PMCTRL_PWR_STATE_D1D3 | AR_PMCTRL_HOST_PME_EN |
+	      AR_PMCTRL_PWR_PM_CTRL_ENA;
+
+	clr = AR_PCIE_PM_CTRL_ENA;
+	REG_RMW(ah, AR_PCIE_PM_CTRL, set, clr);
+
+	/*
+	 * This is needed to prevent the chip from waking up the host
+	 * within 3-4 seconds on certain platforms/BIOSes. The fix is
+	 * to enable D1 & D3 so that they match the original definition
+	 * and also match the OTP value. This is mostly relevant for
+	 * SW WoW.
+	 */
+	clr = AR_PMCTRL_PWR_STATE_D1D3;
+	REG_CLR_BIT(ah, AR_PCIE_PM_CTRL, clr);
+
+	set = AR_PMCTRL_PWR_STATE_D1D3_REAL;
+	REG_SET_BIT(ah, AR_PCIE_PM_CTRL, set);
+
+	REG_CLR_BIT(ah, AR_STA_ID1, AR_STA_ID1_PRESERVE_SEQNUM);
+
+	/* to bring down WOW power low margin */
+	set = BIT(13);
+	REG_SET_BIT(ah, AR_PCIE_PHY_REG3, set);
+	/* HW WoW */
+	clr = BIT(5);
+	REG_CLR_BIT(ah, AR_PCU_MISC_MODE3, clr);
+
+	ath9k_hw_set_powermode_wow_sleep(ah);
+	ah->wow_event_mask = wow_event_mask;
+}
+EXPORT_SYMBOL(ath9k_hw_wow_enable);
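
ath9k_hw_wow_apply_pattern() above documents how per-pattern match lengths are packed four to a register: pattern 0 (or 4) occupies bits 31:24 of AR_WOW_LENGTH1 (or AR_WOW_LENGTH2), down to pattern 3 (or 7) in bits 7:0, and each update is a read-modify-write so the other three fields are preserved. The standalone sketch below models just that packing; the shift/mask helpers are local to the example and only mirror what the driver's AR_WOW_LEN1_SHIFT()/AR_WOW_LENGTH1_MASK() macros express.

/*
 * Sketch of the AR_WOW_LENGTH1/2 field packing: four 8-bit pattern
 * lengths per 32-bit register, pattern (n % 4) == 0 in the top byte.
 */
#include <stdint.h>
#include <stdio.h>

#define WOW_LENGTH_MAX	0xff

/* Bit offset of pattern n within its length register (n taken modulo 4). */
static unsigned int wow_len_shift(unsigned int pattern)
{
	return 24 - 8 * (pattern & 3);
}

/* Read-modify-write one pattern length into a length-register value. */
static uint32_t wow_set_len(uint32_t reg, unsigned int pattern, unsigned int len)
{
	unsigned int shift = wow_len_shift(pattern);

	reg &= ~((uint32_t)WOW_LENGTH_MAX << shift);	/* clear old field */
	reg |= (uint32_t)(len & WOW_LENGTH_MAX) << shift;

	return reg;
}

int main(void)
{
	uint32_t len1 = 0, len2 = 0;

	len1 = wow_set_len(len1, 0, 28);	/* pattern 0 -> bits 31:24 */
	len1 = wow_set_len(len1, 3, 60);	/* pattern 3 -> bits 7:0   */
	len2 = wow_set_len(len2, 5, 44);	/* pattern 5 -> bits 23:16 */

	printf("AR_WOW_LENGTH1 = 0x%08x\n", len1);	/* 0x1c00003c */
	printf("AR_WOW_LENGTH2 = 0x%08x\n", len2);	/* 0x002c0000 */

	return 0;
}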
diff --git a/drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h b/drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h
index 6e1756bc3833..f76139bbb74f 100644
--- a/drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h
@@ -18,6 +18,10 @@
 #ifndef INITVALS_9330_1P1_H
 #define INITVALS_9330_1P1_H
 
+#define ar9331_1p1_baseband_core_txfir_coeff_japan_2484 ar9300_2p2_baseband_core_txfir_coeff_japan_2484
+
+#define ar9331_modes_high_power_tx_gain_1p1 ar9331_modes_lowest_ob_db_tx_gain_1p1
+
 static const u32 ar9331_1p1_baseband_postamble[][5] = {
 	/* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
 	{0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8005, 0xd00a8005},
@@ -55,7 +59,7 @@ static const u32 ar9331_1p1_baseband_postamble[][5] = {
 	{0x0000a284, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
 	{0x0000a288, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
 	{0x0000a28c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
-	{0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
+	{0x0000a2c4, 0x00158d18, 0x00158d18, 0x00058d18, 0x00058d18},
 	{0x0000a2d0, 0x00071982, 0x00071982, 0x00071982, 0x00071982},
 	{0x0000a2d8, 0xf999a83a, 0xf999a83a, 0xf999a83a, 0xf999a83a},
 	{0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
@@ -252,7 +256,7 @@ static const u32 ar9331_modes_low_ob_db_tx_gain_1p1[][5] = {
 	{0x0000a2e0, 0xffffcc84, 0xffffcc84, 0xffffcc84, 0xffffcc84},
 	{0x0000a2e4, 0xfffff000, 0xfffff000, 0xfffff000, 0xfffff000},
 	{0x0000a2e8, 0xfffe0000, 0xfffe0000, 0xfffe0000, 0xfffe0000},
-	{0x0000a410, 0x000050d7, 0x000050d7, 0x000050d0, 0x000050d0},
+	{0x0000a410, 0x000050d7, 0x000050d7, 0x000050d4, 0x000050d4},
 	{0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
 	{0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
 	{0x0000a508, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
@@ -337,8 +341,6 @@ static const u32 ar9331_modes_low_ob_db_tx_gain_1p1[][5] = {
 	{0x00016284, 0x14d3f000, 0x14d3f000, 0x14d3f000, 0x14d3f000},
 };
 
-#define ar9331_1p1_baseband_core_txfir_coeff_japan_2484 ar9462_2p0_baseband_core_txfir_coeff_japan_2484
-
 static const u32 ar9331_1p1_xtal_25M[][2] = {
 	/* Addr      allmodes  */
 	{0x00007038, 0x000002f8},
@@ -373,17 +375,17 @@ static const u32 ar9331_1p1_radio_core[][2] = {
 	{0x000160b4, 0x92480040},
 	{0x000160c0, 0x006db6db},
 	{0x000160c4, 0x0186db60},
-	{0x000160c8, 0x6db4db6c},
+	{0x000160c8, 0x6db6db6c},
 	{0x000160cc, 0x6de6c300},
 	{0x000160d0, 0x14500820},
 	{0x00016100, 0x04cb0001},
 	{0x00016104, 0xfff80015},
 	{0x00016108, 0x00080010},
 	{0x0001610c, 0x00170000},
-	{0x00016140, 0x10800000},
+	{0x00016140, 0x50804000},
 	{0x00016144, 0x01884080},
 	{0x00016148, 0x000080c0},
-	{0x00016280, 0x01000015},
+	{0x00016280, 0x01001015},
 	{0x00016284, 0x14d20000},
 	{0x00016288, 0x00318000},
 	{0x0001628c, 0x50000000},
@@ -622,12 +624,12 @@ static const u32 ar9331_1p1_baseband_core[][2] = {
 	{0x0000a370, 0x00000000},
 	{0x0000a390, 0x00000001},
 	{0x0000a394, 0x00000444},
-	{0x0000a398, 0x001f0e0f},
-	{0x0000a39c, 0x0075393f},
-	{0x0000a3a0, 0xb79f6427},
-	{0x0000a3a4, 0x00000000},
-	{0x0000a3a8, 0xaaaaaaaa},
-	{0x0000a3ac, 0x3c466478},
+	{0x0000a398, 0x00000000},
+	{0x0000a39c, 0x210d0401},
+	{0x0000a3a0, 0xab9a7144},
+	{0x0000a3a4, 0x00000011},
+	{0x0000a3a8, 0x3c3c003d},
+	{0x0000a3ac, 0x30310030},
 	{0x0000a3c0, 0x20202020},
 	{0x0000a3c4, 0x22222220},
 	{0x0000a3c8, 0x20200020},
@@ -686,100 +688,18 @@ static const u32 ar9331_1p1_baseband_core[][2] = {
 	{0x0000a7dc, 0x00000001},
 };
 
-static const u32 ar9331_modes_high_power_tx_gain_1p1[][5] = {
+static const u32 ar9331_1p1_mac_postamble[][5] = {
 	/* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
-	{0x0000a2d8, 0x7999a83a, 0x7999a83a, 0x7999a83a, 0x7999a83a},
-	{0x0000a2dc, 0xffff2a52, 0xffff2a52, 0xffff2a52, 0xffff2a52},
-	{0x0000a2e0, 0xffffcc84, 0xffffcc84, 0xffffcc84, 0xffffcc84},
-	{0x0000a2e4, 0xfffff000, 0xfffff000, 0xfffff000, 0xfffff000},
-	{0x0000a2e8, 0xfffe0000, 0xfffe0000, 0xfffe0000, 0xfffe0000},
-	{0x0000a410, 0x000050d7, 0x000050d7, 0x000050d0, 0x000050d0},
-	{0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
-	{0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
-	{0x0000a508, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
-	{0x0000a50c, 0x11062202, 0x11062202, 0x0d000200, 0x0d000200},
-	{0x0000a510, 0x17022e00, 0x17022e00, 0x11000202, 0x11000202},
-	{0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x15000400, 0x15000400},
-	{0x0000a518, 0x25020ec0, 0x25020ec0, 0x19000402, 0x19000402},
-	{0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x1d000404, 0x1d000404},
-	{0x0000a520, 0x2f001f04, 0x2f001f04, 0x23000a00, 0x23000a00},
-	{0x0000a524, 0x35001fc4, 0x35001fc4, 0x27000a02, 0x27000a02},
-	{0x0000a528, 0x3c022f04, 0x3c022f04, 0x2b000a04, 0x2b000a04},
-	{0x0000a52c, 0x41023e85, 0x41023e85, 0x2d000a20, 0x2d000a20},
-	{0x0000a530, 0x48023ec6, 0x48023ec6, 0x31000a22, 0x31000a22},
-	{0x0000a534, 0x4d023f01, 0x4d023f01, 0x35000a24, 0x35000a24},
-	{0x0000a538, 0x53023f4b, 0x53023f4b, 0x38000a43, 0x38000a43},
-	{0x0000a53c, 0x5a027f09, 0x5a027f09, 0x3b000e42, 0x3b000e42},
-	{0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x3f000e44, 0x3f000e44},
-	{0x0000a544, 0x6502feca, 0x6502feca, 0x42000e64, 0x42000e64},
-	{0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x46000e66, 0x46000e66},
-	{0x0000a54c, 0x7203feca, 0x7203feca, 0x4a000ea6, 0x4a000ea6},
-	{0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x4a000ea6, 0x4a000ea6},
-	{0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x4a000ea6, 0x4a000ea6},
-	{0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x4a000ea6, 0x4a000ea6},
-	{0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x4a000ea6, 0x4a000ea6},
-	{0x0000a560, 0x900fff0b, 0x900fff0b, 0x4a000ea6, 0x4a000ea6},
-	{0x0000a564, 0x960fffcb, 0x960fffcb, 0x4a000ea6, 0x4a000ea6},
-	{0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x4a000ea6, 0x4a000ea6},
-	{0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x4a000ea6, 0x4a000ea6},
-	{0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x4a000ea6, 0x4a000ea6},
-	{0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x4a000ea6, 0x4a000ea6},
-	{0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x4a000ea6, 0x4a000ea6},
-	{0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x4a000ea6, 0x4a000ea6},
-	{0x0000a580, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
-	{0x0000a584, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
-	{0x0000a588, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
-	{0x0000a58c, 0x11062202, 0x11062202, 0x0b000200, 0x0b000200},
-	{0x0000a590, 0x17022e00, 0x17022e00, 0x0f000202, 0x0f000202},
-	{0x0000a594, 0x1d000ec2, 0x1d000ec2, 0x11000400, 0x11000400},
-	{0x0000a598, 0x25020ec0, 0x25020ec0, 0x15000402, 0x15000402},
-	{0x0000a59c, 0x2b020ec3, 0x2b020ec3, 0x19000404, 0x19000404},
-	{0x0000a5a0, 0x2f001f04, 0x2f001f04, 0x1b000603, 0x1b000603},
-	{0x0000a5a4, 0x35001fc4, 0x35001fc4, 0x1f000a02, 0x1f000a02},
-	{0x0000a5a8, 0x3c022f04, 0x3c022f04, 0x23000a04, 0x23000a04},
-	{0x0000a5ac, 0x41023e85, 0x41023e85, 0x26000a20, 0x26000a20},
-	{0x0000a5b0, 0x48023ec6, 0x48023ec6, 0x2a000e20, 0x2a000e20},
-	{0x0000a5b4, 0x4d023f01, 0x4d023f01, 0x2e000e22, 0x2e000e22},
-	{0x0000a5b8, 0x53023f4b, 0x53023f4b, 0x31000e24, 0x31000e24},
-	{0x0000a5bc, 0x5a027f09, 0x5a027f09, 0x34001640, 0x34001640},
-	{0x0000a5c0, 0x5f027fc9, 0x5f027fc9, 0x38001660, 0x38001660},
-	{0x0000a5c4, 0x6502feca, 0x6502feca, 0x3b001861, 0x3b001861},
-	{0x0000a5c8, 0x6b02ff4a, 0x6b02ff4a, 0x3e001a81, 0x3e001a81},
-	{0x0000a5cc, 0x7203feca, 0x7203feca, 0x42001a83, 0x42001a83},
-	{0x0000a5d0, 0x7703ff0b, 0x7703ff0b, 0x44001c84, 0x44001c84},
-	{0x0000a5d4, 0x7d06ffcb, 0x7d06ffcb, 0x48001ce3, 0x48001ce3},
-	{0x0000a5d8, 0x8407ff0b, 0x8407ff0b, 0x4c001ce5, 0x4c001ce5},
-	{0x0000a5dc, 0x8907ffcb, 0x8907ffcb, 0x50001ce9, 0x50001ce9},
-	{0x0000a5e0, 0x900fff0b, 0x900fff0b, 0x54001ceb, 0x54001ceb},
-	{0x0000a5e4, 0x960fffcb, 0x960fffcb, 0x56001eec, 0x56001eec},
-	{0x0000a5e8, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
-	{0x0000a5ec, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
-	{0x0000a5f0, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
-	{0x0000a5f4, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
-	{0x0000a5f8, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
-	{0x0000a5fc, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
-	{0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
-	{0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
-	{0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
-	{0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
-	{0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
-	{0x0000a614, 0x01404000, 0x01404000, 0x01404000, 0x01404000},
-	{0x0000a618, 0x01404501, 0x01404501, 0x01404501, 0x01404501},
-	{0x0000a61c, 0x02008802, 0x02008802, 0x02008802, 0x02008802},
-	{0x0000a620, 0x0280c802, 0x0280c802, 0x0280c802, 0x0280c802},
-	{0x0000a624, 0x03010a03, 0x03010a03, 0x03010a03, 0x03010a03},
-	{0x0000a628, 0x03010c04, 0x03010c04, 0x03010c04, 0x03010c04},
-	{0x0000a62c, 0x03010c04, 0x03010c04, 0x03010c04, 0x03010c04},
-	{0x0000a630, 0x03010c04, 0x03010c04, 0x03010c04, 0x03010c04},
-	{0x0000a634, 0x03010c04, 0x03010c04, 0x03010c04, 0x03010c04},
-	{0x0000a638, 0x03010c04, 0x03010c04, 0x03010c04, 0x03010c04},
-	{0x0000a63c, 0x03010c04, 0x03010c04, 0x03010c04, 0x03010c04},
-	{0x00016044, 0x034922db, 0x034922db, 0x034922db, 0x034922db},
-	{0x00016284, 0x14d3f000, 0x14d3f000, 0x14d3f000, 0x14d3f000},
+	{0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
+	{0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c},
+	{0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38},
+	{0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00},
+	{0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b},
+	{0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810},
+	{0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a},
+	{0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440},
 };
 
-#define ar9331_1p1_mac_postamble ar9300_2p2_mac_postamble
-
 static const u32 ar9331_1p1_soc_preamble[][2] = {
 	/* Addr      allmodes  */
 	{0x00007020, 0x00000000},
diff --git a/drivers/net/wireless/ath/ath9k/ar9330_1p2_initvals.h b/drivers/net/wireless/ath/ath9k/ar9330_1p2_initvals.h
index 57ed8a112173..0ac8be96097f 100644
--- a/drivers/net/wireless/ath/ath9k/ar9330_1p2_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9330_1p2_initvals.h
@@ -18,6 +18,28 @@
 #ifndef INITVALS_9330_1P2_H
 #define INITVALS_9330_1P2_H
 
+#define ar9331_modes_high_power_tx_gain_1p2 ar9331_modes_high_ob_db_tx_gain_1p2
+
+#define ar9331_modes_low_ob_db_tx_gain_1p2 ar9331_modes_high_ob_db_tx_gain_1p2
+
+#define ar9331_modes_lowest_ob_db_tx_gain_1p2 ar9331_modes_high_ob_db_tx_gain_1p2
+
+#define ar9331_1p2_baseband_core_txfir_coeff_japan_2484 ar9331_1p1_baseband_core_txfir_coeff_japan_2484
+
+#define ar9331_1p2_xtal_25M ar9331_1p1_xtal_25M
+
+#define ar9331_1p2_xtal_40M ar9331_1p1_xtal_40M
+
+#define ar9331_1p2_soc_postamble ar9331_1p1_soc_postamble
+
+#define ar9331_1p2_mac_postamble ar9331_1p1_mac_postamble
+
+#define ar9331_1p2_soc_preamble ar9331_1p1_soc_preamble
+
+#define ar9331_1p2_mac_core ar9331_1p1_mac_core
+
+#define ar9331_common_wo_xlna_rx_gain_1p2 ar9331_common_wo_xlna_rx_gain_1p1
+
 static const u32 ar9331_modes_high_ob_db_tx_gain_1p2[][5] = {
 	/* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
 	{0x0000a410, 0x000050d7, 0x000050d7, 0x000050d7, 0x000050d7},
@@ -103,57 +125,6 @@ static const u32 ar9331_modes_high_ob_db_tx_gain_1p2[][5] = {
 	{0x0000a63c, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
 };
 
-#define ar9331_modes_high_power_tx_gain_1p2 ar9331_modes_high_ob_db_tx_gain_1p2
-
-#define ar9331_modes_low_ob_db_tx_gain_1p2 ar9331_modes_high_power_tx_gain_1p2
-
-#define ar9331_modes_lowest_ob_db_tx_gain_1p2 ar9331_modes_low_ob_db_tx_gain_1p2
-
-static const u32 ar9331_1p2_baseband_postamble[][5] = {
-	/* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
-	{0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8005, 0xd00a8005},
-	{0x00009820, 0x206a002e, 0x206a002e, 0x206a002e, 0x206a002e},
-	{0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0},
-	{0x00009828, 0x06903081, 0x06903081, 0x06903881, 0x06903881},
-	{0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
-	{0x00009830, 0x0000059c, 0x0000059c, 0x0000059c, 0x0000059c},
-	{0x00009c00, 0x00000044, 0x00000044, 0x00000044, 0x00000044},
-	{0x00009e00, 0x0372161e, 0x0372161e, 0x037216a4, 0x037216a4},
-	{0x00009e04, 0x00182020, 0x00182020, 0x00182020, 0x00182020},
-	{0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2},
-	{0x00009e10, 0x7ec80d2e, 0x7ec80d2e, 0x7ec80d2e, 0x7ec80d2e},
-	{0x00009e14, 0x31395d5e, 0x3139605e, 0x3139605e, 0x31395d5e},
-	{0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
-	{0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
-	{0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
-	{0x00009e2c, 0x0000001c, 0x0000001c, 0x00003221, 0x00003221},
-	{0x00009e3c, 0xcf946222, 0xcf946222, 0xcf946222, 0xcf946222},
-	{0x00009e44, 0x02321e27, 0x02321e27, 0x02282324, 0x02282324},
-	{0x00009e48, 0x5030201a, 0x5030201a, 0x50302010, 0x50302010},
-	{0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
-	{0x0000a204, 0x00003fc0, 0x00003fc4, 0x00003fc4, 0x00003fc0},
-	{0x0000a208, 0x00000104, 0x00000104, 0x00000004, 0x00000004},
-	{0x0000a230, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b},
-	{0x0000a234, 0x00000fff, 0x00000fff, 0x10000fff, 0x00000fff},
-	{0x0000a238, 0xffb81018, 0xffb81018, 0xffb81018, 0xffb81018},
-	{0x0000a250, 0x00000000, 0x00000000, 0x00000210, 0x00000108},
-	{0x0000a254, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898},
-	{0x0000a258, 0x02020002, 0x02020002, 0x02020002, 0x02020002},
-	{0x0000a25c, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e},
-	{0x0000a260, 0x3a021501, 0x3a021501, 0x3a021501, 0x3a021501},
-	{0x0000a264, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e},
-	{0x0000a280, 0x00000007, 0x00000007, 0x0000000b, 0x0000000b},
-	{0x0000a284, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
-	{0x0000a288, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
-	{0x0000a28c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
-	{0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
-	{0x0000a2d0, 0x00071981, 0x00071981, 0x00071981, 0x00071981},
-	{0x0000a2d8, 0xf999a83a, 0xf999a83a, 0xf999a83a, 0xf999a83a},
-	{0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
-	{0x0000ae04, 0x00802020, 0x00802020, 0x00802020, 0x00802020},
-	{0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
-};
-
 static const u32 ar9331_1p2_radio_core[][2] = {
 	/* Addr      allmodes  */
 	{0x00016000, 0x36db6db6},
@@ -219,24 +190,318 @@ static const u32 ar9331_1p2_radio_core[][2] = {
 	{0x000163d4, 0x00000000},
 };
 
-#define ar9331_1p2_baseband_core_txfir_coeff_japan_2484 ar9331_1p1_baseband_core_txfir_coeff_japan_2484
-
-#define ar9331_1p2_xtal_25M ar9331_1p1_xtal_25M
-
-#define ar9331_1p2_xtal_40M ar9331_1p1_xtal_40M
-
-#define ar9331_1p2_baseband_core ar9331_1p1_baseband_core
-
-#define ar9331_1p2_soc_postamble ar9331_1p1_soc_postamble
-
-#define ar9331_1p2_mac_postamble ar9331_1p1_mac_postamble
-
-#define ar9331_1p2_soc_preamble ar9331_1p1_soc_preamble
-
-#define ar9331_1p2_mac_core ar9331_1p1_mac_core
+static const u32 ar9331_1p2_baseband_core[][2] = {
+	/* Addr      allmodes  */
+	{0x00009800, 0xafe68e30},
+	{0x00009804, 0xfd14e000},
+	{0x00009808, 0x9c0a8f6b},
+	{0x0000980c, 0x04800000},
+	{0x00009814, 0x9280c00a},
+	{0x00009818, 0x00000000},
+	{0x0000981c, 0x00020028},
+	{0x00009834, 0x5f3ca3de},
+	{0x00009838, 0x0108ecff},
+	{0x0000983c, 0x14750600},
+	{0x00009880, 0x201fff00},
+	{0x00009884, 0x00001042},
+	{0x000098a4, 0x00200400},
+	{0x000098b0, 0x32840bbe},
+	{0x000098d0, 0x004b6a8e},
+	{0x000098d4, 0x00000820},
+	{0x000098dc, 0x00000000},
+	{0x000098f0, 0x00000000},
+	{0x000098f4, 0x00000000},
+	{0x00009c04, 0x00000000},
+	{0x00009c08, 0x03200000},
+	{0x00009c0c, 0x00000000},
+	{0x00009c10, 0x00000000},
+	{0x00009c14, 0x00046384},
+	{0x00009c18, 0x05b6b440},
+	{0x00009c1c, 0x00b6b440},
+	{0x00009d00, 0xc080a333},
+	{0x00009d04, 0x40206c10},
+	{0x00009d08, 0x009c4060},
+	{0x00009d0c, 0x1883800a},
+	{0x00009d10, 0x01834061},
+	{0x00009d14, 0x00c00400},
+	{0x00009d18, 0x00000000},
+	{0x00009e08, 0x0038233c},
+	{0x00009e24, 0x9927b515},
+	{0x00009e28, 0x12ef0200},
+	{0x00009e30, 0x06336f77},
+	{0x00009e34, 0x6af6532f},
+	{0x00009e38, 0x0cc80c00},
+	{0x00009e40, 0x0d261820},
+	{0x00009e4c, 0x00001004},
+	{0x00009e50, 0x00ff03f1},
+	{0x00009fc0, 0x803e4788},
+	{0x00009fc4, 0x0001efb5},
+	{0x00009fcc, 0x40000014},
+	{0x0000a20c, 0x00000000},
+	{0x0000a220, 0x00000000},
+	{0x0000a224, 0x00000000},
+	{0x0000a228, 0x10002310},
+	{0x0000a23c, 0x00000000},
+	{0x0000a244, 0x0c000000},
+	{0x0000a2a0, 0x00000001},
+	{0x0000a2c0, 0x00000001},
+	{0x0000a2c8, 0x00000000},
+	{0x0000a2cc, 0x18c43433},
+	{0x0000a2d4, 0x00000000},
+	{0x0000a2dc, 0x00000000},
+	{0x0000a2e0, 0x00000000},
+	{0x0000a2e4, 0x00000000},
+	{0x0000a2e8, 0x00000000},
+	{0x0000a2ec, 0x00000000},
+	{0x0000a2f0, 0x00000000},
+	{0x0000a2f4, 0x00000000},
+	{0x0000a2f8, 0x00000000},
+	{0x0000a344, 0x00000000},
+	{0x0000a34c, 0x00000000},
+	{0x0000a350, 0x0000a000},
+	{0x0000a364, 0x00000000},
+	{0x0000a370, 0x00000000},
+	{0x0000a390, 0x00000001},
+	{0x0000a394, 0x00000444},
+	{0x0000a398, 0x001f0e0f},
+	{0x0000a39c, 0x0075393f},
+	{0x0000a3a0, 0xb79f6427},
+	{0x0000a3a4, 0x00000000},
+	{0x0000a3a8, 0xaaaaaaaa},
+	{0x0000a3ac, 0x3c466478},
+	{0x0000a3c0, 0x20202020},
+	{0x0000a3c4, 0x22222220},
+	{0x0000a3c8, 0x20200020},
+	{0x0000a3cc, 0x20202020},
+	{0x0000a3d0, 0x20202020},
+	{0x0000a3d4, 0x20202020},
+	{0x0000a3d8, 0x20202020},
+	{0x0000a3dc, 0x20202020},
+	{0x0000a3e0, 0x20202020},
+	{0x0000a3e4, 0x20202020},
+	{0x0000a3e8, 0x20202020},
+	{0x0000a3ec, 0x20202020},
+	{0x0000a3f0, 0x00000000},
+	{0x0000a3f4, 0x00000006},
+	{0x0000a3f8, 0x0cdbd380},
+	{0x0000a3fc, 0x000f0f01},
+	{0x0000a400, 0x8fa91f01},
+	{0x0000a404, 0x00000000},
+	{0x0000a408, 0x0e79e5c6},
+	{0x0000a40c, 0x00820820},
+	{0x0000a414, 0x1ce739ce},
+	{0x0000a418, 0x2d001dce},
+	{0x0000a41c, 0x1ce739ce},
+	{0x0000a420, 0x000001ce},
+	{0x0000a424, 0x1ce739ce},
+	{0x0000a428, 0x000001ce},
+	{0x0000a42c, 0x1ce739ce},
+	{0x0000a430, 0x1ce739ce},
+	{0x0000a434, 0x00000000},
+	{0x0000a438, 0x00001801},
+	{0x0000a43c, 0x00000000},
+	{0x0000a440, 0x00000000},
+	{0x0000a444, 0x00000000},
+	{0x0000a448, 0x04000000},
+	{0x0000a44c, 0x00000001},
+	{0x0000a450, 0x00010000},
+	{0x0000a458, 0x00000000},
+	{0x0000a640, 0x00000000},
+	{0x0000a644, 0x3fad9d74},
+	{0x0000a648, 0x0048060a},
+	{0x0000a64c, 0x00003c37},
+	{0x0000a670, 0x03020100},
+	{0x0000a674, 0x09080504},
+	{0x0000a678, 0x0d0c0b0a},
+	{0x0000a67c, 0x13121110},
+	{0x0000a680, 0x31301514},
+	{0x0000a684, 0x35343332},
+	{0x0000a688, 0x00000036},
+	{0x0000a690, 0x00000838},
+	{0x0000a7c0, 0x00000000},
+	{0x0000a7c4, 0xfffffffc},
+	{0x0000a7c8, 0x00000000},
+	{0x0000a7cc, 0x00000000},
+	{0x0000a7d0, 0x00000000},
+	{0x0000a7d4, 0x00000004},
+	{0x0000a7dc, 0x00000001},
+};
 
-#define ar9331_common_wo_xlna_rx_gain_1p2 ar9331_common_wo_xlna_rx_gain_1p1
+static const u32 ar9331_1p2_baseband_postamble[][5] = {
+	/* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
+	{0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8005, 0xd00a8005},
+	{0x00009820, 0x206a002e, 0x206a002e, 0x206a002e, 0x206a002e},
+	{0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0},
+	{0x00009828, 0x06903081, 0x06903081, 0x06903881, 0x06903881},
+	{0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
+	{0x00009830, 0x0000059c, 0x0000059c, 0x0000059c, 0x0000059c},
+	{0x00009c00, 0x00000044, 0x00000044, 0x00000044, 0x00000044},
+	{0x00009e00, 0x0372161e, 0x0372161e, 0x037216a4, 0x037216a4},
+	{0x00009e04, 0x00182020, 0x00182020, 0x00182020, 0x00182020},
+	{0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2},
+	{0x00009e10, 0x7ec80d2e, 0x7ec80d2e, 0x7ec80d2e, 0x7ec80d2e},
+	{0x00009e14, 0x31395d5e, 0x3139605e, 0x3139605e, 0x31395d5e},
+	{0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
+	{0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
+	{0x00009e2c, 0x0000001c, 0x0000001c, 0x00003221, 0x00003221},
+	{0x00009e3c, 0xcf946222, 0xcf946222, 0xcf946222, 0xcf946222},
+	{0x00009e44, 0x02321e27, 0x02321e27, 0x02282324, 0x02282324},
+	{0x00009e48, 0x5030201a, 0x5030201a, 0x50302010, 0x50302010},
+	{0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
+	{0x0000a204, 0x00003fc0, 0x00003fc4, 0x00003fc4, 0x00003fc0},
+	{0x0000a208, 0x00000104, 0x00000104, 0x00000004, 0x00000004},
+	{0x0000a230, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b},
+	{0x0000a234, 0x00000fff, 0x00000fff, 0x10000fff, 0x00000fff},
+	{0x0000a238, 0xffb81018, 0xffb81018, 0xffb81018, 0xffb81018},
+	{0x0000a250, 0x00000000, 0x00000000, 0x00000210, 0x00000108},
+	{0x0000a254, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898},
+	{0x0000a258, 0x02020002, 0x02020002, 0x02020002, 0x02020002},
+	{0x0000a25c, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e},
+	{0x0000a260, 0x3a021501, 0x3a021501, 0x3a021501, 0x3a021501},
+	{0x0000a264, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e},
+	{0x0000a280, 0x00000007, 0x00000007, 0x0000000b, 0x0000000b},
+	{0x0000a284, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000a288, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000a28c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
+	{0x0000a2d0, 0x00071981, 0x00071981, 0x00071981, 0x00071981},
+	{0x0000a2d8, 0xf999a83a, 0xf999a83a, 0xf999a83a, 0xf999a83a},
+	{0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000ae04, 0x00802020, 0x00802020, 0x00802020, 0x00802020},
+	{0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+};
 
-#define ar9331_common_rx_gain_1p2 ar9485_common_rx_gain_1_1
+static const u32 ar9331_common_rx_gain_1p2[][2] = {
+	/* Addr      allmodes  */
+	{0x0000a000, 0x00010000},
+	{0x0000a004, 0x00030002},
+	{0x0000a008, 0x00050004},
+	{0x0000a00c, 0x00810080},
+	{0x0000a010, 0x01800082},
+	{0x0000a014, 0x01820181},
+	{0x0000a018, 0x01840183},
+	{0x0000a01c, 0x01880185},
+	{0x0000a020, 0x018a0189},
+	{0x0000a024, 0x02850284},
+	{0x0000a028, 0x02890288},
+	{0x0000a02c, 0x03850384},
+	{0x0000a030, 0x03890388},
+	{0x0000a034, 0x038b038a},
+	{0x0000a038, 0x038d038c},
+	{0x0000a03c, 0x03910390},
+	{0x0000a040, 0x03930392},
+	{0x0000a044, 0x03950394},
+	{0x0000a048, 0x00000396},
+	{0x0000a04c, 0x00000000},
+	{0x0000a050, 0x00000000},
+	{0x0000a054, 0x00000000},
+	{0x0000a058, 0x00000000},
+	{0x0000a05c, 0x00000000},
+	{0x0000a060, 0x00000000},
+	{0x0000a064, 0x00000000},
+	{0x0000a068, 0x00000000},
+	{0x0000a06c, 0x00000000},
+	{0x0000a070, 0x00000000},
+	{0x0000a074, 0x00000000},
+	{0x0000a078, 0x00000000},
+	{0x0000a07c, 0x00000000},
+	{0x0000a080, 0x28282828},
+	{0x0000a084, 0x28282828},
+	{0x0000a088, 0x28282828},
+	{0x0000a08c, 0x28282828},
+	{0x0000a090, 0x28282828},
+	{0x0000a094, 0x21212128},
+	{0x0000a098, 0x171c1c1c},
+	{0x0000a09c, 0x02020212},
+	{0x0000a0a0, 0x00000202},
+	{0x0000a0a4, 0x00000000},
+	{0x0000a0a8, 0x00000000},
+	{0x0000a0ac, 0x00000000},
+	{0x0000a0b0, 0x00000000},
+	{0x0000a0b4, 0x00000000},
+	{0x0000a0b8, 0x00000000},
+	{0x0000a0bc, 0x00000000},
+	{0x0000a0c0, 0x001f0000},
+	{0x0000a0c4, 0x111f1100},
+	{0x0000a0c8, 0x111d111e},
+	{0x0000a0cc, 0x111b111c},
+	{0x0000a0d0, 0x22032204},
+	{0x0000a0d4, 0x22012202},
+	{0x0000a0d8, 0x221f2200},
+	{0x0000a0dc, 0x221d221e},
+	{0x0000a0e0, 0x33013302},
+	{0x0000a0e4, 0x331f3300},
+	{0x0000a0e8, 0x4402331e},
+	{0x0000a0ec, 0x44004401},
+	{0x0000a0f0, 0x441e441f},
+	{0x0000a0f4, 0x55015502},
+	{0x0000a0f8, 0x551f5500},
+	{0x0000a0fc, 0x6602551e},
+	{0x0000a100, 0x66006601},
+	{0x0000a104, 0x661e661f},
+	{0x0000a108, 0x7703661d},
+	{0x0000a10c, 0x77017702},
+	{0x0000a110, 0x00007700},
+	{0x0000a114, 0x00000000},
+	{0x0000a118, 0x00000000},
+	{0x0000a11c, 0x00000000},
+	{0x0000a120, 0x00000000},
+	{0x0000a124, 0x00000000},
+	{0x0000a128, 0x00000000},
+	{0x0000a12c, 0x00000000},
+	{0x0000a130, 0x00000000},
+	{0x0000a134, 0x00000000},
+	{0x0000a138, 0x00000000},
+	{0x0000a13c, 0x00000000},
+	{0x0000a140, 0x001f0000},
+	{0x0000a144, 0x111f1100},
+	{0x0000a148, 0x111d111e},
+	{0x0000a14c, 0x111b111c},
+	{0x0000a150, 0x22032204},
+	{0x0000a154, 0x22012202},
+	{0x0000a158, 0x221f2200},
+	{0x0000a15c, 0x221d221e},
+	{0x0000a160, 0x33013302},
+	{0x0000a164, 0x331f3300},
+	{0x0000a168, 0x4402331e},
+	{0x0000a16c, 0x44004401},
+	{0x0000a170, 0x441e441f},
+	{0x0000a174, 0x55015502},
+	{0x0000a178, 0x551f5500},
+	{0x0000a17c, 0x6602551e},
+	{0x0000a180, 0x66006601},
+	{0x0000a184, 0x661e661f},
+	{0x0000a188, 0x7703661d},
+	{0x0000a18c, 0x77017702},
+	{0x0000a190, 0x00007700},
+	{0x0000a194, 0x00000000},
+	{0x0000a198, 0x00000000},
+	{0x0000a19c, 0x00000000},
+	{0x0000a1a0, 0x00000000},
+	{0x0000a1a4, 0x00000000},
+	{0x0000a1a8, 0x00000000},
+	{0x0000a1ac, 0x00000000},
+	{0x0000a1b0, 0x00000000},
+	{0x0000a1b4, 0x00000000},
+	{0x0000a1b8, 0x00000000},
+	{0x0000a1bc, 0x00000000},
+	{0x0000a1c0, 0x00000000},
+	{0x0000a1c4, 0x00000000},
+	{0x0000a1c8, 0x00000000},
+	{0x0000a1cc, 0x00000000},
+	{0x0000a1d0, 0x00000000},
+	{0x0000a1d4, 0x00000000},
+	{0x0000a1d8, 0x00000000},
+	{0x0000a1dc, 0x00000000},
+	{0x0000a1e0, 0x00000000},
+	{0x0000a1e4, 0x00000000},
+	{0x0000a1e8, 0x00000000},
+	{0x0000a1ec, 0x00000000},
+	{0x0000a1f0, 0x00000396},
+	{0x0000a1f4, 0x00000396},
+	{0x0000a1f8, 0x00000396},
+	{0x0000a1fc, 0x00000296},
+};
 
 #endif /* INITVALS_9330_1P2_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar9340_initvals.h b/drivers/net/wireless/ath/ath9k/ar9340_initvals.h
index 25db9215985a..a01f0edb6518 100644
--- a/drivers/net/wireless/ath/ath9k/ar9340_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9340_initvals.h
@@ -18,6 +18,20 @@
 #ifndef INITVALS_9340_H
 #define INITVALS_9340_H
 
+#define ar9340_1p0_mac_postamble ar9300_2p2_mac_postamble
+
+#define ar9340_1p0_soc_postamble ar9300_2p2_soc_postamble
+
+#define ar9340Modes_fast_clock_1p0 ar9300Modes_fast_clock_2p2
+
+#define ar9340Common_rx_gain_table_1p0 ar9300Common_rx_gain_table_2p2
+
+#define ar9340Common_wo_xlna_rx_gain_table_1p0 ar9300Common_wo_xlna_rx_gain_table_2p2
+
+#define ar9340_1p0_baseband_core_txfir_coeff_japan_2484 ar9300_2p2_baseband_core_txfir_coeff_japan_2484
+
+#define ar9340_1p0_baseband_postamble_dfs_channel ar9300_2p2_baseband_postamble_dfs_channel
+
 static const u32 ar9340_1p0_radio_postamble[][5] = {
 	/* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
 	{0x000160ac, 0xa4646800, 0xa4646800, 0xa4646800, 0xa4646800},
@@ -100,8 +114,6 @@ static const u32 ar9340Modes_lowest_ob_db_tx_gain_table_1p0[][5] = {
 	{0x00016448, 0x24925266, 0x24925266, 0x24925266, 0x24925266},
 };
 
-#define ar9340Modes_fast_clock_1p0 ar9300Modes_fast_clock_2p2
-
 static const u32 ar9340_1p0_radio_core[][2] = {
 	/* Addr      allmodes  */
 	{0x00016000, 0x36db6db6},
@@ -215,16 +227,12 @@ static const u32 ar9340_1p0_radio_core_40M[][2] = {
 	{0x0000824c, 0x0001e800},
 };
 
-#define ar9340_1p0_mac_postamble ar9300_2p2_mac_postamble
-
-#define ar9340_1p0_soc_postamble ar9300_2p2_soc_postamble
-
 static const u32 ar9340_1p0_baseband_postamble[][5] = {
 	/* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
 	{0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8011, 0xd00a8011},
 	{0x00009820, 0x206a022e, 0x206a022e, 0x206a022e, 0x206a022e},
 	{0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0},
-	{0x00009828, 0x06903081, 0x06903081, 0x06903881, 0x06903881},
+	{0x00009828, 0x06903081, 0x06903081, 0x09103881, 0x09103881},
 	{0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
 	{0x00009830, 0x0000059c, 0x0000059c, 0x0000119c, 0x0000119c},
 	{0x00009c00, 0x000000c4, 0x000000c4, 0x000000c4, 0x000000c4},
@@ -340,9 +348,9 @@ static const u32 ar9340_1p0_baseband_core[][2] = {
 	{0x0000a370, 0x00000000},
 	{0x0000a390, 0x00000001},
 	{0x0000a394, 0x00000444},
-	{0x0000a398, 0x001f0e0f},
-	{0x0000a39c, 0x0075393f},
-	{0x0000a3a0, 0xb79f6427},
+	{0x0000a398, 0x00000000},
+	{0x0000a39c, 0x210d0401},
+	{0x0000a3a0, 0xab9a7144},
 	{0x0000a3a4, 0x00000000},
 	{0x0000a3a8, 0xaaaaaaaa},
 	{0x0000a3ac, 0x3c466478},
@@ -714,266 +722,6 @@ static const u32 ar9340Modes_ub124_tx_gain_table_1p0[][5] = {
 	{0x0000b2e8, 0xfffe0000, 0xfffe0000, 0xfffc0000, 0xfffc0000},
 };
 
-static const u32 ar9340Common_rx_gain_table_1p0[][2] = {
-	/* Addr      allmodes  */
-	{0x0000a000, 0x00010000},
-	{0x0000a004, 0x00030002},
-	{0x0000a008, 0x00050004},
-	{0x0000a00c, 0x00810080},
-	{0x0000a010, 0x00830082},
-	{0x0000a014, 0x01810180},
-	{0x0000a018, 0x01830182},
-	{0x0000a01c, 0x01850184},
-	{0x0000a020, 0x01890188},
-	{0x0000a024, 0x018b018a},
-	{0x0000a028, 0x018d018c},
-	{0x0000a02c, 0x01910190},
-	{0x0000a030, 0x01930192},
-	{0x0000a034, 0x01950194},
-	{0x0000a038, 0x038a0196},
-	{0x0000a03c, 0x038c038b},
-	{0x0000a040, 0x0390038d},
-	{0x0000a044, 0x03920391},
-	{0x0000a048, 0x03940393},
-	{0x0000a04c, 0x03960395},
-	{0x0000a050, 0x00000000},
-	{0x0000a054, 0x00000000},
-	{0x0000a058, 0x00000000},
-	{0x0000a05c, 0x00000000},
-	{0x0000a060, 0x00000000},
-	{0x0000a064, 0x00000000},
-	{0x0000a068, 0x00000000},
-	{0x0000a06c, 0x00000000},
-	{0x0000a070, 0x00000000},
-	{0x0000a074, 0x00000000},
-	{0x0000a078, 0x00000000},
-	{0x0000a07c, 0x00000000},
-	{0x0000a080, 0x22222229},
-	{0x0000a084, 0x1d1d1d1d},
-	{0x0000a088, 0x1d1d1d1d},
-	{0x0000a08c, 0x1d1d1d1d},
-	{0x0000a090, 0x171d1d1d},
-	{0x0000a094, 0x11111717},
-	{0x0000a098, 0x00030311},
-	{0x0000a09c, 0x00000000},
-	{0x0000a0a0, 0x00000000},
-	{0x0000a0a4, 0x00000000},
-	{0x0000a0a8, 0x00000000},
-	{0x0000a0ac, 0x00000000},
-	{0x0000a0b0, 0x00000000},
-	{0x0000a0b4, 0x00000000},
-	{0x0000a0b8, 0x00000000},
-	{0x0000a0bc, 0x00000000},
-	{0x0000a0c0, 0x001f0000},
-	{0x0000a0c4, 0x01000101},
-	{0x0000a0c8, 0x011e011f},
-	{0x0000a0cc, 0x011c011d},
-	{0x0000a0d0, 0x02030204},
-	{0x0000a0d4, 0x02010202},
-	{0x0000a0d8, 0x021f0200},
-	{0x0000a0dc, 0x0302021e},
-	{0x0000a0e0, 0x03000301},
-	{0x0000a0e4, 0x031e031f},
-	{0x0000a0e8, 0x0402031d},
-	{0x0000a0ec, 0x04000401},
-	{0x0000a0f0, 0x041e041f},
-	{0x0000a0f4, 0x0502041d},
-	{0x0000a0f8, 0x05000501},
-	{0x0000a0fc, 0x051e051f},
-	{0x0000a100, 0x06010602},
-	{0x0000a104, 0x061f0600},
-	{0x0000a108, 0x061d061e},
-	{0x0000a10c, 0x07020703},
-	{0x0000a110, 0x07000701},
-	{0x0000a114, 0x00000000},
-	{0x0000a118, 0x00000000},
-	{0x0000a11c, 0x00000000},
-	{0x0000a120, 0x00000000},
-	{0x0000a124, 0x00000000},
-	{0x0000a128, 0x00000000},
-	{0x0000a12c, 0x00000000},
-	{0x0000a130, 0x00000000},
-	{0x0000a134, 0x00000000},
-	{0x0000a138, 0x00000000},
-	{0x0000a13c, 0x00000000},
-	{0x0000a140, 0x001f0000},
-	{0x0000a144, 0x01000101},
-	{0x0000a148, 0x011e011f},
-	{0x0000a14c, 0x011c011d},
-	{0x0000a150, 0x02030204},
-	{0x0000a154, 0x02010202},
-	{0x0000a158, 0x021f0200},
-	{0x0000a15c, 0x0302021e},
-	{0x0000a160, 0x03000301},
-	{0x0000a164, 0x031e031f},
-	{0x0000a168, 0x0402031d},
-	{0x0000a16c, 0x04000401},
-	{0x0000a170, 0x041e041f},
-	{0x0000a174, 0x0502041d},
-	{0x0000a178, 0x05000501},
-	{0x0000a17c, 0x051e051f},
-	{0x0000a180, 0x06010602},
-	{0x0000a184, 0x061f0600},
-	{0x0000a188, 0x061d061e},
-	{0x0000a18c, 0x07020703},
-	{0x0000a190, 0x07000701},
-	{0x0000a194, 0x00000000},
-	{0x0000a198, 0x00000000},
-	{0x0000a19c, 0x00000000},
-	{0x0000a1a0, 0x00000000},
-	{0x0000a1a4, 0x00000000},
-	{0x0000a1a8, 0x00000000},
-	{0x0000a1ac, 0x00000000},
-	{0x0000a1b0, 0x00000000},
-	{0x0000a1b4, 0x00000000},
-	{0x0000a1b8, 0x00000000},
-	{0x0000a1bc, 0x00000000},
-	{0x0000a1c0, 0x00000000},
-	{0x0000a1c4, 0x00000000},
-	{0x0000a1c8, 0x00000000},
-	{0x0000a1cc, 0x00000000},
-	{0x0000a1d0, 0x00000000},
-	{0x0000a1d4, 0x00000000},
-	{0x0000a1d8, 0x00000000},
-	{0x0000a1dc, 0x00000000},
-	{0x0000a1e0, 0x00000000},
-	{0x0000a1e4, 0x00000000},
-	{0x0000a1e8, 0x00000000},
-	{0x0000a1ec, 0x00000000},
-	{0x0000a1f0, 0x00000396},
-	{0x0000a1f4, 0x00000396},
-	{0x0000a1f8, 0x00000396},
-	{0x0000a1fc, 0x00000196},
-	{0x0000b000, 0x00010000},
-	{0x0000b004, 0x00030002},
-	{0x0000b008, 0x00050004},
-	{0x0000b00c, 0x00810080},
-	{0x0000b010, 0x00830082},
-	{0x0000b014, 0x01810180},
-	{0x0000b018, 0x01830182},
-	{0x0000b01c, 0x01850184},
-	{0x0000b020, 0x02810280},
-	{0x0000b024, 0x02830282},
-	{0x0000b028, 0x02850284},
-	{0x0000b02c, 0x02890288},
-	{0x0000b030, 0x028b028a},
-	{0x0000b034, 0x0388028c},
-	{0x0000b038, 0x038a0389},
-	{0x0000b03c, 0x038c038b},
-	{0x0000b040, 0x0390038d},
-	{0x0000b044, 0x03920391},
-	{0x0000b048, 0x03940393},
-	{0x0000b04c, 0x03960395},
-	{0x0000b050, 0x00000000},
-	{0x0000b054, 0x00000000},
-	{0x0000b058, 0x00000000},
-	{0x0000b05c, 0x00000000},
-	{0x0000b060, 0x00000000},
-	{0x0000b064, 0x00000000},
-	{0x0000b068, 0x00000000},
-	{0x0000b06c, 0x00000000},
-	{0x0000b070, 0x00000000},
-	{0x0000b074, 0x00000000},
-	{0x0000b078, 0x00000000},
-	{0x0000b07c, 0x00000000},
-	{0x0000b080, 0x23232323},
-	{0x0000b084, 0x21232323},
-	{0x0000b088, 0x19191c1e},
-	{0x0000b08c, 0x12141417},
-	{0x0000b090, 0x07070e0e},
-	{0x0000b094, 0x03030305},
-	{0x0000b098, 0x00000003},
-	{0x0000b09c, 0x00000000},
-	{0x0000b0a0, 0x00000000},
-	{0x0000b0a4, 0x00000000},
-	{0x0000b0a8, 0x00000000},
-	{0x0000b0ac, 0x00000000},
-	{0x0000b0b0, 0x00000000},
-	{0x0000b0b4, 0x00000000},
-	{0x0000b0b8, 0x00000000},
-	{0x0000b0bc, 0x00000000},
-	{0x0000b0c0, 0x003f0020},
-	{0x0000b0c4, 0x00400041},
-	{0x0000b0c8, 0x0140005f},
-	{0x0000b0cc, 0x0160015f},
-	{0x0000b0d0, 0x017e017f},
-	{0x0000b0d4, 0x02410242},
-	{0x0000b0d8, 0x025f0240},
-	{0x0000b0dc, 0x027f0260},
-	{0x0000b0e0, 0x0341027e},
-	{0x0000b0e4, 0x035f0340},
-	{0x0000b0e8, 0x037f0360},
-	{0x0000b0ec, 0x04400441},
-	{0x0000b0f0, 0x0460045f},
-	{0x0000b0f4, 0x0541047f},
-	{0x0000b0f8, 0x055f0540},
-	{0x0000b0fc, 0x057f0560},
-	{0x0000b100, 0x06400641},
-	{0x0000b104, 0x0660065f},
-	{0x0000b108, 0x067e067f},
-	{0x0000b10c, 0x07410742},
-	{0x0000b110, 0x075f0740},
-	{0x0000b114, 0x077f0760},
-	{0x0000b118, 0x07800781},
-	{0x0000b11c, 0x07a0079f},
-	{0x0000b120, 0x07c107bf},
-	{0x0000b124, 0x000007c0},
-	{0x0000b128, 0x00000000},
-	{0x0000b12c, 0x00000000},
-	{0x0000b130, 0x00000000},
-	{0x0000b134, 0x00000000},
-	{0x0000b138, 0x00000000},
-	{0x0000b13c, 0x00000000},
-	{0x0000b140, 0x003f0020},
-	{0x0000b144, 0x00400041},
-	{0x0000b148, 0x0140005f},
-	{0x0000b14c, 0x0160015f},
-	{0x0000b150, 0x017e017f},
-	{0x0000b154, 0x02410242},
-	{0x0000b158, 0x025f0240},
-	{0x0000b15c, 0x027f0260},
-	{0x0000b160, 0x0341027e},
-	{0x0000b164, 0x035f0340},
-	{0x0000b168, 0x037f0360},
-	{0x0000b16c, 0x04400441},
-	{0x0000b170, 0x0460045f},
-	{0x0000b174, 0x0541047f},
-	{0x0000b178, 0x055f0540},
-	{0x0000b17c, 0x057f0560},
-	{0x0000b180, 0x06400641},
-	{0x0000b184, 0x0660065f},
-	{0x0000b188, 0x067e067f},
-	{0x0000b18c, 0x07410742},
-	{0x0000b190, 0x075f0740},
-	{0x0000b194, 0x077f0760},
-	{0x0000b198, 0x07800781},
-	{0x0000b19c, 0x07a0079f},
-	{0x0000b1a0, 0x07c107bf},
-	{0x0000b1a4, 0x000007c0},
-	{0x0000b1a8, 0x00000000},
-	{0x0000b1ac, 0x00000000},
-	{0x0000b1b0, 0x00000000},
-	{0x0000b1b4, 0x00000000},
-	{0x0000b1b8, 0x00000000},
-	{0x0000b1bc, 0x00000000},
-	{0x0000b1c0, 0x00000000},
-	{0x0000b1c4, 0x00000000},
-	{0x0000b1c8, 0x00000000},
-	{0x0000b1cc, 0x00000000},
-	{0x0000b1d0, 0x00000000},
-	{0x0000b1d4, 0x00000000},
-	{0x0000b1d8, 0x00000000},
-	{0x0000b1dc, 0x00000000},
-	{0x0000b1e0, 0x00000000},
-	{0x0000b1e4, 0x00000000},
-	{0x0000b1e8, 0x00000000},
-	{0x0000b1ec, 0x00000000},
-	{0x0000b1f0, 0x00000396},
-	{0x0000b1f4, 0x00000396},
-	{0x0000b1f8, 0x00000396},
-	{0x0000b1fc, 0x00000196},
-};
-
 static const u32 ar9340Modes_low_ob_db_tx_gain_table_1p0[][5] = {
 	/* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
 	{0x0000a2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
@@ -1437,8 +1185,6 @@ static const u32 ar9340_1p0_mac_core[][2] = {
 	{0x000083d0, 0x000101ff},
 };
 
-#define ar9340Common_wo_xlna_rx_gain_table_1p0 ar9300Common_wo_xlna_rx_gain_table_2p2
-
 static const u32 ar9340_1p0_soc_preamble[][2] = {
 	/* Addr      allmodes  */
 	{0x00007008, 0x00000000},
@@ -1447,4 +1193,106 @@ static const u32 ar9340_1p0_soc_preamble[][2] = {
 	{0x00007038, 0x000004c2},
 };
 
+static const u32 ar9340_cus227_tx_gain_table_1p0[][5] = {
+	/* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
+	{0x0000a2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
+	{0x0000a2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
+	{0x0000a2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
+	{0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+	{0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
+	{0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
+	{0x0000a508, 0x0a000020, 0x0a000020, 0x08000004, 0x08000004},
+	{0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200},
+	{0x0000a510, 0x16000220, 0x16000220, 0x0f000202, 0x0f000202},
+	{0x0000a514, 0x1c000223, 0x1c000223, 0x11000400, 0x11000400},
+	{0x0000a518, 0x21002220, 0x21002220, 0x15000402, 0x15000402},
+	{0x0000a51c, 0x27002223, 0x27002223, 0x19000404, 0x19000404},
+	{0x0000a520, 0x2c022220, 0x2c022220, 0x1b000603, 0x1b000603},
+	{0x0000a524, 0x30022222, 0x30022222, 0x1f000a02, 0x1f000a02},
+	{0x0000a528, 0x35022225, 0x35022225, 0x23000a04, 0x23000a04},
+	{0x0000a52c, 0x3b02222a, 0x3b02222a, 0x26000a20, 0x26000a20},
+	{0x0000a530, 0x3f02222c, 0x3f02222c, 0x2a000e20, 0x2a000e20},
+	{0x0000a534, 0x4202242a, 0x4202242a, 0x2e000e22, 0x2e000e22},
+	{0x0000a538, 0x4702244a, 0x4702244a, 0x31000e24, 0x31000e24},
+	{0x0000a53c, 0x4b02244c, 0x4b02244c, 0x34001640, 0x34001640},
+	{0x0000a540, 0x4e02246c, 0x4e02246c, 0x38001660, 0x38001660},
+	{0x0000a544, 0x5302266c, 0x5302266c, 0x3b001861, 0x3b001861},
+	{0x0000a548, 0x5702286c, 0x5702286c, 0x3e001a81, 0x3e001a81},
+	{0x0000a54c, 0x5c02486b, 0x5c02486b, 0x42001a83, 0x42001a83},
+	{0x0000a550, 0x61024a6c, 0x61024a6c, 0x44001c84, 0x44001c84},
+	{0x0000a554, 0x66026a6c, 0x66026a6c, 0x48001ce3, 0x48001ce3},
+	{0x0000a558, 0x6b026e6c, 0x6b026e6c, 0x4c001ce5, 0x4c001ce5},
+	{0x0000a55c, 0x7002708c, 0x7002708c, 0x50001ce9, 0x50001ce9},
+	{0x0000a560, 0x7302b08a, 0x7302b08a, 0x54001ceb, 0x54001ceb},
+	{0x0000a564, 0x7702b08c, 0x7702b08c, 0x56001eec, 0x56001eec},
+	{0x0000a568, 0x7702b08c, 0x7702b08c, 0x56001eec, 0x56001eec},
+	{0x0000a56c, 0x7702b08c, 0x7702b08c, 0x56001eec, 0x56001eec},
+	{0x0000a570, 0x7702b08c, 0x7702b08c, 0x56001eec, 0x56001eec},
+	{0x0000a574, 0x7702b08c, 0x7702b08c, 0x56001eec, 0x56001eec},
+	{0x0000a578, 0x7702b08c, 0x7702b08c, 0x56001eec, 0x56001eec},
+	{0x0000a57c, 0x7702b08c, 0x7702b08c, 0x56001eec, 0x56001eec},
+	{0x0000a580, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
+	{0x0000a584, 0x06800003, 0x06800003, 0x04800002, 0x04800002},
+	{0x0000a588, 0x0a800020, 0x0a800020, 0x08800004, 0x08800004},
+	{0x0000a58c, 0x10800023, 0x10800023, 0x0b800200, 0x0b800200},
+	{0x0000a590, 0x16800220, 0x16800220, 0x0f800202, 0x0f800202},
+	{0x0000a594, 0x1c800223, 0x1c800223, 0x11800400, 0x11800400},
+	{0x0000a598, 0x21820220, 0x21820220, 0x15800402, 0x15800402},
+	{0x0000a59c, 0x27820223, 0x27820223, 0x19800404, 0x19800404},
+	{0x0000a5a0, 0x2b822220, 0x2b822220, 0x1b800603, 0x1b800603},
+	{0x0000a5a4, 0x2f822222, 0x2f822222, 0x1f800a02, 0x1f800a02},
+	{0x0000a5a8, 0x34822225, 0x34822225, 0x23800a04, 0x23800a04},
+	{0x0000a5ac, 0x3a82222a, 0x3a82222a, 0x26800a20, 0x26800a20},
+	{0x0000a5b0, 0x3e82222c, 0x3e82222c, 0x2a800e20, 0x2a800e20},
+	{0x0000a5b4, 0x4282242a, 0x4282242a, 0x2e800e22, 0x2e800e22},
+	{0x0000a5b8, 0x4782244a, 0x4782244a, 0x31800e24, 0x31800e24},
+	{0x0000a5bc, 0x4b82244c, 0x4b82244c, 0x34801640, 0x34801640},
+	{0x0000a5c0, 0x4e82246c, 0x4e82246c, 0x38801660, 0x38801660},
+	{0x0000a5c4, 0x5382266c, 0x5382266c, 0x3b801861, 0x3b801861},
+	{0x0000a5c8, 0x5782286c, 0x5782286c, 0x3e801a81, 0x3e801a81},
+	{0x0000a5cc, 0x5c84286b, 0x5c84286b, 0x42801a83, 0x42801a83},
+	{0x0000a5d0, 0x61842a6c, 0x61842a6c, 0x44801c84, 0x44801c84},
+	{0x0000a5d4, 0x66862a6c, 0x66862a6c, 0x48801ce3, 0x48801ce3},
+	{0x0000a5d8, 0x6b862e6c, 0x6b862e6c, 0x4c801ce5, 0x4c801ce5},
+	{0x0000a5dc, 0x7086308c, 0x7086308c, 0x50801ce9, 0x50801ce9},
+	{0x0000a5e0, 0x738a308a, 0x738a308a, 0x54801ceb, 0x54801ceb},
+	{0x0000a5e4, 0x778a308c, 0x778a308c, 0x56801eec, 0x56801eec},
+	{0x0000a5e8, 0x778a308c, 0x778a308c, 0x56801eec, 0x56801eec},
+	{0x0000a5ec, 0x778a308c, 0x778a308c, 0x56801eec, 0x56801eec},
+	{0x0000a5f0, 0x778a308c, 0x778a308c, 0x56801eec, 0x56801eec},
+	{0x0000a5f4, 0x778a308c, 0x778a308c, 0x56801eec, 0x56801eec},
+	{0x0000a5f8, 0x778a308c, 0x778a308c, 0x56801eec, 0x56801eec},
+	{0x0000a5fc, 0x778a308c, 0x778a308c, 0x56801eec, 0x56801eec},
+	{0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000a614, 0x01404000, 0x01404000, 0x01404000, 0x01404000},
+	{0x0000a618, 0x01404501, 0x01404501, 0x01404501, 0x01404501},
+	{0x0000a61c, 0x02008802, 0x02008802, 0x02008501, 0x02008501},
+	{0x0000a620, 0x0300cc03, 0x0300cc03, 0x0280ca03, 0x0280ca03},
+	{0x0000a624, 0x0300cc03, 0x0300cc03, 0x03010c04, 0x03010c04},
+	{0x0000a628, 0x0300cc03, 0x0300cc03, 0x04014c04, 0x04014c04},
+	{0x0000a62c, 0x03810c03, 0x03810c03, 0x04015005, 0x04015005},
+	{0x0000a630, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+	{0x0000a634, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+	{0x0000a638, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+	{0x0000a63c, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+	{0x0000b2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
+	{0x0000b2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
+	{0x0000b2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
+	{0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+	{0x00016044, 0x056db2db, 0x056db2db, 0x03b6d2e4, 0x03b6d2e4},
+	{0x00016048, 0x24925666, 0x24925666, 0x8e481266, 0x8e481266},
+	{0x00016280, 0x01000015, 0x01000015, 0x01001015, 0x01001015},
+	{0x00016288, 0x30318000, 0x30318000, 0x00318000, 0x00318000},
+	{0x00016444, 0x056db2db, 0x056db2db, 0x03b6d2e4, 0x03b6d2e4},
+	{0x00016448, 0x24925666, 0x24925666, 0x8e481266, 0x8e481266},
+	{0x0000a3a4, 0x00000011, 0x00000011, 0x00000011, 0x00000011},
+	{0x0000a3a8, 0x3c3c3c3c, 0x3c3c3c3c, 0x3c3c3c3c, 0x3c3c3c3c},
+	{0x0000a3ac, 0x30303030, 0x30303030, 0x30303030, 0x30303030},
+};
+
 #endif /* INITVALS_9340_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
index 092b9d412e7f..1cc13569b17b 100644
--- a/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
@@ -20,7 +20,15 @@
 
 /* AR9462 2.0 */
 
-static const u32 ar9462_modes_fast_clock_2p0[][3] = {
+#define ar9462_2p0_mac_postamble ar9331_1p1_mac_postamble
+
+#define ar9462_2p0_common_wo_xlna_rx_gain ar9300Common_wo_xlna_rx_gain_table_2p2
+
+#define ar9462_2p0_common_5g_xlna_only_rxgain ar9462_2p0_common_mixed_rx_gain
+
+#define ar9462_2p0_baseband_core_txfir_coeff_japan_2484 ar9300_2p2_baseband_core_txfir_coeff_japan_2484
+
+static const u32 ar9462_2p0_modes_fast_clock[][3] = {
 	/* Addr      5G_HT20     5G_HT40   */
 	{0x00001030, 0x00000268, 0x000004d0},
 	{0x00001070, 0x0000018c, 0x00000318},
@@ -33,13 +41,6 @@ static const u32 ar9462_modes_fast_clock_2p0[][3] = {
 	{0x0000a254, 0x00000898, 0x00001130},
 };
 
-static const u32 ar9462_pciephy_clkreq_enable_L1_2p0[][2] = {
-	/* Addr      allmodes  */
-	{0x00018c00, 0x18253ede},
-	{0x00018c04, 0x000801d8},
-	{0x00018c08, 0x0003780c},
-};
-
 static const u32 ar9462_2p0_baseband_postamble[][5] = {
 	/* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
 	{0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8011, 0xd00a800d},
@@ -99,7 +100,7 @@ static const u32 ar9462_2p0_baseband_postamble[][5] = {
 	{0x0000b284, 0x00000000, 0x00000000, 0x00000550, 0x00000550},
 };
 
-static const u32 ar9462_common_rx_gain_table_2p0[][2] = {
+static const u32 ar9462_2p0_common_rx_gain[][2] = {
 	/* Addr      allmodes  */
 	{0x0000a000, 0x00010000},
 	{0x0000a004, 0x00030002},
@@ -359,20 +360,13 @@ static const u32 ar9462_common_rx_gain_table_2p0[][2] = {
 	{0x0000b1fc, 0x00000196},
 };
 
-static const u32 ar9462_pciephy_clkreq_disable_L1_2p0[][2] = {
+static const u32 ar9462_2p0_pciephy_clkreq_disable_L1[][2] = {
 	/* Addr      allmodes  */
 	{0x00018c00, 0x18213ede},
 	{0x00018c04, 0x000801d8},
 	{0x00018c08, 0x0003780c},
 };
 
-static const u32 ar9462_pciephy_pll_on_clkreq_disable_L1_2p0[][2] = {
-	/* Addr      allmodes  */
-	{0x00018c00, 0x18212ede},
-	{0x00018c04, 0x000801d8},
-	{0x00018c08, 0x0003780c},
-};
-
 static const u32 ar9462_2p0_radio_postamble_sys2ant[][5] = {
 	/* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
 	{0x000160ac, 0xa4646c08, 0xa4646c08, 0x24645808, 0x24645808},
@@ -380,274 +374,7 @@ static const u32 ar9462_2p0_radio_postamble_sys2ant[][5] = {
 	{0x00016540, 0x10804008, 0x10804008, 0x50804008, 0x50804008},
 };
 
-static const u32 ar9462_common_wo_xlna_rx_gain_table_2p0[][2] = {
-	/* Addr      allmodes  */
-	{0x0000a000, 0x00010000},
-	{0x0000a004, 0x00030002},
-	{0x0000a008, 0x00050004},
-	{0x0000a00c, 0x00810080},
-	{0x0000a010, 0x00830082},
-	{0x0000a014, 0x01810180},
-	{0x0000a018, 0x01830182},
-	{0x0000a01c, 0x01850184},
-	{0x0000a020, 0x01890188},
-	{0x0000a024, 0x018b018a},
-	{0x0000a028, 0x018d018c},
-	{0x0000a02c, 0x03820190},
-	{0x0000a030, 0x03840383},
-	{0x0000a034, 0x03880385},
-	{0x0000a038, 0x038a0389},
-	{0x0000a03c, 0x038c038b},
-	{0x0000a040, 0x0390038d},
-	{0x0000a044, 0x03920391},
-	{0x0000a048, 0x03940393},
-	{0x0000a04c, 0x03960395},
-	{0x0000a050, 0x00000000},
-	{0x0000a054, 0x00000000},
-	{0x0000a058, 0x00000000},
-	{0x0000a05c, 0x00000000},
-	{0x0000a060, 0x00000000},
-	{0x0000a064, 0x00000000},
-	{0x0000a068, 0x00000000},
-	{0x0000a06c, 0x00000000},
-	{0x0000a070, 0x00000000},
-	{0x0000a074, 0x00000000},
-	{0x0000a078, 0x00000000},
-	{0x0000a07c, 0x00000000},
-	{0x0000a080, 0x29292929},
-	{0x0000a084, 0x29292929},
-	{0x0000a088, 0x29292929},
-	{0x0000a08c, 0x29292929},
-	{0x0000a090, 0x22292929},
-	{0x0000a094, 0x1d1d2222},
-	{0x0000a098, 0x0c111117},
-	{0x0000a09c, 0x00030303},
-	{0x0000a0a0, 0x00000000},
-	{0x0000a0a4, 0x00000000},
-	{0x0000a0a8, 0x00000000},
-	{0x0000a0ac, 0x00000000},
-	{0x0000a0b0, 0x00000000},
-	{0x0000a0b4, 0x00000000},
-	{0x0000a0b8, 0x00000000},
-	{0x0000a0bc, 0x00000000},
-	{0x0000a0c0, 0x001f0000},
-	{0x0000a0c4, 0x01000101},
-	{0x0000a0c8, 0x011e011f},
-	{0x0000a0cc, 0x011c011d},
-	{0x0000a0d0, 0x02030204},
-	{0x0000a0d4, 0x02010202},
-	{0x0000a0d8, 0x021f0200},
-	{0x0000a0dc, 0x0302021e},
-	{0x0000a0e0, 0x03000301},
-	{0x0000a0e4, 0x031e031f},
-	{0x0000a0e8, 0x0402031d},
-	{0x0000a0ec, 0x04000401},
-	{0x0000a0f0, 0x041e041f},
-	{0x0000a0f4, 0x0502041d},
-	{0x0000a0f8, 0x05000501},
-	{0x0000a0fc, 0x051e051f},
-	{0x0000a100, 0x06010602},
-	{0x0000a104, 0x061f0600},
-	{0x0000a108, 0x061d061e},
-	{0x0000a10c, 0x07020703},
-	{0x0000a110, 0x07000701},
-	{0x0000a114, 0x00000000},
-	{0x0000a118, 0x00000000},
-	{0x0000a11c, 0x00000000},
-	{0x0000a120, 0x00000000},
-	{0x0000a124, 0x00000000},
-	{0x0000a128, 0x00000000},
-	{0x0000a12c, 0x00000000},
-	{0x0000a130, 0x00000000},
-	{0x0000a134, 0x00000000},
-	{0x0000a138, 0x00000000},
-	{0x0000a13c, 0x00000000},
-	{0x0000a140, 0x001f0000},
-	{0x0000a144, 0x01000101},
-	{0x0000a148, 0x011e011f},
-	{0x0000a14c, 0x011c011d},
-	{0x0000a150, 0x02030204},
-	{0x0000a154, 0x02010202},
-	{0x0000a158, 0x021f0200},
-	{0x0000a15c, 0x0302021e},
-	{0x0000a160, 0x03000301},
-	{0x0000a164, 0x031e031f},
-	{0x0000a168, 0x0402031d},
-	{0x0000a16c, 0x04000401},
-	{0x0000a170, 0x041e041f},
-	{0x0000a174, 0x0502041d},
-	{0x0000a178, 0x05000501},
-	{0x0000a17c, 0x051e051f},
-	{0x0000a180, 0x06010602},
-	{0x0000a184, 0x061f0600},
-	{0x0000a188, 0x061d061e},
-	{0x0000a18c, 0x07020703},
-	{0x0000a190, 0x07000701},
-	{0x0000a194, 0x00000000},
-	{0x0000a198, 0x00000000},
-	{0x0000a19c, 0x00000000},
-	{0x0000a1a0, 0x00000000},
-	{0x0000a1a4, 0x00000000},
-	{0x0000a1a8, 0x00000000},
-	{0x0000a1ac, 0x00000000},
-	{0x0000a1b0, 0x00000000},
-	{0x0000a1b4, 0x00000000},
-	{0x0000a1b8, 0x00000000},
-	{0x0000a1bc, 0x00000000},
-	{0x0000a1c0, 0x00000000},
-	{0x0000a1c4, 0x00000000},
-	{0x0000a1c8, 0x00000000},
-	{0x0000a1cc, 0x00000000},
-	{0x0000a1d0, 0x00000000},
-	{0x0000a1d4, 0x00000000},
-	{0x0000a1d8, 0x00000000},
-	{0x0000a1dc, 0x00000000},
-	{0x0000a1e0, 0x00000000},
-	{0x0000a1e4, 0x00000000},
-	{0x0000a1e8, 0x00000000},
-	{0x0000a1ec, 0x00000000},
-	{0x0000a1f0, 0x00000396},
-	{0x0000a1f4, 0x00000396},
-	{0x0000a1f8, 0x00000396},
-	{0x0000a1fc, 0x00000196},
-	{0x0000b000, 0x00010000},
-	{0x0000b004, 0x00030002},
-	{0x0000b008, 0x00050004},
-	{0x0000b00c, 0x00810080},
-	{0x0000b010, 0x00830082},
-	{0x0000b014, 0x01810180},
-	{0x0000b018, 0x01830182},
-	{0x0000b01c, 0x01850184},
-	{0x0000b020, 0x02810280},
-	{0x0000b024, 0x02830282},
-	{0x0000b028, 0x02850284},
-	{0x0000b02c, 0x02890288},
-	{0x0000b030, 0x028b028a},
-	{0x0000b034, 0x0388028c},
-	{0x0000b038, 0x038a0389},
-	{0x0000b03c, 0x038c038b},
-	{0x0000b040, 0x0390038d},
-	{0x0000b044, 0x03920391},
-	{0x0000b048, 0x03940393},
-	{0x0000b04c, 0x03960395},
-	{0x0000b050, 0x00000000},
-	{0x0000b054, 0x00000000},
-	{0x0000b058, 0x00000000},
-	{0x0000b05c, 0x00000000},
-	{0x0000b060, 0x00000000},
-	{0x0000b064, 0x00000000},
-	{0x0000b068, 0x00000000},
-	{0x0000b06c, 0x00000000},
-	{0x0000b070, 0x00000000},
-	{0x0000b074, 0x00000000},
-	{0x0000b078, 0x00000000},
-	{0x0000b07c, 0x00000000},
-	{0x0000b080, 0x32323232},
-	{0x0000b084, 0x2f2f3232},
-	{0x0000b088, 0x23282a2d},
-	{0x0000b08c, 0x1c1e2123},
-	{0x0000b090, 0x14171919},
-	{0x0000b094, 0x0e0e1214},
-	{0x0000b098, 0x03050707},
-	{0x0000b09c, 0x00030303},
-	{0x0000b0a0, 0x00000000},
-	{0x0000b0a4, 0x00000000},
-	{0x0000b0a8, 0x00000000},
-	{0x0000b0ac, 0x00000000},
-	{0x0000b0b0, 0x00000000},
-	{0x0000b0b4, 0x00000000},
-	{0x0000b0b8, 0x00000000},
-	{0x0000b0bc, 0x00000000},
-	{0x0000b0c0, 0x003f0020},
-	{0x0000b0c4, 0x00400041},
-	{0x0000b0c8, 0x0140005f},
-	{0x0000b0cc, 0x0160015f},
-	{0x0000b0d0, 0x017e017f},
-	{0x0000b0d4, 0x02410242},
-	{0x0000b0d8, 0x025f0240},
-	{0x0000b0dc, 0x027f0260},
-	{0x0000b0e0, 0x0341027e},
-	{0x0000b0e4, 0x035f0340},
-	{0x0000b0e8, 0x037f0360},
-	{0x0000b0ec, 0x04400441},
-	{0x0000b0f0, 0x0460045f},
-	{0x0000b0f4, 0x0541047f},
-	{0x0000b0f8, 0x055f0540},
-	{0x0000b0fc, 0x057f0560},
-	{0x0000b100, 0x06400641},
-	{0x0000b104, 0x0660065f},
-	{0x0000b108, 0x067e067f},
-	{0x0000b10c, 0x07410742},
-	{0x0000b110, 0x075f0740},
-	{0x0000b114, 0x077f0760},
-	{0x0000b118, 0x07800781},
-	{0x0000b11c, 0x07a0079f},
-	{0x0000b120, 0x07c107bf},
-	{0x0000b124, 0x000007c0},
-	{0x0000b128, 0x00000000},
-	{0x0000b12c, 0x00000000},
-	{0x0000b130, 0x00000000},
-	{0x0000b134, 0x00000000},
-	{0x0000b138, 0x00000000},
-	{0x0000b13c, 0x00000000},
-	{0x0000b140, 0x003f0020},
-	{0x0000b144, 0x00400041},
-	{0x0000b148, 0x0140005f},
-	{0x0000b14c, 0x0160015f},
-	{0x0000b150, 0x017e017f},
-	{0x0000b154, 0x02410242},
-	{0x0000b158, 0x025f0240},
-	{0x0000b15c, 0x027f0260},
-	{0x0000b160, 0x0341027e},
-	{0x0000b164, 0x035f0340},
-	{0x0000b168, 0x037f0360},
-	{0x0000b16c, 0x04400441},
-	{0x0000b170, 0x0460045f},
-	{0x0000b174, 0x0541047f},
-	{0x0000b178, 0x055f0540},
-	{0x0000b17c, 0x057f0560},
-	{0x0000b180, 0x06400641},
-	{0x0000b184, 0x0660065f},
-	{0x0000b188, 0x067e067f},
-	{0x0000b18c, 0x07410742},
-	{0x0000b190, 0x075f0740},
-	{0x0000b194, 0x077f0760},
-	{0x0000b198, 0x07800781},
-	{0x0000b19c, 0x07a0079f},
-	{0x0000b1a0, 0x07c107bf},
-	{0x0000b1a4, 0x000007c0},
-	{0x0000b1a8, 0x00000000},
-	{0x0000b1ac, 0x00000000},
-	{0x0000b1b0, 0x00000000},
-	{0x0000b1b4, 0x00000000},
-	{0x0000b1b8, 0x00000000},
-	{0x0000b1bc, 0x00000000},
-	{0x0000b1c0, 0x00000000},
-	{0x0000b1c4, 0x00000000},
-	{0x0000b1c8, 0x00000000},
-	{0x0000b1cc, 0x00000000},
-	{0x0000b1d0, 0x00000000},
-	{0x0000b1d4, 0x00000000},
-	{0x0000b1d8, 0x00000000},
-	{0x0000b1dc, 0x00000000},
-	{0x0000b1e0, 0x00000000},
-	{0x0000b1e4, 0x00000000},
-	{0x0000b1e8, 0x00000000},
-	{0x0000b1ec, 0x00000000},
-	{0x0000b1f0, 0x00000396},
-	{0x0000b1f4, 0x00000396},
-	{0x0000b1f8, 0x00000396},
-	{0x0000b1fc, 0x00000196},
-};
-
-static const u32 ar9462_2p0_baseband_core_txfir_coeff_japan_2484[][2] = {
-	/* Addr      allmodes  */
-	{0x0000a398, 0x00000000},
-	{0x0000a39c, 0x6f7f0301},
-	{0x0000a3a0, 0xca9228ee},
-};
-
-static const u32 ar9462_modes_low_ob_db_tx_gain_table_2p0[][5] = {
+static const u32 ar9462_2p0_modes_low_ob_db_tx_gain[][5] = {
 	/* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
 	{0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002},
 	{0x0000a2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
@@ -879,7 +606,7 @@ static const u32 ar9462_2p0_radio_postamble[][5] = {
 	{0x0001650c, 0x48000000, 0x40000000, 0x40000000, 0x40000000},
 };
 
-static const u32 ar9462_modes_mix_ob_db_tx_gain_table_2p0[][5] = {
+static const u32 ar9462_2p0_modes_mix_ob_db_tx_gain[][5] = {
 	/* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
 	{0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002},
 	{0x0000a2dc, 0x01feee00, 0x01feee00, 0x03aaa352, 0x03aaa352},
@@ -942,7 +669,7 @@ static const u32 ar9462_modes_mix_ob_db_tx_gain_table_2p0[][5] = {
 	{0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
 };
 
-static const u32 ar9462_modes_high_ob_db_tx_gain_table_2p0[][5] = {
+static const u32 ar9462_2p0_modes_high_ob_db_tx_gain[][5] = {
 	/* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
 	{0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002},
 	{0x0000a2dc, 0x01feee00, 0x01feee00, 0x03aaa352, 0x03aaa352},
@@ -1240,19 +967,7 @@ static const u32 ar9462_2p0_mac_core[][2] = {
 	{0x000083d0, 0x000301ff},
 };
 
-static const u32 ar9462_2p0_mac_postamble[][5] = {
-	/* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
-	{0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
-	{0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c},
-	{0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38},
-	{0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00},
-	{0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b},
-	{0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810},
-	{0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a},
-	{0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440},
-};
-
-static const u32 ar9462_common_mixed_rx_gain_table_2p0[][2] = {
+static const u32 ar9462_2p0_common_mixed_rx_gain[][2] = {
 	/* Addr      allmodes  */
 	{0x0000a000, 0x00010000},
 	{0x0000a004, 0x00030002},
@@ -1517,266 +1232,6 @@ static const u32 ar9462_2p0_baseband_postamble_5g_xlna[][5] = {
 	{0x00009e3c, 0xcf946220, 0xcf946220, 0xcfd5c782, 0xcfd5c282},
 };
 
-static const u32 ar9462_2p0_5g_xlna_only_rxgain[][2] = {
-	/* Addr      allmodes  */
-	{0x0000a000, 0x00010000},
-	{0x0000a004, 0x00030002},
-	{0x0000a008, 0x00050004},
-	{0x0000a00c, 0x00810080},
-	{0x0000a010, 0x00830082},
-	{0x0000a014, 0x01810180},
-	{0x0000a018, 0x01830182},
-	{0x0000a01c, 0x01850184},
-	{0x0000a020, 0x01890188},
-	{0x0000a024, 0x018b018a},
-	{0x0000a028, 0x018d018c},
-	{0x0000a02c, 0x03820190},
-	{0x0000a030, 0x03840383},
-	{0x0000a034, 0x03880385},
-	{0x0000a038, 0x038a0389},
-	{0x0000a03c, 0x038c038b},
-	{0x0000a040, 0x0390038d},
-	{0x0000a044, 0x03920391},
-	{0x0000a048, 0x03940393},
-	{0x0000a04c, 0x03960395},
-	{0x0000a050, 0x00000000},
-	{0x0000a054, 0x00000000},
-	{0x0000a058, 0x00000000},
-	{0x0000a05c, 0x00000000},
-	{0x0000a060, 0x00000000},
-	{0x0000a064, 0x00000000},
-	{0x0000a068, 0x00000000},
-	{0x0000a06c, 0x00000000},
-	{0x0000a070, 0x00000000},
-	{0x0000a074, 0x00000000},
-	{0x0000a078, 0x00000000},
-	{0x0000a07c, 0x00000000},
-	{0x0000a080, 0x29292929},
-	{0x0000a084, 0x29292929},
-	{0x0000a088, 0x29292929},
-	{0x0000a08c, 0x29292929},
-	{0x0000a090, 0x22292929},
-	{0x0000a094, 0x1d1d2222},
-	{0x0000a098, 0x0c111117},
-	{0x0000a09c, 0x00030303},
-	{0x0000a0a0, 0x00000000},
-	{0x0000a0a4, 0x00000000},
-	{0x0000a0a8, 0x00000000},
-	{0x0000a0ac, 0x00000000},
-	{0x0000a0b0, 0x00000000},
-	{0x0000a0b4, 0x00000000},
-	{0x0000a0b8, 0x00000000},
-	{0x0000a0bc, 0x00000000},
-	{0x0000a0c0, 0x001f0000},
-	{0x0000a0c4, 0x01000101},
-	{0x0000a0c8, 0x011e011f},
-	{0x0000a0cc, 0x011c011d},
-	{0x0000a0d0, 0x02030204},
-	{0x0000a0d4, 0x02010202},
-	{0x0000a0d8, 0x021f0200},
-	{0x0000a0dc, 0x0302021e},
-	{0x0000a0e0, 0x03000301},
-	{0x0000a0e4, 0x031e031f},
-	{0x0000a0e8, 0x0402031d},
-	{0x0000a0ec, 0x04000401},
-	{0x0000a0f0, 0x041e041f},
-	{0x0000a0f4, 0x0502041d},
-	{0x0000a0f8, 0x05000501},
-	{0x0000a0fc, 0x051e051f},
-	{0x0000a100, 0x06010602},
-	{0x0000a104, 0x061f0600},
-	{0x0000a108, 0x061d061e},
-	{0x0000a10c, 0x07020703},
-	{0x0000a110, 0x07000701},
-	{0x0000a114, 0x00000000},
-	{0x0000a118, 0x00000000},
-	{0x0000a11c, 0x00000000},
-	{0x0000a120, 0x00000000},
-	{0x0000a124, 0x00000000},
-	{0x0000a128, 0x00000000},
-	{0x0000a12c, 0x00000000},
-	{0x0000a130, 0x00000000},
-	{0x0000a134, 0x00000000},
-	{0x0000a138, 0x00000000},
-	{0x0000a13c, 0x00000000},
-	{0x0000a140, 0x001f0000},
-	{0x0000a144, 0x01000101},
-	{0x0000a148, 0x011e011f},
-	{0x0000a14c, 0x011c011d},
-	{0x0000a150, 0x02030204},
-	{0x0000a154, 0x02010202},
-	{0x0000a158, 0x021f0200},
-	{0x0000a15c, 0x0302021e},
-	{0x0000a160, 0x03000301},
-	{0x0000a164, 0x031e031f},
-	{0x0000a168, 0x0402031d},
-	{0x0000a16c, 0x04000401},
-	{0x0000a170, 0x041e041f},
-	{0x0000a174, 0x0502041d},
-	{0x0000a178, 0x05000501},
-	{0x0000a17c, 0x051e051f},
-	{0x0000a180, 0x06010602},
-	{0x0000a184, 0x061f0600},
-	{0x0000a188, 0x061d061e},
-	{0x0000a18c, 0x07020703},
-	{0x0000a190, 0x07000701},
-	{0x0000a194, 0x00000000},
-	{0x0000a198, 0x00000000},
-	{0x0000a19c, 0x00000000},
-	{0x0000a1a0, 0x00000000},
-	{0x0000a1a4, 0x00000000},
-	{0x0000a1a8, 0x00000000},
-	{0x0000a1ac, 0x00000000},
-	{0x0000a1b0, 0x00000000},
-	{0x0000a1b4, 0x00000000},
-	{0x0000a1b8, 0x00000000},
-	{0x0000a1bc, 0x00000000},
-	{0x0000a1c0, 0x00000000},
-	{0x0000a1c4, 0x00000000},
-	{0x0000a1c8, 0x00000000},
-	{0x0000a1cc, 0x00000000},
-	{0x0000a1d0, 0x00000000},
-	{0x0000a1d4, 0x00000000},
-	{0x0000a1d8, 0x00000000},
-	{0x0000a1dc, 0x00000000},
-	{0x0000a1e0, 0x00000000},
-	{0x0000a1e4, 0x00000000},
-	{0x0000a1e8, 0x00000000},
-	{0x0000a1ec, 0x00000000},
-	{0x0000a1f0, 0x00000396},
-	{0x0000a1f4, 0x00000396},
-	{0x0000a1f8, 0x00000396},
-	{0x0000a1fc, 0x00000196},
-	{0x0000b000, 0x00010000},
-	{0x0000b004, 0x00030002},
-	{0x0000b008, 0x00050004},
-	{0x0000b00c, 0x00810080},
-	{0x0000b010, 0x00830082},
-	{0x0000b014, 0x01810180},
-	{0x0000b018, 0x01830182},
-	{0x0000b01c, 0x01850184},
-	{0x0000b020, 0x02810280},
-	{0x0000b024, 0x02830282},
-	{0x0000b028, 0x02850284},
-	{0x0000b02c, 0x02890288},
-	{0x0000b030, 0x028b028a},
-	{0x0000b034, 0x0388028c},
-	{0x0000b038, 0x038a0389},
-	{0x0000b03c, 0x038c038b},
-	{0x0000b040, 0x0390038d},
-	{0x0000b044, 0x03920391},
-	{0x0000b048, 0x03940393},
-	{0x0000b04c, 0x03960395},
-	{0x0000b050, 0x00000000},
-	{0x0000b054, 0x00000000},
-	{0x0000b058, 0x00000000},
-	{0x0000b05c, 0x00000000},
-	{0x0000b060, 0x00000000},
-	{0x0000b064, 0x00000000},
-	{0x0000b068, 0x00000000},
-	{0x0000b06c, 0x00000000},
-	{0x0000b070, 0x00000000},
-	{0x0000b074, 0x00000000},
-	{0x0000b078, 0x00000000},
-	{0x0000b07c, 0x00000000},
-	{0x0000b080, 0x2a2d2f32},
-	{0x0000b084, 0x21232328},
-	{0x0000b088, 0x19191c1e},
-	{0x0000b08c, 0x12141417},
-	{0x0000b090, 0x07070e0e},
-	{0x0000b094, 0x03030305},
-	{0x0000b098, 0x00000003},
-	{0x0000b09c, 0x00000000},
-	{0x0000b0a0, 0x00000000},
-	{0x0000b0a4, 0x00000000},
-	{0x0000b0a8, 0x00000000},
-	{0x0000b0ac, 0x00000000},
-	{0x0000b0b0, 0x00000000},
-	{0x0000b0b4, 0x00000000},
-	{0x0000b0b8, 0x00000000},
-	{0x0000b0bc, 0x00000000},
-	{0x0000b0c0, 0x003f0020},
-	{0x0000b0c4, 0x00400041},
-	{0x0000b0c8, 0x0140005f},
-	{0x0000b0cc, 0x0160015f},
-	{0x0000b0d0, 0x017e017f},
-	{0x0000b0d4, 0x02410242},
-	{0x0000b0d8, 0x025f0240},
-	{0x0000b0dc, 0x027f0260},
-	{0x0000b0e0, 0x0341027e},
-	{0x0000b0e4, 0x035f0340},
-	{0x0000b0e8, 0x037f0360},
-	{0x0000b0ec, 0x04400441},
-	{0x0000b0f0, 0x0460045f},
-	{0x0000b0f4, 0x0541047f},
-	{0x0000b0f8, 0x055f0540},
-	{0x0000b0fc, 0x057f0560},
-	{0x0000b100, 0x06400641},
-	{0x0000b104, 0x0660065f},
-	{0x0000b108, 0x067e067f},
-	{0x0000b10c, 0x07410742},
-	{0x0000b110, 0x075f0740},
-	{0x0000b114, 0x077f0760},
-	{0x0000b118, 0x07800781},
-	{0x0000b11c, 0x07a0079f},
-	{0x0000b120, 0x07c107bf},
-	{0x0000b124, 0x000007c0},
-	{0x0000b128, 0x00000000},
-	{0x0000b12c, 0x00000000},
-	{0x0000b130, 0x00000000},
-	{0x0000b134, 0x00000000},
-	{0x0000b138, 0x00000000},
-	{0x0000b13c, 0x00000000},
-	{0x0000b140, 0x003f0020},
-	{0x0000b144, 0x00400041},
-	{0x0000b148, 0x0140005f},
-	{0x0000b14c, 0x0160015f},
-	{0x0000b150, 0x017e017f},
-	{0x0000b154, 0x02410242},
-	{0x0000b158, 0x025f0240},
-	{0x0000b15c, 0x027f0260},
-	{0x0000b160, 0x0341027e},
-	{0x0000b164, 0x035f0340},
-	{0x0000b168, 0x037f0360},
-	{0x0000b16c, 0x04400441},
-	{0x0000b170, 0x0460045f},
-	{0x0000b174, 0x0541047f},
-	{0x0000b178, 0x055f0540},
-	{0x0000b17c, 0x057f0560},
-	{0x0000b180, 0x06400641},
-	{0x0000b184, 0x0660065f},
-	{0x0000b188, 0x067e067f},
-	{0x0000b18c, 0x07410742},
-	{0x0000b190, 0x075f0740},
-	{0x0000b194, 0x077f0760},
-	{0x0000b198, 0x07800781},
-	{0x0000b19c, 0x07a0079f},
-	{0x0000b1a0, 0x07c107bf},
-	{0x0000b1a4, 0x000007c0},
-	{0x0000b1a8, 0x00000000},
-	{0x0000b1ac, 0x00000000},
-	{0x0000b1b0, 0x00000000},
-	{0x0000b1b4, 0x00000000},
-	{0x0000b1b8, 0x00000000},
-	{0x0000b1bc, 0x00000000},
-	{0x0000b1c0, 0x00000000},
-	{0x0000b1c4, 0x00000000},
-	{0x0000b1c8, 0x00000000},
-	{0x0000b1cc, 0x00000000},
-	{0x0000b1d0, 0x00000000},
-	{0x0000b1d4, 0x00000000},
-	{0x0000b1d8, 0x00000000},
-	{0x0000b1dc, 0x00000000},
-	{0x0000b1e0, 0x00000000},
-	{0x0000b1e4, 0x00000000},
-	{0x0000b1e8, 0x00000000},
-	{0x0000b1ec, 0x00000000},
-	{0x0000b1f0, 0x00000396},
-	{0x0000b1f4, 0x00000396},
-	{0x0000b1f8, 0x00000396},
-	{0x0000b1fc, 0x00000196},
-};
-
 static const u32 ar9462_2p0_baseband_core_mix_rxgain[][2] = {
 	/* Addr      allmodes  */
 	{0x00009fd0, 0x0a2d6b93},
diff --git a/drivers/net/wireless/ath/ath9k/ar9462_2p1_initvals.h b/drivers/net/wireless/ath/ath9k/ar9462_2p1_initvals.h
index 57fc5f459d0a..dc3adda46e8b 100644
--- a/drivers/net/wireless/ath/ath9k/ar9462_2p1_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9462_2p1_initvals.h
@@ -20,6 +20,44 @@
 
 /* AR9462 2.1 */
 
+#define ar9462_2p1_mac_postamble ar9462_2p0_mac_postamble
+
+#define ar9462_2p1_baseband_core ar9462_2p0_baseband_core
+
+#define ar9462_2p1_radio_core ar9462_2p0_radio_core
+
+#define ar9462_2p1_radio_postamble ar9462_2p0_radio_postamble
+
+#define ar9462_2p1_soc_postamble ar9462_2p0_soc_postamble
+
+#define ar9462_2p1_radio_postamble_sys2ant ar9462_2p0_radio_postamble_sys2ant
+
+#define ar9462_2p1_common_rx_gain ar9462_2p0_common_rx_gain
+
+#define ar9462_2p1_common_mixed_rx_gain ar9462_2p0_common_mixed_rx_gain
+
+#define ar9462_2p1_common_5g_xlna_only_rxgain ar9462_2p0_common_5g_xlna_only_rxgain
+
+#define ar9462_2p1_baseband_core_mix_rxgain ar9462_2p0_baseband_core_mix_rxgain
+
+#define ar9462_2p1_baseband_postamble_mix_rxgain ar9462_2p0_baseband_postamble_mix_rxgain
+
+#define ar9462_2p1_baseband_postamble_5g_xlna ar9462_2p0_baseband_postamble_5g_xlna
+
+#define ar9462_2p1_common_wo_xlna_rx_gain ar9462_2p0_common_wo_xlna_rx_gain
+
+#define ar9462_2p1_modes_low_ob_db_tx_gain ar9462_2p0_modes_low_ob_db_tx_gain
+
+#define ar9462_2p1_modes_high_ob_db_tx_gain ar9462_2p0_modes_high_ob_db_tx_gain
+
+#define ar9462_2p1_modes_mix_ob_db_tx_gain ar9462_2p0_modes_mix_ob_db_tx_gain
+
+#define ar9462_2p1_modes_fast_clock ar9462_2p0_modes_fast_clock
+
+#define ar9462_2p1_baseband_core_txfir_coeff_japan_2484 ar9462_2p0_baseband_core_txfir_coeff_japan_2484
+
+#define ar9462_2p1_pciephy_clkreq_disable_L1 ar9462_2p0_pciephy_clkreq_disable_L1
+
 static const u32 ar9462_2p1_mac_core[][2] = {
 	/* Addr      allmodes  */
 	{0x00000008, 0x00000000},
@@ -183,168 +221,6 @@ static const u32 ar9462_2p1_mac_core[][2] = {
 	{0x000083d0, 0x000301ff},
 };
 
-static const u32 ar9462_2p1_mac_postamble[][5] = {
-	/* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
-	{0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
-	{0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c},
-	{0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38},
-	{0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00},
-	{0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b},
-	{0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810},
-	{0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a},
-	{0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440},
-};
-
-static const u32 ar9462_2p1_baseband_core[][2] = {
-	/* Addr      allmodes  */
-	{0x00009800, 0xafe68e30},
-	{0x00009804, 0xfd14e000},
-	{0x00009808, 0x9c0a9f6b},
-	{0x0000980c, 0x04900000},
-	{0x00009814, 0x9280c00a},
-	{0x00009818, 0x00000000},
-	{0x0000981c, 0x00020028},
-	{0x00009834, 0x6400a290},
-	{0x00009838, 0x0108ecff},
-	{0x0000983c, 0x0d000600},
-	{0x00009880, 0x201fff00},
-	{0x00009884, 0x00001042},
-	{0x000098a4, 0x00200400},
-	{0x000098b0, 0x32440bbe},
-	{0x000098d0, 0x004b6a8e},
-	{0x000098d4, 0x00000820},
-	{0x000098dc, 0x00000000},
-	{0x000098e4, 0x01ffffff},
-	{0x000098e8, 0x01ffffff},
-	{0x000098ec, 0x01ffffff},
-	{0x000098f0, 0x00000000},
-	{0x000098f4, 0x00000000},
-	{0x00009bf0, 0x80000000},
-	{0x00009c04, 0xff55ff55},
-	{0x00009c08, 0x0320ff55},
-	{0x00009c0c, 0x00000000},
-	{0x00009c10, 0x00000000},
-	{0x00009c14, 0x00046384},
-	{0x00009c18, 0x05b6b440},
-	{0x00009c1c, 0x00b6b440},
-	{0x00009d00, 0xc080a333},
-	{0x00009d04, 0x40206c10},
-	{0x00009d08, 0x009c4060},
-	{0x00009d0c, 0x9883800a},
-	{0x00009d10, 0x01834061},
-	{0x00009d14, 0x00c0040b},
-	{0x00009d18, 0x00000000},
-	{0x00009e08, 0x0038230c},
-	{0x00009e24, 0x990bb515},
-	{0x00009e28, 0x0c6f0000},
-	{0x00009e30, 0x06336f77},
-	{0x00009e34, 0x6af6532f},
-	{0x00009e38, 0x0cc80c00},
-	{0x00009e40, 0x15262820},
-	{0x00009e4c, 0x00001004},
-	{0x00009e50, 0x00ff03f1},
-	{0x00009e54, 0xe4c555c2},
-	{0x00009e58, 0xfd857722},
-	{0x00009e5c, 0xe9198724},
-	{0x00009fc0, 0x803e4788},
-	{0x00009fc4, 0x0001efb5},
-	{0x00009fcc, 0x40000014},
-	{0x00009fd0, 0x0a193b93},
-	{0x0000a20c, 0x00000000},
-	{0x0000a220, 0x00000000},
-	{0x0000a224, 0x00000000},
-	{0x0000a228, 0x10002310},
-	{0x0000a23c, 0x00000000},
-	{0x0000a244, 0x0c000000},
-	{0x0000a2a0, 0x00000001},
-	{0x0000a2c0, 0x00000001},
-	{0x0000a2c8, 0x00000000},
-	{0x0000a2cc, 0x18c43433},
-	{0x0000a2d4, 0x00000000},
-	{0x0000a2ec, 0x00000000},
-	{0x0000a2f0, 0x00000000},
-	{0x0000a2f4, 0x00000000},
-	{0x0000a2f8, 0x00000000},
-	{0x0000a344, 0x00000000},
-	{0x0000a34c, 0x00000000},
-	{0x0000a350, 0x0000a000},
-	{0x0000a364, 0x00000000},
-	{0x0000a370, 0x00000000},
-	{0x0000a390, 0x00000001},
-	{0x0000a394, 0x00000444},
-	{0x0000a398, 0x001f0e0f},
-	{0x0000a39c, 0x0075393f},
-	{0x0000a3a0, 0xb79f6427},
-	{0x0000a3c0, 0x20202020},
-	{0x0000a3c4, 0x22222220},
-	{0x0000a3c8, 0x20200020},
-	{0x0000a3cc, 0x20202020},
-	{0x0000a3d0, 0x20202020},
-	{0x0000a3d4, 0x20202020},
-	{0x0000a3d8, 0x20202020},
-	{0x0000a3dc, 0x20202020},
-	{0x0000a3e0, 0x20202020},
-	{0x0000a3e4, 0x20202020},
-	{0x0000a3e8, 0x20202020},
-	{0x0000a3ec, 0x20202020},
-	{0x0000a3f0, 0x00000000},
-	{0x0000a3f4, 0x00000006},
-	{0x0000a3f8, 0x0c9bd380},
-	{0x0000a3fc, 0x000f0f01},
-	{0x0000a400, 0x8fa91f01},
-	{0x0000a404, 0x00000000},
-	{0x0000a408, 0x0e79e5c6},
-	{0x0000a40c, 0x00820820},
-	{0x0000a414, 0x1ce739ce},
-	{0x0000a418, 0x2d001dce},
-	{0x0000a434, 0x00000000},
-	{0x0000a438, 0x00001801},
-	{0x0000a43c, 0x00100000},
-	{0x0000a444, 0x00000000},
-	{0x0000a448, 0x05000080},
-	{0x0000a44c, 0x00000001},
-	{0x0000a450, 0x00010000},
-	{0x0000a454, 0x07000000},
-	{0x0000a644, 0xbfad9d74},
-	{0x0000a648, 0x0048060a},
-	{0x0000a64c, 0x00002037},
-	{0x0000a670, 0x03020100},
-	{0x0000a674, 0x09080504},
-	{0x0000a678, 0x0d0c0b0a},
-	{0x0000a67c, 0x13121110},
-	{0x0000a680, 0x31301514},
-	{0x0000a684, 0x35343332},
-	{0x0000a688, 0x00000036},
-	{0x0000a690, 0x00000838},
-	{0x0000a6b0, 0x0000000a},
-	{0x0000a6b4, 0x00512c01},
-	{0x0000a7c0, 0x00000000},
-	{0x0000a7c4, 0xfffffffc},
-	{0x0000a7c8, 0x00000000},
-	{0x0000a7cc, 0x00000000},
-	{0x0000a7d0, 0x00000000},
-	{0x0000a7d4, 0x00000004},
-	{0x0000a7dc, 0x00000000},
-	{0x0000a7f0, 0x80000000},
-	{0x0000a8d0, 0x004b6a8e},
-	{0x0000a8d4, 0x00000820},
-	{0x0000a8dc, 0x00000000},
-	{0x0000a8f0, 0x00000000},
-	{0x0000a8f4, 0x00000000},
-	{0x0000abf0, 0x80000000},
-	{0x0000b2d0, 0x00000080},
-	{0x0000b2d4, 0x00000000},
-	{0x0000b2ec, 0x00000000},
-	{0x0000b2f0, 0x00000000},
-	{0x0000b2f4, 0x00000000},
-	{0x0000b2f8, 0x00000000},
-	{0x0000b408, 0x0e79e5c0},
-	{0x0000b40c, 0x00820820},
-	{0x0000b420, 0x00000000},
-	{0x0000b6b0, 0x0000000a},
-	{0x0000b6b4, 0x00000001},
-};
-
 static const u32 ar9462_2p1_baseband_postamble[][5] = {
 	/* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
 	{0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8011, 0xd00a800d},
@@ -404,72 +280,6 @@ static const u32 ar9462_2p1_baseband_postamble[][5] = {
 	{0x0000b284, 0x00000000, 0x00000000, 0x00000550, 0x00000550},
 };
 
-static const u32 ar9462_2p1_radio_core[][2] = {
-	/* Addr      allmodes  */
-	{0x00016000, 0x36db6db6},
-	{0x00016004, 0x6db6db40},
-	{0x00016008, 0x73f00000},
-	{0x0001600c, 0x00000000},
-	{0x00016010, 0x6d820001},
-	{0x00016040, 0x7f80fff8},
-	{0x0001604c, 0x2699e04f},
-	{0x00016050, 0x6db6db6c},
-	{0x00016058, 0x6c200000},
-	{0x00016080, 0x000c0000},
-	{0x00016084, 0x9a68048c},
-	{0x00016088, 0x54214514},
-	{0x0001608c, 0x1203040b},
-	{0x00016090, 0x24926490},
-	{0x00016098, 0xd2888888},
-	{0x000160a0, 0x0a108ffe},
-	{0x000160a4, 0x812fc491},
-	{0x000160a8, 0x423c8000},
-	{0x000160b4, 0x92000000},
-	{0x000160b8, 0x0285dddc},
-	{0x000160bc, 0x02908888},
-	{0x000160c0, 0x00adb6d0},
-	{0x000160c4, 0x6db6db60},
-	{0x000160c8, 0x6db6db6c},
-	{0x000160cc, 0x0de6c1b0},
-	{0x00016100, 0x3fffbe04},
-	{0x00016104, 0xfff80000},
-	{0x00016108, 0x00200400},
-	{0x00016110, 0x00000000},
-	{0x00016144, 0x02084080},
-	{0x00016148, 0x000080c0},
-	{0x00016280, 0x050a0001},
-	{0x00016284, 0x3d841418},
-	{0x00016288, 0x00000000},
-	{0x0001628c, 0xe3000000},
-	{0x00016290, 0xa1005080},
-	{0x00016294, 0x00000020},
-	{0x00016298, 0x54a82900},
-	{0x00016340, 0x121e4276},
-	{0x00016344, 0x00300000},
-	{0x00016400, 0x36db6db6},
-	{0x00016404, 0x6db6db40},
-	{0x00016408, 0x73f00000},
-	{0x0001640c, 0x00000000},
-	{0x00016410, 0x6c800001},
-	{0x00016440, 0x7f80fff8},
-	{0x0001644c, 0x4699e04f},
-	{0x00016450, 0x6db6db6c},
-	{0x00016500, 0x3fffbe04},
-	{0x00016504, 0xfff80000},
-	{0x00016508, 0x00200400},
-	{0x00016510, 0x00000000},
-	{0x00016544, 0x02084080},
-	{0x00016548, 0x000080c0},
-};
-
-static const u32 ar9462_2p1_radio_postamble[][5] = {
-	/* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
-	{0x0001609c, 0x0b8ee524, 0x0b8ee524, 0x0b8ee524, 0x0b8ee524},
-	{0x000160b0, 0x01d67f70, 0x01d67f70, 0x01d67f70, 0x01d67f70},
-	{0x0001610c, 0x48000000, 0x40000000, 0x40000000, 0x40000000},
-	{0x0001650c, 0x48000000, 0x40000000, 0x40000000, 0x40000000},
-};
-
 static const u32 ar9462_2p1_soc_preamble[][2] = {
 	/* Addr      allmodes  */
 	{0x000040a4, 0x00a0c9c9},
@@ -478,1297 +288,4 @@ static const u32 ar9462_2p1_soc_preamble[][2] = {
 	{0x00007038, 0x000004c2},
 };
 
-static const u32 ar9462_2p1_soc_postamble[][5] = {
-	/* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
-	{0x00007010, 0x00000033, 0x00000033, 0x00000033, 0x00000033},
-};
-
-static const u32 ar9462_2p1_radio_postamble_sys2ant[][5] = {
-	/* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
-	{0x000160ac, 0xa4646c08, 0xa4646c08, 0x24645808, 0x24645808},
-	{0x00016140, 0x10804008, 0x10804008, 0x50804008, 0x50804008},
-	{0x00016540, 0x10804008, 0x10804008, 0x50804008, 0x50804008},
-};
-
-static const u32 ar9462_2p1_common_rx_gain[][2] = {
-	/* Addr      allmodes  */
-	{0x0000a000, 0x00010000},
-	{0x0000a004, 0x00030002},
-	{0x0000a008, 0x00050004},
-	{0x0000a00c, 0x00810080},
-	{0x0000a010, 0x00830082},
-	{0x0000a014, 0x01810180},
-	{0x0000a018, 0x01830182},
-	{0x0000a01c, 0x01850184},
-	{0x0000a020, 0x01890188},
-	{0x0000a024, 0x018b018a},
-	{0x0000a028, 0x018d018c},
-	{0x0000a02c, 0x01910190},
-	{0x0000a030, 0x01930192},
-	{0x0000a034, 0x01950194},
-	{0x0000a038, 0x038a0196},
-	{0x0000a03c, 0x038c038b},
-	{0x0000a040, 0x0390038d},
-	{0x0000a044, 0x03920391},
-	{0x0000a048, 0x03940393},
-	{0x0000a04c, 0x03960395},
-	{0x0000a050, 0x00000000},
-	{0x0000a054, 0x00000000},
-	{0x0000a058, 0x00000000},
-	{0x0000a05c, 0x00000000},
-	{0x0000a060, 0x00000000},
-	{0x0000a064, 0x00000000},
-	{0x0000a068, 0x00000000},
-	{0x0000a06c, 0x00000000},
-	{0x0000a070, 0x00000000},
-	{0x0000a074, 0x00000000},
-	{0x0000a078, 0x00000000},
-	{0x0000a07c, 0x00000000},
-	{0x0000a080, 0x22222229},
-	{0x0000a084, 0x1d1d1d1d},
-	{0x0000a088, 0x1d1d1d1d},
-	{0x0000a08c, 0x1d1d1d1d},
-	{0x0000a090, 0x171d1d1d},
-	{0x0000a094, 0x11111717},
-	{0x0000a098, 0x00030311},
-	{0x0000a09c, 0x00000000},
-	{0x0000a0a0, 0x00000000},
-	{0x0000a0a4, 0x00000000},
-	{0x0000a0a8, 0x00000000},
-	{0x0000a0ac, 0x00000000},
-	{0x0000a0b0, 0x00000000},
-	{0x0000a0b4, 0x00000000},
-	{0x0000a0b8, 0x00000000},
-	{0x0000a0bc, 0x00000000},
-	{0x0000a0c0, 0x001f0000},
-	{0x0000a0c4, 0x01000101},
-	{0x0000a0c8, 0x011e011f},
-	{0x0000a0cc, 0x011c011d},
-	{0x0000a0d0, 0x02030204},
-	{0x0000a0d4, 0x02010202},
-	{0x0000a0d8, 0x021f0200},
-	{0x0000a0dc, 0x0302021e},
-	{0x0000a0e0, 0x03000301},
-	{0x0000a0e4, 0x031e031f},
-	{0x0000a0e8, 0x0402031d},
-	{0x0000a0ec, 0x04000401},
-	{0x0000a0f0, 0x041e041f},
-	{0x0000a0f4, 0x0502041d},
-	{0x0000a0f8, 0x05000501},
-	{0x0000a0fc, 0x051e051f},
-	{0x0000a100, 0x06010602},
-	{0x0000a104, 0x061f0600},
-	{0x0000a108, 0x061d061e},
-	{0x0000a10c, 0x07020703},
-	{0x0000a110, 0x07000701},
-	{0x0000a114, 0x00000000},
-	{0x0000a118, 0x00000000},
-	{0x0000a11c, 0x00000000},
-	{0x0000a120, 0x00000000},
-	{0x0000a124, 0x00000000},
-	{0x0000a128, 0x00000000},
-	{0x0000a12c, 0x00000000},
-	{0x0000a130, 0x00000000},
-	{0x0000a134, 0x00000000},
-	{0x0000a138, 0x00000000},
-	{0x0000a13c, 0x00000000},
-	{0x0000a140, 0x001f0000},
-	{0x0000a144, 0x01000101},
-	{0x0000a148, 0x011e011f},
-	{0x0000a14c, 0x011c011d},
-	{0x0000a150, 0x02030204},
-	{0x0000a154, 0x02010202},
-	{0x0000a158, 0x021f0200},
-	{0x0000a15c, 0x0302021e},
-	{0x0000a160, 0x03000301},
-	{0x0000a164, 0x031e031f},
-	{0x0000a168, 0x0402031d},
-	{0x0000a16c, 0x04000401},
-	{0x0000a170, 0x041e041f},
-	{0x0000a174, 0x0502041d},
-	{0x0000a178, 0x05000501},
-	{0x0000a17c, 0x051e051f},
-	{0x0000a180, 0x06010602},
-	{0x0000a184, 0x061f0600},
-	{0x0000a188, 0x061d061e},
-	{0x0000a18c, 0x07020703},
-	{0x0000a190, 0x07000701},
-	{0x0000a194, 0x00000000},
-	{0x0000a198, 0x00000000},
-	{0x0000a19c, 0x00000000},
-	{0x0000a1a0, 0x00000000},
-	{0x0000a1a4, 0x00000000},
-	{0x0000a1a8, 0x00000000},
-	{0x0000a1ac, 0x00000000},
-	{0x0000a1b0, 0x00000000},
-	{0x0000a1b4, 0x00000000},
-	{0x0000a1b8, 0x00000000},
-	{0x0000a1bc, 0x00000000},
-	{0x0000a1c0, 0x00000000},
-	{0x0000a1c4, 0x00000000},
-	{0x0000a1c8, 0x00000000},
-	{0x0000a1cc, 0x00000000},
-	{0x0000a1d0, 0x00000000},
-	{0x0000a1d4, 0x00000000},
-	{0x0000a1d8, 0x00000000},
-	{0x0000a1dc, 0x00000000},
-	{0x0000a1e0, 0x00000000},
-	{0x0000a1e4, 0x00000000},
-	{0x0000a1e8, 0x00000000},
-	{0x0000a1ec, 0x00000000},
-	{0x0000a1f0, 0x00000396},
-	{0x0000a1f4, 0x00000396},
-	{0x0000a1f8, 0x00000396},
-	{0x0000a1fc, 0x00000196},
-	{0x0000b000, 0x00010000},
-	{0x0000b004, 0x00030002},
-	{0x0000b008, 0x00050004},
-	{0x0000b00c, 0x00810080},
-	{0x0000b010, 0x00830082},
-	{0x0000b014, 0x01810180},
-	{0x0000b018, 0x01830182},
-	{0x0000b01c, 0x01850184},
-	{0x0000b020, 0x02810280},
-	{0x0000b024, 0x02830282},
-	{0x0000b028, 0x02850284},
-	{0x0000b02c, 0x02890288},
-	{0x0000b030, 0x028b028a},
-	{0x0000b034, 0x0388028c},
-	{0x0000b038, 0x038a0389},
-	{0x0000b03c, 0x038c038b},
-	{0x0000b040, 0x0390038d},
-	{0x0000b044, 0x03920391},
-	{0x0000b048, 0x03940393},
-	{0x0000b04c, 0x03960395},
-	{0x0000b050, 0x00000000},
-	{0x0000b054, 0x00000000},
-	{0x0000b058, 0x00000000},
-	{0x0000b05c, 0x00000000},
-	{0x0000b060, 0x00000000},
-	{0x0000b064, 0x00000000},
-	{0x0000b068, 0x00000000},
-	{0x0000b06c, 0x00000000},
-	{0x0000b070, 0x00000000},
-	{0x0000b074, 0x00000000},
-	{0x0000b078, 0x00000000},
-	{0x0000b07c, 0x00000000},
-	{0x0000b080, 0x2a2d2f32},
-	{0x0000b084, 0x21232328},
-	{0x0000b088, 0x19191c1e},
-	{0x0000b08c, 0x12141417},
-	{0x0000b090, 0x07070e0e},
-	{0x0000b094, 0x03030305},
-	{0x0000b098, 0x00000003},
-	{0x0000b09c, 0x00000000},
-	{0x0000b0a0, 0x00000000},
-	{0x0000b0a4, 0x00000000},
-	{0x0000b0a8, 0x00000000},
-	{0x0000b0ac, 0x00000000},
-	{0x0000b0b0, 0x00000000},
-	{0x0000b0b4, 0x00000000},
-	{0x0000b0b8, 0x00000000},
-	{0x0000b0bc, 0x00000000},
-	{0x0000b0c0, 0x003f0020},
-	{0x0000b0c4, 0x00400041},
-	{0x0000b0c8, 0x0140005f},
-	{0x0000b0cc, 0x0160015f},
-	{0x0000b0d0, 0x017e017f},
-	{0x0000b0d4, 0x02410242},
-	{0x0000b0d8, 0x025f0240},
-	{0x0000b0dc, 0x027f0260},
-	{0x0000b0e0, 0x0341027e},
-	{0x0000b0e4, 0x035f0340},
-	{0x0000b0e8, 0x037f0360},
-	{0x0000b0ec, 0x04400441},
-	{0x0000b0f0, 0x0460045f},
-	{0x0000b0f4, 0x0541047f},
-	{0x0000b0f8, 0x055f0540},
-	{0x0000b0fc, 0x057f0560},
-	{0x0000b100, 0x06400641},
-	{0x0000b104, 0x0660065f},
-	{0x0000b108, 0x067e067f},
-	{0x0000b10c, 0x07410742},
-	{0x0000b110, 0x075f0740},
-	{0x0000b114, 0x077f0760},
-	{0x0000b118, 0x07800781},
-	{0x0000b11c, 0x07a0079f},
-	{0x0000b120, 0x07c107bf},
-	{0x0000b124, 0x000007c0},
-	{0x0000b128, 0x00000000},
-	{0x0000b12c, 0x00000000},
-	{0x0000b130, 0x00000000},
-	{0x0000b134, 0x00000000},
-	{0x0000b138, 0x00000000},
-	{0x0000b13c, 0x00000000},
-	{0x0000b140, 0x003f0020},
-	{0x0000b144, 0x00400041},
-	{0x0000b148, 0x0140005f},
-	{0x0000b14c, 0x0160015f},
-	{0x0000b150, 0x017e017f},
-	{0x0000b154, 0x02410242},
-	{0x0000b158, 0x025f0240},
-	{0x0000b15c, 0x027f0260},
-	{0x0000b160, 0x0341027e},
-	{0x0000b164, 0x035f0340},
-	{0x0000b168, 0x037f0360},
-	{0x0000b16c, 0x04400441},
-	{0x0000b170, 0x0460045f},
-	{0x0000b174, 0x0541047f},
-	{0x0000b178, 0x055f0540},
-	{0x0000b17c, 0x057f0560},
-	{0x0000b180, 0x06400641},
-	{0x0000b184, 0x0660065f},
-	{0x0000b188, 0x067e067f},
-	{0x0000b18c, 0x07410742},
-	{0x0000b190, 0x075f0740},
-	{0x0000b194, 0x077f0760},
-	{0x0000b198, 0x07800781},
-	{0x0000b19c, 0x07a0079f},
-	{0x0000b1a0, 0x07c107bf},
-	{0x0000b1a4, 0x000007c0},
-	{0x0000b1a8, 0x00000000},
-	{0x0000b1ac, 0x00000000},
-	{0x0000b1b0, 0x00000000},
-	{0x0000b1b4, 0x00000000},
-	{0x0000b1b8, 0x00000000},
-	{0x0000b1bc, 0x00000000},
-	{0x0000b1c0, 0x00000000},
-	{0x0000b1c4, 0x00000000},
-	{0x0000b1c8, 0x00000000},
-	{0x0000b1cc, 0x00000000},
-	{0x0000b1d0, 0x00000000},
-	{0x0000b1d4, 0x00000000},
-	{0x0000b1d8, 0x00000000},
-	{0x0000b1dc, 0x00000000},
-	{0x0000b1e0, 0x00000000},
-	{0x0000b1e4, 0x00000000},
-	{0x0000b1e8, 0x00000000},
-	{0x0000b1ec, 0x00000000},
-	{0x0000b1f0, 0x00000396},
-	{0x0000b1f4, 0x00000396},
-	{0x0000b1f8, 0x00000396},
-	{0x0000b1fc, 0x00000196},
-};
-
-static const u32 ar9462_2p1_common_mixed_rx_gain[][2] = {
-	/* Addr      allmodes  */
-	{0x0000a000, 0x00010000},
-	{0x0000a004, 0x00030002},
-	{0x0000a008, 0x00050004},
-	{0x0000a00c, 0x00810080},
-	{0x0000a010, 0x00830082},
-	{0x0000a014, 0x01810180},
-	{0x0000a018, 0x01830182},
-	{0x0000a01c, 0x01850184},
-	{0x0000a020, 0x01890188},
-	{0x0000a024, 0x018b018a},
-	{0x0000a028, 0x018d018c},
-	{0x0000a02c, 0x03820190},
-	{0x0000a030, 0x03840383},
-	{0x0000a034, 0x03880385},
-	{0x0000a038, 0x038a0389},
-	{0x0000a03c, 0x038c038b},
-	{0x0000a040, 0x0390038d},
-	{0x0000a044, 0x03920391},
-	{0x0000a048, 0x03940393},
-	{0x0000a04c, 0x03960395},
-	{0x0000a050, 0x00000000},
-	{0x0000a054, 0x00000000},
-	{0x0000a058, 0x00000000},
-	{0x0000a05c, 0x00000000},
-	{0x0000a060, 0x00000000},
-	{0x0000a064, 0x00000000},
-	{0x0000a068, 0x00000000},
-	{0x0000a06c, 0x00000000},
-	{0x0000a070, 0x00000000},
-	{0x0000a074, 0x00000000},
-	{0x0000a078, 0x00000000},
-	{0x0000a07c, 0x00000000},
-	{0x0000a080, 0x29292929},
-	{0x0000a084, 0x29292929},
-	{0x0000a088, 0x29292929},
-	{0x0000a08c, 0x29292929},
-	{0x0000a090, 0x22292929},
-	{0x0000a094, 0x1d1d2222},
-	{0x0000a098, 0x0c111117},
-	{0x0000a09c, 0x00030303},
-	{0x0000a0a0, 0x00000000},
-	{0x0000a0a4, 0x00000000},
-	{0x0000a0a8, 0x00000000},
-	{0x0000a0ac, 0x00000000},
-	{0x0000a0b0, 0x00000000},
-	{0x0000a0b4, 0x00000000},
-	{0x0000a0b8, 0x00000000},
-	{0x0000a0bc, 0x00000000},
-	{0x0000a0c0, 0x001f0000},
-	{0x0000a0c4, 0x01000101},
-	{0x0000a0c8, 0x011e011f},
-	{0x0000a0cc, 0x011c011d},
-	{0x0000a0d0, 0x02030204},
-	{0x0000a0d4, 0x02010202},
-	{0x0000a0d8, 0x021f0200},
-	{0x0000a0dc, 0x0302021e},
-	{0x0000a0e0, 0x03000301},
-	{0x0000a0e4, 0x031e031f},
-	{0x0000a0e8, 0x0402031d},
-	{0x0000a0ec, 0x04000401},
-	{0x0000a0f0, 0x041e041f},
-	{0x0000a0f4, 0x0502041d},
-	{0x0000a0f8, 0x05000501},
-	{0x0000a0fc, 0x051e051f},
-	{0x0000a100, 0x06010602},
-	{0x0000a104, 0x061f0600},
-	{0x0000a108, 0x061d061e},
-	{0x0000a10c, 0x07020703},
-	{0x0000a110, 0x07000701},
-	{0x0000a114, 0x00000000},
-	{0x0000a118, 0x00000000},
-	{0x0000a11c, 0x00000000},
-	{0x0000a120, 0x00000000},
-	{0x0000a124, 0x00000000},
-	{0x0000a128, 0x00000000},
-	{0x0000a12c, 0x00000000},
-	{0x0000a130, 0x00000000},
-	{0x0000a134, 0x00000000},
-	{0x0000a138, 0x00000000},
-	{0x0000a13c, 0x00000000},
-	{0x0000a140, 0x001f0000},
-	{0x0000a144, 0x01000101},
-	{0x0000a148, 0x011e011f},
-	{0x0000a14c, 0x011c011d},
-	{0x0000a150, 0x02030204},
-	{0x0000a154, 0x02010202},
-	{0x0000a158, 0x021f0200},
-	{0x0000a15c, 0x0302021e},
-	{0x0000a160, 0x03000301},
-	{0x0000a164, 0x031e031f},
-	{0x0000a168, 0x0402031d},
-	{0x0000a16c, 0x04000401},
-	{0x0000a170, 0x041e041f},
-	{0x0000a174, 0x0502041d},
-	{0x0000a178, 0x05000501},
-	{0x0000a17c, 0x051e051f},
-	{0x0000a180, 0x06010602},
-	{0x0000a184, 0x061f0600},
-	{0x0000a188, 0x061d061e},
-	{0x0000a18c, 0x07020703},
-	{0x0000a190, 0x07000701},
-	{0x0000a194, 0x00000000},
-	{0x0000a198, 0x00000000},
-	{0x0000a19c, 0x00000000},
-	{0x0000a1a0, 0x00000000},
-	{0x0000a1a4, 0x00000000},
-	{0x0000a1a8, 0x00000000},
-	{0x0000a1ac, 0x00000000},
-	{0x0000a1b0, 0x00000000},
-	{0x0000a1b4, 0x00000000},
-	{0x0000a1b8, 0x00000000},
-	{0x0000a1bc, 0x00000000},
-	{0x0000a1c0, 0x00000000},
-	{0x0000a1c4, 0x00000000},
-	{0x0000a1c8, 0x00000000},
-	{0x0000a1cc, 0x00000000},
-	{0x0000a1d0, 0x00000000},
-	{0x0000a1d4, 0x00000000},
-	{0x0000a1d8, 0x00000000},
-	{0x0000a1dc, 0x00000000},
-	{0x0000a1e0, 0x00000000},
-	{0x0000a1e4, 0x00000000},
-	{0x0000a1e8, 0x00000000},
-	{0x0000a1ec, 0x00000000},
-	{0x0000a1f0, 0x00000396},
-	{0x0000a1f4, 0x00000396},
-	{0x0000a1f8, 0x00000396},
-	{0x0000a1fc, 0x00000196},
-	{0x0000b000, 0x00010000},
-	{0x0000b004, 0x00030002},
-	{0x0000b008, 0x00050004},
-	{0x0000b00c, 0x00810080},
-	{0x0000b010, 0x00830082},
-	{0x0000b014, 0x01810180},
-	{0x0000b018, 0x01830182},
-	{0x0000b01c, 0x01850184},
-	{0x0000b020, 0x02810280},
-	{0x0000b024, 0x02830282},
-	{0x0000b028, 0x02850284},
-	{0x0000b02c, 0x02890288},
-	{0x0000b030, 0x028b028a},
-	{0x0000b034, 0x0388028c},
-	{0x0000b038, 0x038a0389},
-	{0x0000b03c, 0x038c038b},
-	{0x0000b040, 0x0390038d},
-	{0x0000b044, 0x03920391},
-	{0x0000b048, 0x03940393},
-	{0x0000b04c, 0x03960395},
-	{0x0000b050, 0x00000000},
-	{0x0000b054, 0x00000000},
-	{0x0000b058, 0x00000000},
-	{0x0000b05c, 0x00000000},
-	{0x0000b060, 0x00000000},
-	{0x0000b064, 0x00000000},
-	{0x0000b068, 0x00000000},
-	{0x0000b06c, 0x00000000},
-	{0x0000b070, 0x00000000},
-	{0x0000b074, 0x00000000},
-	{0x0000b078, 0x00000000},
-	{0x0000b07c, 0x00000000},
-	{0x0000b080, 0x2a2d2f32},
-	{0x0000b084, 0x21232328},
-	{0x0000b088, 0x19191c1e},
-	{0x0000b08c, 0x12141417},
-	{0x0000b090, 0x07070e0e},
-	{0x0000b094, 0x03030305},
-	{0x0000b098, 0x00000003},
-	{0x0000b09c, 0x00000000},
-	{0x0000b0a0, 0x00000000},
-	{0x0000b0a4, 0x00000000},
-	{0x0000b0a8, 0x00000000},
-	{0x0000b0ac, 0x00000000},
-	{0x0000b0b0, 0x00000000},
-	{0x0000b0b4, 0x00000000},
-	{0x0000b0b8, 0x00000000},
-	{0x0000b0bc, 0x00000000},
-	{0x0000b0c0, 0x003f0020},
-	{0x0000b0c4, 0x00400041},
-	{0x0000b0c8, 0x0140005f},
-	{0x0000b0cc, 0x0160015f},
-	{0x0000b0d0, 0x017e017f},
-	{0x0000b0d4, 0x02410242},
-	{0x0000b0d8, 0x025f0240},
-	{0x0000b0dc, 0x027f0260},
-	{0x0000b0e0, 0x0341027e},
-	{0x0000b0e4, 0x035f0340},
-	{0x0000b0e8, 0x037f0360},
-	{0x0000b0ec, 0x04400441},
-	{0x0000b0f0, 0x0460045f},
-	{0x0000b0f4, 0x0541047f},
-	{0x0000b0f8, 0x055f0540},
-	{0x0000b0fc, 0x057f0560},
-	{0x0000b100, 0x06400641},
-	{0x0000b104, 0x0660065f},
-	{0x0000b108, 0x067e067f},
-	{0x0000b10c, 0x07410742},
-	{0x0000b110, 0x075f0740},
-	{0x0000b114, 0x077f0760},
-	{0x0000b118, 0x07800781},
-	{0x0000b11c, 0x07a0079f},
-	{0x0000b120, 0x07c107bf},
-	{0x0000b124, 0x000007c0},
-	{0x0000b128, 0x00000000},
-	{0x0000b12c, 0x00000000},
-	{0x0000b130, 0x00000000},
-	{0x0000b134, 0x00000000},
-	{0x0000b138, 0x00000000},
-	{0x0000b13c, 0x00000000},
-	{0x0000b140, 0x003f0020},
-	{0x0000b144, 0x00400041},
-	{0x0000b148, 0x0140005f},
-	{0x0000b14c, 0x0160015f},
-	{0x0000b150, 0x017e017f},
-	{0x0000b154, 0x02410242},
-	{0x0000b158, 0x025f0240},
-	{0x0000b15c, 0x027f0260},
-	{0x0000b160, 0x0341027e},
-	{0x0000b164, 0x035f0340},
-	{0x0000b168, 0x037f0360},
-	{0x0000b16c, 0x04400441},
-	{0x0000b170, 0x0460045f},
-	{0x0000b174, 0x0541047f},
-	{0x0000b178, 0x055f0540},
-	{0x0000b17c, 0x057f0560},
-	{0x0000b180, 0x06400641},
-	{0x0000b184, 0x0660065f},
-	{0x0000b188, 0x067e067f},
-	{0x0000b18c, 0x07410742},
-	{0x0000b190, 0x075f0740},
-	{0x0000b194, 0x077f0760},
-	{0x0000b198, 0x07800781},
-	{0x0000b19c, 0x07a0079f},
-	{0x0000b1a0, 0x07c107bf},
-	{0x0000b1a4, 0x000007c0},
-	{0x0000b1a8, 0x00000000},
-	{0x0000b1ac, 0x00000000},
-	{0x0000b1b0, 0x00000000},
-	{0x0000b1b4, 0x00000000},
-	{0x0000b1b8, 0x00000000},
-	{0x0000b1bc, 0x00000000},
-	{0x0000b1c0, 0x00000000},
-	{0x0000b1c4, 0x00000000},
-	{0x0000b1c8, 0x00000000},
-	{0x0000b1cc, 0x00000000},
-	{0x0000b1d0, 0x00000000},
-	{0x0000b1d4, 0x00000000},
-	{0x0000b1d8, 0x00000000},
-	{0x0000b1dc, 0x00000000},
-	{0x0000b1e0, 0x00000000},
-	{0x0000b1e4, 0x00000000},
-	{0x0000b1e8, 0x00000000},
-	{0x0000b1ec, 0x00000000},
-	{0x0000b1f0, 0x00000396},
-	{0x0000b1f4, 0x00000396},
-	{0x0000b1f8, 0x00000396},
-	{0x0000b1fc, 0x00000196},
-};
-
-static const u32 ar9462_2p1_baseband_core_mix_rxgain[][2] = {
-	/* Addr      allmodes  */
-	{0x00009fd0, 0x0a2d6b93},
-};
-
-static const u32 ar9462_2p1_baseband_postamble_mix_rxgain[][5] = {
-	/* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
-	{0x00009820, 0x206a022e, 0x206a022e, 0x206a01ae, 0x206a01ae},
-	{0x00009824, 0x63c640de, 0x5ac640d0, 0x63c640da, 0x63c640da},
-	{0x00009828, 0x0796be89, 0x0696b081, 0x0916be81, 0x0916be81},
-	{0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000d8, 0x6c4000d8},
-	{0x00009e10, 0x92c88d2e, 0x7ec88d2e, 0x7ec86d2e, 0x7ec86d2e},
-	{0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3236605e, 0x32395c5e},
-};
-
-static const u32 ar9462_2p1_baseband_postamble_5g_xlna[][5] = {
-	/* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
-	{0x00009e3c, 0xcf946220, 0xcf946220, 0xcfd5c782, 0xcfd5c282},
-};
-
-static const u32 ar9462_2p1_common_wo_xlna_rx_gain[][2] = {
-	/* Addr      allmodes  */
-	{0x0000a000, 0x00010000},
-	{0x0000a004, 0x00030002},
-	{0x0000a008, 0x00050004},
-	{0x0000a00c, 0x00810080},
-	{0x0000a010, 0x00830082},
-	{0x0000a014, 0x01810180},
-	{0x0000a018, 0x01830182},
-	{0x0000a01c, 0x01850184},
-	{0x0000a020, 0x01890188},
-	{0x0000a024, 0x018b018a},
-	{0x0000a028, 0x018d018c},
-	{0x0000a02c, 0x03820190},
-	{0x0000a030, 0x03840383},
-	{0x0000a034, 0x03880385},
-	{0x0000a038, 0x038a0389},
-	{0x0000a03c, 0x038c038b},
-	{0x0000a040, 0x0390038d},
-	{0x0000a044, 0x03920391},
-	{0x0000a048, 0x03940393},
-	{0x0000a04c, 0x03960395},
-	{0x0000a050, 0x00000000},
-	{0x0000a054, 0x00000000},
-	{0x0000a058, 0x00000000},
-	{0x0000a05c, 0x00000000},
-	{0x0000a060, 0x00000000},
-	{0x0000a064, 0x00000000},
-	{0x0000a068, 0x00000000},
-	{0x0000a06c, 0x00000000},
-	{0x0000a070, 0x00000000},
-	{0x0000a074, 0x00000000},
-	{0x0000a078, 0x00000000},
-	{0x0000a07c, 0x00000000},
-	{0x0000a080, 0x29292929},
-	{0x0000a084, 0x29292929},
-	{0x0000a088, 0x29292929},
-	{0x0000a08c, 0x29292929},
-	{0x0000a090, 0x22292929},
-	{0x0000a094, 0x1d1d2222},
-	{0x0000a098, 0x0c111117},
-	{0x0000a09c, 0x00030303},
-	{0x0000a0a0, 0x00000000},
-	{0x0000a0a4, 0x00000000},
-	{0x0000a0a8, 0x00000000},
-	{0x0000a0ac, 0x00000000},
-	{0x0000a0b0, 0x00000000},
-	{0x0000a0b4, 0x00000000},
-	{0x0000a0b8, 0x00000000},
-	{0x0000a0bc, 0x00000000},
-	{0x0000a0c0, 0x001f0000},
-	{0x0000a0c4, 0x01000101},
-	{0x0000a0c8, 0x011e011f},
-	{0x0000a0cc, 0x011c011d},
-	{0x0000a0d0, 0x02030204},
-	{0x0000a0d4, 0x02010202},
-	{0x0000a0d8, 0x021f0200},
-	{0x0000a0dc, 0x0302021e},
-	{0x0000a0e0, 0x03000301},
-	{0x0000a0e4, 0x031e031f},
-	{0x0000a0e8, 0x0402031d},
-	{0x0000a0ec, 0x04000401},
-	{0x0000a0f0, 0x041e041f},
-	{0x0000a0f4, 0x0502041d},
-	{0x0000a0f8, 0x05000501},
-	{0x0000a0fc, 0x051e051f},
-	{0x0000a100, 0x06010602},
-	{0x0000a104, 0x061f0600},
-	{0x0000a108, 0x061d061e},
-	{0x0000a10c, 0x07020703},
-	{0x0000a110, 0x07000701},
-	{0x0000a114, 0x00000000},
-	{0x0000a118, 0x00000000},
-	{0x0000a11c, 0x00000000},
-	{0x0000a120, 0x00000000},
-	{0x0000a124, 0x00000000},
-	{0x0000a128, 0x00000000},
-	{0x0000a12c, 0x00000000},
-	{0x0000a130, 0x00000000},
-	{0x0000a134, 0x00000000},
-	{0x0000a138, 0x00000000},
-	{0x0000a13c, 0x00000000},
-	{0x0000a140, 0x001f0000},
-	{0x0000a144, 0x01000101},
-	{0x0000a148, 0x011e011f},
-	{0x0000a14c, 0x011c011d},
-	{0x0000a150, 0x02030204},
-	{0x0000a154, 0x02010202},
-	{0x0000a158, 0x021f0200},
-	{0x0000a15c, 0x0302021e},
-	{0x0000a160, 0x03000301},
-	{0x0000a164, 0x031e031f},
-	{0x0000a168, 0x0402031d},
-	{0x0000a16c, 0x04000401},
-	{0x0000a170, 0x041e041f},
-	{0x0000a174, 0x0502041d},
-	{0x0000a178, 0x05000501},
-	{0x0000a17c, 0x051e051f},
-	{0x0000a180, 0x06010602},
-	{0x0000a184, 0x061f0600},
-	{0x0000a188, 0x061d061e},
-	{0x0000a18c, 0x07020703},
-	{0x0000a190, 0x07000701},
-	{0x0000a194, 0x00000000},
-	{0x0000a198, 0x00000000},
-	{0x0000a19c, 0x00000000},
-	{0x0000a1a0, 0x00000000},
-	{0x0000a1a4, 0x00000000},
-	{0x0000a1a8, 0x00000000},
-	{0x0000a1ac, 0x00000000},
-	{0x0000a1b0, 0x00000000},
-	{0x0000a1b4, 0x00000000},
-	{0x0000a1b8, 0x00000000},
-	{0x0000a1bc, 0x00000000},
-	{0x0000a1c0, 0x00000000},
-	{0x0000a1c4, 0x00000000},
-	{0x0000a1c8, 0x00000000},
-	{0x0000a1cc, 0x00000000},
-	{0x0000a1d0, 0x00000000},
-	{0x0000a1d4, 0x00000000},
-	{0x0000a1d8, 0x00000000},
-	{0x0000a1dc, 0x00000000},
-	{0x0000a1e0, 0x00000000},
-	{0x0000a1e4, 0x00000000},
-	{0x0000a1e8, 0x00000000},
-	{0x0000a1ec, 0x00000000},
-	{0x0000a1f0, 0x00000396},
-	{0x0000a1f4, 0x00000396},
-	{0x0000a1f8, 0x00000396},
-	{0x0000a1fc, 0x00000196},
-	{0x0000b000, 0x00010000},
-	{0x0000b004, 0x00030002},
-	{0x0000b008, 0x00050004},
-	{0x0000b00c, 0x00810080},
-	{0x0000b010, 0x00830082},
-	{0x0000b014, 0x01810180},
-	{0x0000b018, 0x01830182},
-	{0x0000b01c, 0x01850184},
-	{0x0000b020, 0x02810280},
-	{0x0000b024, 0x02830282},
-	{0x0000b028, 0x02850284},
-	{0x0000b02c, 0x02890288},
-	{0x0000b030, 0x028b028a},
-	{0x0000b034, 0x0388028c},
-	{0x0000b038, 0x038a0389},
-	{0x0000b03c, 0x038c038b},
-	{0x0000b040, 0x0390038d},
-	{0x0000b044, 0x03920391},
-	{0x0000b048, 0x03940393},
-	{0x0000b04c, 0x03960395},
-	{0x0000b050, 0x00000000},
-	{0x0000b054, 0x00000000},
-	{0x0000b058, 0x00000000},
-	{0x0000b05c, 0x00000000},
-	{0x0000b060, 0x00000000},
-	{0x0000b064, 0x00000000},
-	{0x0000b068, 0x00000000},
-	{0x0000b06c, 0x00000000},
-	{0x0000b070, 0x00000000},
-	{0x0000b074, 0x00000000},
-	{0x0000b078, 0x00000000},
-	{0x0000b07c, 0x00000000},
-	{0x0000b080, 0x32323232},
-	{0x0000b084, 0x2f2f3232},
-	{0x0000b088, 0x23282a2d},
-	{0x0000b08c, 0x1c1e2123},
-	{0x0000b090, 0x14171919},
-	{0x0000b094, 0x0e0e1214},
-	{0x0000b098, 0x03050707},
-	{0x0000b09c, 0x00030303},
-	{0x0000b0a0, 0x00000000},
-	{0x0000b0a4, 0x00000000},
-	{0x0000b0a8, 0x00000000},
-	{0x0000b0ac, 0x00000000},
-	{0x0000b0b0, 0x00000000},
-	{0x0000b0b4, 0x00000000},
-	{0x0000b0b8, 0x00000000},
-	{0x0000b0bc, 0x00000000},
-	{0x0000b0c0, 0x003f0020},
-	{0x0000b0c4, 0x00400041},
-	{0x0000b0c8, 0x0140005f},
-	{0x0000b0cc, 0x0160015f},
-	{0x0000b0d0, 0x017e017f},
-	{0x0000b0d4, 0x02410242},
-	{0x0000b0d8, 0x025f0240},
-	{0x0000b0dc, 0x027f0260},
-	{0x0000b0e0, 0x0341027e},
-	{0x0000b0e4, 0x035f0340},
-	{0x0000b0e8, 0x037f0360},
-	{0x0000b0ec, 0x04400441},
-	{0x0000b0f0, 0x0460045f},
-	{0x0000b0f4, 0x0541047f},
-	{0x0000b0f8, 0x055f0540},
-	{0x0000b0fc, 0x057f0560},
-	{0x0000b100, 0x06400641},
-	{0x0000b104, 0x0660065f},
-	{0x0000b108, 0x067e067f},
-	{0x0000b10c, 0x07410742},
-	{0x0000b110, 0x075f0740},
-	{0x0000b114, 0x077f0760},
-	{0x0000b118, 0x07800781},
-	{0x0000b11c, 0x07a0079f},
-	{0x0000b120, 0x07c107bf},
-	{0x0000b124, 0x000007c0},
-	{0x0000b128, 0x00000000},
-	{0x0000b12c, 0x00000000},
-	{0x0000b130, 0x00000000},
-	{0x0000b134, 0x00000000},
-	{0x0000b138, 0x00000000},
-	{0x0000b13c, 0x00000000},
-	{0x0000b140, 0x003f0020},
-	{0x0000b144, 0x00400041},
-	{0x0000b148, 0x0140005f},
-	{0x0000b14c, 0x0160015f},
-	{0x0000b150, 0x017e017f},
-	{0x0000b154, 0x02410242},
-	{0x0000b158, 0x025f0240},
-	{0x0000b15c, 0x027f0260},
-	{0x0000b160, 0x0341027e},
-	{0x0000b164, 0x035f0340},
-	{0x0000b168, 0x037f0360},
-	{0x0000b16c, 0x04400441},
-	{0x0000b170, 0x0460045f},
-	{0x0000b174, 0x0541047f},
-	{0x0000b178, 0x055f0540},
-	{0x0000b17c, 0x057f0560},
-	{0x0000b180, 0x06400641},
-	{0x0000b184, 0x0660065f},
-	{0x0000b188, 0x067e067f},
-	{0x0000b18c, 0x07410742},
-	{0x0000b190, 0x075f0740},
-	{0x0000b194, 0x077f0760},
-	{0x0000b198, 0x07800781},
-	{0x0000b19c, 0x07a0079f},
-	{0x0000b1a0, 0x07c107bf},
-	{0x0000b1a4, 0x000007c0},
-	{0x0000b1a8, 0x00000000},
-	{0x0000b1ac, 0x00000000},
-	{0x0000b1b0, 0x00000000},
-	{0x0000b1b4, 0x00000000},
-	{0x0000b1b8, 0x00000000},
-	{0x0000b1bc, 0x00000000},
-	{0x0000b1c0, 0x00000000},
-	{0x0000b1c4, 0x00000000},
-	{0x0000b1c8, 0x00000000},
-	{0x0000b1cc, 0x00000000},
-	{0x0000b1d0, 0x00000000},
-	{0x0000b1d4, 0x00000000},
-	{0x0000b1d8, 0x00000000},
-	{0x0000b1dc, 0x00000000},
-	{0x0000b1e0, 0x00000000},
-	{0x0000b1e4, 0x00000000},
-	{0x0000b1e8, 0x00000000},
-	{0x0000b1ec, 0x00000000},
-	{0x0000b1f0, 0x00000396},
-	{0x0000b1f4, 0x00000396},
-	{0x0000b1f8, 0x00000396},
-	{0x0000b1fc, 0x00000196},
-};
-
-static const u32 ar9462_2p1_common_5g_xlna_only_rx_gain[][2] = {
-	/* Addr      allmodes  */
-	{0x0000a000, 0x00010000},
-	{0x0000a004, 0x00030002},
-	{0x0000a008, 0x00050004},
-	{0x0000a00c, 0x00810080},
-	{0x0000a010, 0x00830082},
-	{0x0000a014, 0x01810180},
-	{0x0000a018, 0x01830182},
-	{0x0000a01c, 0x01850184},
-	{0x0000a020, 0x01890188},
-	{0x0000a024, 0x018b018a},
-	{0x0000a028, 0x018d018c},
-	{0x0000a02c, 0x03820190},
-	{0x0000a030, 0x03840383},
-	{0x0000a034, 0x03880385},
-	{0x0000a038, 0x038a0389},
-	{0x0000a03c, 0x038c038b},
-	{0x0000a040, 0x0390038d},
-	{0x0000a044, 0x03920391},
-	{0x0000a048, 0x03940393},
-	{0x0000a04c, 0x03960395},
-	{0x0000a050, 0x00000000},
-	{0x0000a054, 0x00000000},
-	{0x0000a058, 0x00000000},
-	{0x0000a05c, 0x00000000},
-	{0x0000a060, 0x00000000},
-	{0x0000a064, 0x00000000},
-	{0x0000a068, 0x00000000},
-	{0x0000a06c, 0x00000000},
-	{0x0000a070, 0x00000000},
-	{0x0000a074, 0x00000000},
-	{0x0000a078, 0x00000000},
-	{0x0000a07c, 0x00000000},
-	{0x0000a080, 0x29292929},
-	{0x0000a084, 0x29292929},
-	{0x0000a088, 0x29292929},
-	{0x0000a08c, 0x29292929},
-	{0x0000a090, 0x22292929},
-	{0x0000a094, 0x1d1d2222},
-	{0x0000a098, 0x0c111117},
-	{0x0000a09c, 0x00030303},
-	{0x0000a0a0, 0x00000000},
-	{0x0000a0a4, 0x00000000},
-	{0x0000a0a8, 0x00000000},
-	{0x0000a0ac, 0x00000000},
-	{0x0000a0b0, 0x00000000},
-	{0x0000a0b4, 0x00000000},
-	{0x0000a0b8, 0x00000000},
-	{0x0000a0bc, 0x00000000},
-	{0x0000a0c0, 0x001f0000},
-	{0x0000a0c4, 0x01000101},
-	{0x0000a0c8, 0x011e011f},
-	{0x0000a0cc, 0x011c011d},
-	{0x0000a0d0, 0x02030204},
-	{0x0000a0d4, 0x02010202},
-	{0x0000a0d8, 0x021f0200},
-	{0x0000a0dc, 0x0302021e},
-	{0x0000a0e0, 0x03000301},
-	{0x0000a0e4, 0x031e031f},
-	{0x0000a0e8, 0x0402031d},
-	{0x0000a0ec, 0x04000401},
-	{0x0000a0f0, 0x041e041f},
-	{0x0000a0f4, 0x0502041d},
-	{0x0000a0f8, 0x05000501},
-	{0x0000a0fc, 0x051e051f},
-	{0x0000a100, 0x06010602},
-	{0x0000a104, 0x061f0600},
-	{0x0000a108, 0x061d061e},
-	{0x0000a10c, 0x07020703},
-	{0x0000a110, 0x07000701},
-	{0x0000a114, 0x00000000},
-	{0x0000a118, 0x00000000},
-	{0x0000a11c, 0x00000000},
-	{0x0000a120, 0x00000000},
-	{0x0000a124, 0x00000000},
-	{0x0000a128, 0x00000000},
-	{0x0000a12c, 0x00000000},
-	{0x0000a130, 0x00000000},
-	{0x0000a134, 0x00000000},
-	{0x0000a138, 0x00000000},
-	{0x0000a13c, 0x00000000},
-	{0x0000a140, 0x001f0000},
-	{0x0000a144, 0x01000101},
-	{0x0000a148, 0x011e011f},
-	{0x0000a14c, 0x011c011d},
-	{0x0000a150, 0x02030204},
-	{0x0000a154, 0x02010202},
-	{0x0000a158, 0x021f0200},
-	{0x0000a15c, 0x0302021e},
-	{0x0000a160, 0x03000301},
-	{0x0000a164, 0x031e031f},
-	{0x0000a168, 0x0402031d},
-	{0x0000a16c, 0x04000401},
-	{0x0000a170, 0x041e041f},
-	{0x0000a174, 0x0502041d},
-	{0x0000a178, 0x05000501},
-	{0x0000a17c, 0x051e051f},
-	{0x0000a180, 0x06010602},
-	{0x0000a184, 0x061f0600},
-	{0x0000a188, 0x061d061e},
-	{0x0000a18c, 0x07020703},
-	{0x0000a190, 0x07000701},
-	{0x0000a194, 0x00000000},
-	{0x0000a198, 0x00000000},
-	{0x0000a19c, 0x00000000},
-	{0x0000a1a0, 0x00000000},
-	{0x0000a1a4, 0x00000000},
-	{0x0000a1a8, 0x00000000},
-	{0x0000a1ac, 0x00000000},
-	{0x0000a1b0, 0x00000000},
-	{0x0000a1b4, 0x00000000},
-	{0x0000a1b8, 0x00000000},
-	{0x0000a1bc, 0x00000000},
-	{0x0000a1c0, 0x00000000},
-	{0x0000a1c4, 0x00000000},
-	{0x0000a1c8, 0x00000000},
-	{0x0000a1cc, 0x00000000},
-	{0x0000a1d0, 0x00000000},
-	{0x0000a1d4, 0x00000000},
-	{0x0000a1d8, 0x00000000},
-	{0x0000a1dc, 0x00000000},
-	{0x0000a1e0, 0x00000000},
-	{0x0000a1e4, 0x00000000},
-	{0x0000a1e8, 0x00000000},
-	{0x0000a1ec, 0x00000000},
-	{0x0000a1f0, 0x00000396},
-	{0x0000a1f4, 0x00000396},
-	{0x0000a1f8, 0x00000396},
-	{0x0000a1fc, 0x00000196},
-	{0x0000b000, 0x00010000},
-	{0x0000b004, 0x00030002},
-	{0x0000b008, 0x00050004},
-	{0x0000b00c, 0x00810080},
-	{0x0000b010, 0x00830082},
-	{0x0000b014, 0x01810180},
-	{0x0000b018, 0x01830182},
-	{0x0000b01c, 0x01850184},
-	{0x0000b020, 0x02810280},
-	{0x0000b024, 0x02830282},
-	{0x0000b028, 0x02850284},
-	{0x0000b02c, 0x02890288},
-	{0x0000b030, 0x028b028a},
-	{0x0000b034, 0x0388028c},
-	{0x0000b038, 0x038a0389},
-	{0x0000b03c, 0x038c038b},
-	{0x0000b040, 0x0390038d},
-	{0x0000b044, 0x03920391},
-	{0x0000b048, 0x03940393},
-	{0x0000b04c, 0x03960395},
-	{0x0000b050, 0x00000000},
-	{0x0000b054, 0x00000000},
-	{0x0000b058, 0x00000000},
-	{0x0000b05c, 0x00000000},
-	{0x0000b060, 0x00000000},
-	{0x0000b064, 0x00000000},
-	{0x0000b068, 0x00000000},
-	{0x0000b06c, 0x00000000},
-	{0x0000b070, 0x00000000},
-	{0x0000b074, 0x00000000},
-	{0x0000b078, 0x00000000},
-	{0x0000b07c, 0x00000000},
-	{0x0000b080, 0x2a2d2f32},
-	{0x0000b084, 0x21232328},
-	{0x0000b088, 0x19191c1e},
-	{0x0000b08c, 0x12141417},
-	{0x0000b090, 0x07070e0e},
-	{0x0000b094, 0x03030305},
-	{0x0000b098, 0x00000003},
-	{0x0000b09c, 0x00000000},
-	{0x0000b0a0, 0x00000000},
-	{0x0000b0a4, 0x00000000},
-	{0x0000b0a8, 0x00000000},
-	{0x0000b0ac, 0x00000000},
-	{0x0000b0b0, 0x00000000},
-	{0x0000b0b4, 0x00000000},
-	{0x0000b0b8, 0x00000000},
-	{0x0000b0bc, 0x00000000},
-	{0x0000b0c0, 0x003f0020},
-	{0x0000b0c4, 0x00400041},
-	{0x0000b0c8, 0x0140005f},
-	{0x0000b0cc, 0x0160015f},
-	{0x0000b0d0, 0x017e017f},
-	{0x0000b0d4, 0x02410242},
-	{0x0000b0d8, 0x025f0240},
-	{0x0000b0dc, 0x027f0260},
-	{0x0000b0e0, 0x0341027e},
-	{0x0000b0e4, 0x035f0340},
-	{0x0000b0e8, 0x037f0360},
-	{0x0000b0ec, 0x04400441},
-	{0x0000b0f0, 0x0460045f},
-	{0x0000b0f4, 0x0541047f},
-	{0x0000b0f8, 0x055f0540},
-	{0x0000b0fc, 0x057f0560},
-	{0x0000b100, 0x06400641},
-	{0x0000b104, 0x0660065f},
-	{0x0000b108, 0x067e067f},
-	{0x0000b10c, 0x07410742},
-	{0x0000b110, 0x075f0740},
-	{0x0000b114, 0x077f0760},
-	{0x0000b118, 0x07800781},
-	{0x0000b11c, 0x07a0079f},
-	{0x0000b120, 0x07c107bf},
-	{0x0000b124, 0x000007c0},
-	{0x0000b128, 0x00000000},
-	{0x0000b12c, 0x00000000},
-	{0x0000b130, 0x00000000},
-	{0x0000b134, 0x00000000},
-	{0x0000b138, 0x00000000},
-	{0x0000b13c, 0x00000000},
-	{0x0000b140, 0x003f0020},
-	{0x0000b144, 0x00400041},
-	{0x0000b148, 0x0140005f},
-	{0x0000b14c, 0x0160015f},
-	{0x0000b150, 0x017e017f},
-	{0x0000b154, 0x02410242},
-	{0x0000b158, 0x025f0240},
-	{0x0000b15c, 0x027f0260},
-	{0x0000b160, 0x0341027e},
-	{0x0000b164, 0x035f0340},
-	{0x0000b168, 0x037f0360},
-	{0x0000b16c, 0x04400441},
-	{0x0000b170, 0x0460045f},
-	{0x0000b174, 0x0541047f},
-	{0x0000b178, 0x055f0540},
-	{0x0000b17c, 0x057f0560},
-	{0x0000b180, 0x06400641},
-	{0x0000b184, 0x0660065f},
-	{0x0000b188, 0x067e067f},
-	{0x0000b18c, 0x07410742},
-	{0x0000b190, 0x075f0740},
-	{0x0000b194, 0x077f0760},
-	{0x0000b198, 0x07800781},
-	{0x0000b19c, 0x07a0079f},
-	{0x0000b1a0, 0x07c107bf},
-	{0x0000b1a4, 0x000007c0},
-	{0x0000b1a8, 0x00000000},
-	{0x0000b1ac, 0x00000000},
-	{0x0000b1b0, 0x00000000},
-	{0x0000b1b4, 0x00000000},
-	{0x0000b1b8, 0x00000000},
-	{0x0000b1bc, 0x00000000},
-	{0x0000b1c0, 0x00000000},
-	{0x0000b1c4, 0x00000000},
-	{0x0000b1c8, 0x00000000},
-	{0x0000b1cc, 0x00000000},
-	{0x0000b1d0, 0x00000000},
-	{0x0000b1d4, 0x00000000},
-	{0x0000b1d8, 0x00000000},
-	{0x0000b1dc, 0x00000000},
-	{0x0000b1e0, 0x00000000},
-	{0x0000b1e4, 0x00000000},
-	{0x0000b1e8, 0x00000000},
-	{0x0000b1ec, 0x00000000},
-	{0x0000b1f0, 0x00000396},
-	{0x0000b1f4, 0x00000396},
-	{0x0000b1f8, 0x00000396},
-	{0x0000b1fc, 0x00000196},
-};
-
-static const u32 ar9462_2p1_modes_low_ob_db_tx_gain[][5] = {
-	/* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
-	{0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002},
-	{0x0000a2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
-	{0x0000a2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
-	{0x0000a2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
-	{0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
-	{0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
-	{0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
-	{0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
-	{0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
-	{0x0000a508, 0x0a000020, 0x0a000020, 0x08000004, 0x08000004},
-	{0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200},
-	{0x0000a510, 0x16000220, 0x16000220, 0x0f000202, 0x0f000202},
-	{0x0000a514, 0x1c000223, 0x1c000223, 0x12000400, 0x12000400},
-	{0x0000a518, 0x21020220, 0x21020220, 0x16000402, 0x16000402},
-	{0x0000a51c, 0x27020223, 0x27020223, 0x19000404, 0x19000404},
-	{0x0000a520, 0x2b022220, 0x2b022220, 0x1c000603, 0x1c000603},
-	{0x0000a524, 0x2f022222, 0x2f022222, 0x21000a02, 0x21000a02},
-	{0x0000a528, 0x34022225, 0x34022225, 0x25000a04, 0x25000a04},
-	{0x0000a52c, 0x3a02222a, 0x3a02222a, 0x28000a20, 0x28000a20},
-	{0x0000a530, 0x3e02222c, 0x3e02222c, 0x2c000e20, 0x2c000e20},
-	{0x0000a534, 0x4202242a, 0x4202242a, 0x30000e22, 0x30000e22},
-	{0x0000a538, 0x4702244a, 0x4702244a, 0x34000e24, 0x34000e24},
-	{0x0000a53c, 0x4b02244c, 0x4b02244c, 0x38001640, 0x38001640},
-	{0x0000a540, 0x4e02246c, 0x4e02246c, 0x3c001660, 0x3c001660},
-	{0x0000a544, 0x5302266c, 0x5302266c, 0x3f001861, 0x3f001861},
-	{0x0000a548, 0x5702286c, 0x5702286c, 0x43001a81, 0x43001a81},
-	{0x0000a54c, 0x5c04286b, 0x5c04286b, 0x47001a83, 0x47001a83},
-	{0x0000a550, 0x61042a6c, 0x61042a6c, 0x4a001c84, 0x4a001c84},
-	{0x0000a554, 0x66062a6c, 0x66062a6c, 0x4e001ce3, 0x4e001ce3},
-	{0x0000a558, 0x6b062e6c, 0x6b062e6c, 0x52001ce5, 0x52001ce5},
-	{0x0000a55c, 0x7006308c, 0x7006308c, 0x56001ce9, 0x56001ce9},
-	{0x0000a560, 0x730a308a, 0x730a308a, 0x5a001ceb, 0x5a001ceb},
-	{0x0000a564, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
-	{0x0000a568, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
-	{0x0000a56c, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
-	{0x0000a570, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
-	{0x0000a574, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
-	{0x0000a578, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
-	{0x0000a57c, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
-	{0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
-	{0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
-	{0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
-	{0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
-	{0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
-	{0x0000a614, 0x01404000, 0x01404000, 0x01404000, 0x01404000},
-	{0x0000a618, 0x01404501, 0x01404501, 0x01404501, 0x01404501},
-	{0x0000a61c, 0x02008802, 0x02008802, 0x02008501, 0x02008501},
-	{0x0000a620, 0x0300cc03, 0x0300cc03, 0x0280ca03, 0x0280ca03},
-	{0x0000a624, 0x0300cc03, 0x0300cc03, 0x03010c04, 0x03010c04},
-	{0x0000a628, 0x0300cc03, 0x0300cc03, 0x04014c04, 0x04014c04},
-	{0x0000a62c, 0x03810c03, 0x03810c03, 0x04015005, 0x04015005},
-	{0x0000a630, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
-	{0x0000a634, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
-	{0x0000a638, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
-	{0x0000a63c, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
-	{0x0000b2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
-	{0x0000b2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
-	{0x0000b2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
-	{0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
-	{0x00016044, 0x012482d4, 0x012482d4, 0x012482d4, 0x012482d4},
-	{0x00016048, 0x64992060, 0x64992060, 0x64992060, 0x64992060},
-	{0x00016054, 0x6db60000, 0x6db60000, 0x6db60000, 0x6db60000},
-	{0x00016444, 0x012482d4, 0x012482d4, 0x012482d4, 0x012482d4},
-	{0x00016448, 0x64992000, 0x64992000, 0x64992000, 0x64992000},
-	{0x00016454, 0x6db60000, 0x6db60000, 0x6db60000, 0x6db60000},
-};
-
-static const u32 ar9462_2p1_modes_high_ob_db_tx_gain[][5] = {
-	/* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
-	{0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002},
-	{0x0000a2dc, 0x01feee00, 0x01feee00, 0x03aaa352, 0x03aaa352},
-	{0x0000a2e0, 0x0000f000, 0x0000f000, 0x03ccc584, 0x03ccc584},
-	{0x0000a2e4, 0x01ff0000, 0x01ff0000, 0x03f0f800, 0x03f0f800},
-	{0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
-	{0x0000a410, 0x000050da, 0x000050da, 0x000050de, 0x000050de},
-	{0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
-	{0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
-	{0x0000a504, 0x06002223, 0x06002223, 0x04000002, 0x04000002},
-	{0x0000a508, 0x0a022220, 0x0a022220, 0x08000004, 0x08000004},
-	{0x0000a50c, 0x0f022223, 0x0f022223, 0x0b000200, 0x0b000200},
-	{0x0000a510, 0x14022620, 0x14022620, 0x0f000202, 0x0f000202},
-	{0x0000a514, 0x18022622, 0x18022622, 0x11000400, 0x11000400},
-	{0x0000a518, 0x1b022822, 0x1b022822, 0x15000402, 0x15000402},
-	{0x0000a51c, 0x20022842, 0x20022842, 0x19000404, 0x19000404},
-	{0x0000a520, 0x22022c41, 0x22022c41, 0x1b000603, 0x1b000603},
-	{0x0000a524, 0x28023042, 0x28023042, 0x1f000a02, 0x1f000a02},
-	{0x0000a528, 0x2c023044, 0x2c023044, 0x23000a04, 0x23000a04},
-	{0x0000a52c, 0x2f023644, 0x2f023644, 0x26000a20, 0x26000a20},
-	{0x0000a530, 0x34025643, 0x34025643, 0x2a000e20, 0x2a000e20},
-	{0x0000a534, 0x38025a44, 0x38025a44, 0x2e000e22, 0x2e000e22},
-	{0x0000a538, 0x3b025e45, 0x3b025e45, 0x31000e24, 0x31000e24},
-	{0x0000a53c, 0x41025e4a, 0x41025e4a, 0x34001640, 0x34001640},
-	{0x0000a540, 0x48025e6c, 0x48025e6c, 0x38001660, 0x38001660},
-	{0x0000a544, 0x4e025e8e, 0x4e025e8e, 0x3b001861, 0x3b001861},
-	{0x0000a548, 0x55025eb3, 0x55025eb3, 0x3e001a81, 0x3e001a81},
-	{0x0000a54c, 0x58025ef3, 0x58025ef3, 0x42001a83, 0x42001a83},
-	{0x0000a550, 0x5d025ef6, 0x5d025ef6, 0x44001a84, 0x44001a84},
-	{0x0000a554, 0x62025f56, 0x62025f56, 0x48001ce3, 0x48001ce3},
-	{0x0000a558, 0x66027f56, 0x66027f56, 0x4c001ce5, 0x4c001ce5},
-	{0x0000a55c, 0x6a029f56, 0x6a029f56, 0x50001ce9, 0x50001ce9},
-	{0x0000a560, 0x70049f56, 0x70049f56, 0x54001ceb, 0x54001ceb},
-	{0x0000a564, 0x751ffff6, 0x751ffff6, 0x56001eec, 0x56001eec},
-	{0x0000a568, 0x751ffff6, 0x751ffff6, 0x58001ef0, 0x58001ef0},
-	{0x0000a56c, 0x751ffff6, 0x751ffff6, 0x5a001ef4, 0x5a001ef4},
-	{0x0000a570, 0x751ffff6, 0x751ffff6, 0x5c001ff6, 0x5c001ff6},
-	{0x0000a574, 0x751ffff6, 0x751ffff6, 0x5c001ff6, 0x5c001ff6},
-	{0x0000a578, 0x751ffff6, 0x751ffff6, 0x5c001ff6, 0x5c001ff6},
-	{0x0000a57c, 0x751ffff6, 0x751ffff6, 0x5c001ff6, 0x5c001ff6},
-	{0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
-	{0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
-	{0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
-	{0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
-	{0x0000a610, 0x00804000, 0x00804000, 0x00000000, 0x00000000},
-	{0x0000a614, 0x00804201, 0x00804201, 0x01404000, 0x01404000},
-	{0x0000a618, 0x0280c802, 0x0280c802, 0x01404501, 0x01404501},
-	{0x0000a61c, 0x0280ca03, 0x0280ca03, 0x02008501, 0x02008501},
-	{0x0000a620, 0x04c15104, 0x04c15104, 0x0280ca03, 0x0280ca03},
-	{0x0000a624, 0x04c15305, 0x04c15305, 0x03010c04, 0x03010c04},
-	{0x0000a628, 0x04c15305, 0x04c15305, 0x04014c04, 0x04014c04},
-	{0x0000a62c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
-	{0x0000a630, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
-	{0x0000a634, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
-	{0x0000a638, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
-	{0x0000a63c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
-	{0x0000b2dc, 0x01feee00, 0x01feee00, 0x03aaa352, 0x03aaa352},
-	{0x0000b2e0, 0x0000f000, 0x0000f000, 0x03ccc584, 0x03ccc584},
-	{0x0000b2e4, 0x01ff0000, 0x01ff0000, 0x03f0f800, 0x03f0f800},
-	{0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
-	{0x00016044, 0x056d82e4, 0x056d82e4, 0x056d82e4, 0x056d82e4},
-	{0x00016048, 0x8db49060, 0x8db49060, 0x8db49060, 0x8db49060},
-	{0x00016054, 0x6db60000, 0x6db60000, 0x6db60000, 0x6db60000},
-	{0x00016444, 0x056d82e4, 0x056d82e4, 0x056d82e4, 0x056d82e4},
-	{0x00016448, 0x8db49000, 0x8db49000, 0x8db49000, 0x8db49000},
-	{0x00016454, 0x6db60000, 0x6db60000, 0x6db60000, 0x6db60000},
-};
-
-static const u32 ar9462_2p1_modes_mix_ob_db_tx_gain[][5] = {
-	/* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
-	{0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002},
-	{0x0000a2dc, 0x01feee00, 0x01feee00, 0x03aaa352, 0x03aaa352},
-	{0x0000a2e0, 0x0000f000, 0x0000f000, 0x03ccc584, 0x03ccc584},
-	{0x0000a2e4, 0x01ff0000, 0x01ff0000, 0x03f0f800, 0x03f0f800},
-	{0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
-	{0x0000a410, 0x0000d0da, 0x0000d0da, 0x0000d0de, 0x0000d0de},
-	{0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
-	{0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
-	{0x0000a504, 0x06002223, 0x06002223, 0x04000002, 0x04000002},
-	{0x0000a508, 0x0a022220, 0x0a022220, 0x08000004, 0x08000004},
-	{0x0000a50c, 0x0f022223, 0x0f022223, 0x0b000200, 0x0b000200},
-	{0x0000a510, 0x14022620, 0x14022620, 0x0f000202, 0x0f000202},
-	{0x0000a514, 0x18022622, 0x18022622, 0x12000400, 0x12000400},
-	{0x0000a518, 0x1b022822, 0x1b022822, 0x16000402, 0x16000402},
-	{0x0000a51c, 0x20022842, 0x20022842, 0x19000404, 0x19000404},
-	{0x0000a520, 0x22022c41, 0x22022c41, 0x1c000603, 0x1c000603},
-	{0x0000a524, 0x28023042, 0x28023042, 0x21000a02, 0x21000a02},
-	{0x0000a528, 0x2c023044, 0x2c023044, 0x25000a04, 0x25000a04},
-	{0x0000a52c, 0x2f023644, 0x2f023644, 0x28000a20, 0x28000a20},
-	{0x0000a530, 0x34025643, 0x34025643, 0x2c000e20, 0x2c000e20},
-	{0x0000a534, 0x38025a44, 0x38025a44, 0x30000e22, 0x30000e22},
-	{0x0000a538, 0x3b025e45, 0x3b025e45, 0x34000e24, 0x34000e24},
-	{0x0000a53c, 0x41025e4a, 0x41025e4a, 0x38001640, 0x38001640},
-	{0x0000a540, 0x48025e6c, 0x48025e6c, 0x3c001660, 0x3c001660},
-	{0x0000a544, 0x4e025e8e, 0x4e025e8e, 0x3f001861, 0x3f001861},
-	{0x0000a548, 0x55025eb3, 0x55025eb3, 0x43001a81, 0x43001a81},
-	{0x0000a54c, 0x58025ef3, 0x58025ef3, 0x47001a83, 0x47001a83},
-	{0x0000a550, 0x5d025ef6, 0x5d025ef6, 0x4a001c84, 0x4a001c84},
-	{0x0000a554, 0x62025f56, 0x62025f56, 0x4e001ce3, 0x4e001ce3},
-	{0x0000a558, 0x66027f56, 0x66027f56, 0x52001ce5, 0x52001ce5},
-	{0x0000a55c, 0x6a029f56, 0x6a029f56, 0x56001ce9, 0x56001ce9},
-	{0x0000a560, 0x70049f56, 0x70049f56, 0x5a001ceb, 0x5a001ceb},
-	{0x0000a564, 0x751ffff6, 0x751ffff6, 0x5c001eec, 0x5c001eec},
-	{0x0000a568, 0x751ffff6, 0x751ffff6, 0x5e001ef0, 0x5e001ef0},
-	{0x0000a56c, 0x751ffff6, 0x751ffff6, 0x60001ef4, 0x60001ef4},
-	{0x0000a570, 0x751ffff6, 0x751ffff6, 0x62001ff6, 0x62001ff6},
-	{0x0000a574, 0x751ffff6, 0x751ffff6, 0x62001ff6, 0x62001ff6},
-	{0x0000a578, 0x751ffff6, 0x751ffff6, 0x62001ff6, 0x62001ff6},
-	{0x0000a57c, 0x751ffff6, 0x751ffff6, 0x62001ff6, 0x62001ff6},
-	{0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
-	{0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
-	{0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
-	{0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
-	{0x0000a610, 0x00804000, 0x00804000, 0x00000000, 0x00000000},
-	{0x0000a614, 0x00804201, 0x00804201, 0x01404000, 0x01404000},
-	{0x0000a618, 0x0280c802, 0x0280c802, 0x01404501, 0x01404501},
-	{0x0000a61c, 0x0280ca03, 0x0280ca03, 0x02008501, 0x02008501},
-	{0x0000a620, 0x04c15104, 0x04c15104, 0x0280ca03, 0x0280ca03},
-	{0x0000a624, 0x04c15305, 0x04c15305, 0x03010c04, 0x03010c04},
-	{0x0000a628, 0x04c15305, 0x04c15305, 0x04014c04, 0x04014c04},
-	{0x0000a62c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
-	{0x0000a630, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
-	{0x0000a634, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
-	{0x0000a638, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
-	{0x0000a63c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
-	{0x0000b2dc, 0x01feee00, 0x01feee00, 0x03aaa352, 0x03aaa352},
-	{0x0000b2e0, 0x0000f000, 0x0000f000, 0x03ccc584, 0x03ccc584},
-	{0x0000b2e4, 0x01ff0000, 0x01ff0000, 0x03f0f800, 0x03f0f800},
-	{0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
-};
-
-static const u32 ar9462_2p1_modes_fast_clock[][3] = {
-	/* Addr      5G_HT20     5G_HT40   */
-	{0x00001030, 0x00000268, 0x000004d0},
-	{0x00001070, 0x0000018c, 0x00000318},
-	{0x000010b0, 0x00000fd0, 0x00001fa0},
-	{0x00008014, 0x044c044c, 0x08980898},
-	{0x0000801c, 0x148ec02b, 0x148ec057},
-	{0x00008318, 0x000044c0, 0x00008980},
-	{0x00009e00, 0x0372131c, 0x0372131c},
-	{0x0000a230, 0x0000400b, 0x00004016},
-	{0x0000a254, 0x00000898, 0x00001130},
-};
-
-static const u32 ar9462_2p1_baseband_core_txfir_coeff_japan_2484[][2] = {
-	/* Addr      allmodes  */
-	{0x0000a398, 0x00000000},
-	{0x0000a39c, 0x6f7f0301},
-	{0x0000a3a0, 0xca9228ee},
-};
-
 #endif /* INITVALS_9462_2P1_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar9485_initvals.h b/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
index 7c1845221e1c..ce83ce47a1ca 100644
--- a/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
@@ -20,17 +20,11 @@
 
 /* AR9485 1.1 */
 
-static const u32 ar9485_1_1_mac_postamble[][5] = {
-	/* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
-	{0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
-	{0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c},
-	{0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38},
-	{0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00},
-	{0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b},
-	{0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810},
-	{0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a},
-	{0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440},
-};
+#define ar9485_modes_lowest_ob_db_tx_gain_1_1 ar9485Modes_low_ob_db_tx_gain_1_1
+
+#define ar9485_1_1_mac_postamble ar9331_1p1_mac_postamble
+
+#define ar9485_1_1_baseband_core_txfir_coeff_japan_2484 ar9300_2p2_baseband_core_txfir_coeff_japan_2484
 
 static const u32 ar9485Common_wo_xlna_rx_gain_1_1[][2] = {
 	/* Addr      allmodes  */
@@ -546,100 +540,6 @@ static const u32 ar9485Modes_low_ob_db_tx_gain_1_1[][5] = {
 	{0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260},
 };
 
-static const u32 ar9485_modes_lowest_ob_db_tx_gain_1_1[][5] = {
-	/* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
-	{0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002},
-	{0x0000a2d8, 0xf999a83a, 0xf999a83a, 0x7999a83a, 0x7999a83a},
-	{0x0000a2dc, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
-	{0x0000a2e0, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
-	{0x0000a2e4, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
-	{0x0000a2e8, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
-	{0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
-	{0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
-	{0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
-	{0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
-	{0x0000a508, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
-	{0x0000a50c, 0x11062202, 0x11062202, 0x0d000200, 0x0d000200},
-	{0x0000a510, 0x17022e00, 0x17022e00, 0x11000202, 0x11000202},
-	{0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x15000400, 0x15000400},
-	{0x0000a518, 0x25020ec0, 0x25020ec0, 0x19000402, 0x19000402},
-	{0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x1d000404, 0x1d000404},
-	{0x0000a520, 0x2f001f04, 0x2f001f04, 0x21000603, 0x21000603},
-	{0x0000a524, 0x35001fc4, 0x35001fc4, 0x25000605, 0x25000605},
-	{0x0000a528, 0x3c022f04, 0x3c022f04, 0x2a000a03, 0x2a000a03},
-	{0x0000a52c, 0x41023e85, 0x41023e85, 0x2c000a04, 0x2c000a04},
-	{0x0000a530, 0x48023ec6, 0x48023ec6, 0x34000e20, 0x34000e20},
-	{0x0000a534, 0x4d023f01, 0x4d023f01, 0x35000e21, 0x35000e21},
-	{0x0000a538, 0x53023f4b, 0x53023f4b, 0x43000e62, 0x43000e62},
-	{0x0000a53c, 0x5a027f09, 0x5a027f09, 0x45000e63, 0x45000e63},
-	{0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x49000e65, 0x49000e65},
-	{0x0000a544, 0x6502feca, 0x6502feca, 0x4b000e66, 0x4b000e66},
-	{0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x4d001645, 0x4d001645},
-	{0x0000a54c, 0x7203feca, 0x7203feca, 0x51001865, 0x51001865},
-	{0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x55001a86, 0x55001a86},
-	{0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x57001ce9, 0x57001ce9},
-	{0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5a001ceb, 0x5a001ceb},
-	{0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x5e001eeb, 0x5e001eeb},
-	{0x0000a560, 0x900fff0b, 0x900fff0b, 0x5e001eeb, 0x5e001eeb},
-	{0x0000a564, 0x960fffcb, 0x960fffcb, 0x5e001eeb, 0x5e001eeb},
-	{0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
-	{0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
-	{0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
-	{0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
-	{0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
-	{0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
-	{0x0000a580, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
-	{0x0000a584, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
-	{0x0000a588, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
-	{0x0000a58c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
-	{0x0000a590, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
-	{0x0000a594, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
-	{0x0000a598, 0x00000000, 0x00000000, 0x01404501, 0x01404501},
-	{0x0000a59c, 0x00000000, 0x00000000, 0x02808a02, 0x02808a02},
-	{0x0000a5a0, 0x00000000, 0x00000000, 0x02808a02, 0x02808a02},
-	{0x0000a5a4, 0x00000000, 0x00000000, 0x02808803, 0x02808803},
-	{0x0000a5a8, 0x00000000, 0x00000000, 0x04c14b04, 0x04c14b04},
-	{0x0000a5ac, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
-	{0x0000a5b0, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
-	{0x0000a5b4, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
-	{0x0000a5b8, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
-	{0x0000a5bc, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
-	{0x0000b500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
-	{0x0000b504, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
-	{0x0000b508, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
-	{0x0000b50c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
-	{0x0000b510, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
-	{0x0000b514, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
-	{0x0000b518, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
-	{0x0000b51c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
-	{0x0000b520, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
-	{0x0000b524, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
-	{0x0000b528, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
-	{0x0000b52c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
-	{0x0000b530, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
-	{0x0000b534, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
-	{0x0000b538, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
-	{0x0000b53c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
-	{0x0000b540, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
-	{0x0000b544, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
-	{0x0000b548, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
-	{0x0000b54c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
-	{0x0000b550, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
-	{0x0000b554, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
-	{0x0000b558, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
-	{0x0000b55c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
-	{0x0000b560, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
-	{0x0000b564, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
-	{0x0000b568, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
-	{0x0000b56c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
-	{0x0000b570, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
-	{0x0000b574, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
-	{0x0000b578, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
-	{0x0000b57c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
-	{0x00016044, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db},
-	{0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260},
-};
-
 static const u32 ar9485Modes_green_spur_ob_db_tx_gain_1_1[][5] = {
 	/* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
 	{0x000098bc, 0x00000003, 0x00000003, 0x00000003, 0x00000003},
@@ -1323,13 +1223,6 @@ static const u32 ar9485_1_1_mac_core[][2] = {
 	{0x000083d0, 0x000301ff},
 };
 
-static const u32 ar9485_1_1_baseband_core_txfir_coeff_japan_2484[][2] = {
-	/* Addr      allmodes  */
-	{0x0000a398, 0x00000000},
-	{0x0000a39c, 0x6f7f0301},
-	{0x0000a3a0, 0xca9228ee},
-};
-
 static const u32 ar9485_1_1_pcie_phy_clkreq_disable_L1[][2] = {
 	/* Addr      allmodes  */
 	{0x00018c00, 0x18013e5e},
diff --git a/drivers/net/wireless/ath/ath9k/ar953x_initvals.h b/drivers/net/wireless/ath/ath9k/ar953x_initvals.h
new file mode 100644
index 000000000000..3c9113d9b1bc
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar953x_initvals.h
@@ -0,0 +1,718 @@
+/*
+ * Copyright (c) 2010-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2012 Qualcomm Atheros Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef INITVALS_953X_H
+#define INITVALS_953X_H
+
+#define qca953x_1p0_mac_postamble ar9300_2p2_mac_postamble
+
+#define qca953x_1p0_soc_postamble ar9300_2p2_soc_postamble
+
+#define qca953x_1p0_common_rx_gain_table ar9300Common_rx_gain_table_2p2
+
+#define qca953x_1p0_common_wo_xlna_rx_gain_table ar9300Common_wo_xlna_rx_gain_table_2p2
+
+#define qca953x_1p0_modes_fast_clock ar9300Modes_fast_clock_2p2
+
+static const u32 qca953x_1p0_mac_core[][2] = {
+	/* Addr      allmodes  */
+	{0x00000008, 0x00000000},
+	{0x00000030, 0x00020085},
+	{0x00000034, 0x00000005},
+	{0x00000040, 0x00000000},
+	{0x00000044, 0x00000000},
+	{0x00000048, 0x00000008},
+	{0x0000004c, 0x00000010},
+	{0x00000050, 0x00000000},
+	{0x00001040, 0x002ffc0f},
+	{0x00001044, 0x002ffc0f},
+	{0x00001048, 0x002ffc0f},
+	{0x0000104c, 0x002ffc0f},
+	{0x00001050, 0x002ffc0f},
+	{0x00001054, 0x002ffc0f},
+	{0x00001058, 0x002ffc0f},
+	{0x0000105c, 0x002ffc0f},
+	{0x00001060, 0x002ffc0f},
+	{0x00001064, 0x002ffc0f},
+	{0x000010f0, 0x00000100},
+	{0x00001270, 0x00000000},
+	{0x000012b0, 0x00000000},
+	{0x000012f0, 0x00000000},
+	{0x0000143c, 0x00000000},
+	{0x0000147c, 0x00000000},
+	{0x00008000, 0x00000000},
+	{0x00008004, 0x00000000},
+	{0x00008008, 0x00000000},
+	{0x0000800c, 0x00000000},
+	{0x00008018, 0x00000000},
+	{0x00008020, 0x00000000},
+	{0x00008038, 0x00000000},
+	{0x0000803c, 0x00000000},
+	{0x00008040, 0x00000000},
+	{0x00008044, 0x00000000},
+	{0x00008048, 0x00000000},
+	{0x0000804c, 0xffffffff},
+	{0x00008054, 0x00000000},
+	{0x00008058, 0x00000000},
+	{0x0000805c, 0x000fc78f},
+	{0x00008060, 0x0000000f},
+	{0x00008064, 0x00000000},
+	{0x00008070, 0x00000310},
+	{0x00008074, 0x00000020},
+	{0x00008078, 0x00000000},
+	{0x0000809c, 0x0000000f},
+	{0x000080a0, 0x00000000},
+	{0x000080a4, 0x02ff0000},
+	{0x000080a8, 0x0e070605},
+	{0x000080ac, 0x0000000d},
+	{0x000080b0, 0x00000000},
+	{0x000080b4, 0x00000000},
+	{0x000080b8, 0x00000000},
+	{0x000080bc, 0x00000000},
+	{0x000080c0, 0x2a800000},
+	{0x000080c4, 0x06900168},
+	{0x000080c8, 0x13881c22},
+	{0x000080cc, 0x01f40000},
+	{0x000080d0, 0x00252500},
+	{0x000080d4, 0x00a00000},
+	{0x000080d8, 0x00400000},
+	{0x000080dc, 0x00000000},
+	{0x000080e0, 0xffffffff},
+	{0x000080e4, 0x0000ffff},
+	{0x000080e8, 0x3f3f3f3f},
+	{0x000080ec, 0x00000000},
+	{0x000080f0, 0x00000000},
+	{0x000080f4, 0x00000000},
+	{0x000080fc, 0x00020000},
+	{0x00008100, 0x00000000},
+	{0x00008108, 0x00000052},
+	{0x0000810c, 0x00000000},
+	{0x00008110, 0x00000000},
+	{0x00008114, 0x000007ff},
+	{0x00008118, 0x000000aa},
+	{0x0000811c, 0x00003210},
+	{0x00008124, 0x00000000},
+	{0x00008128, 0x00000000},
+	{0x0000812c, 0x00000000},
+	{0x00008130, 0x00000000},
+	{0x00008134, 0x00000000},
+	{0x00008138, 0x00000000},
+	{0x0000813c, 0x0000ffff},
+	{0x00008140, 0x000000fe},
+	{0x00008144, 0xffffffff},
+	{0x00008168, 0x00000000},
+	{0x0000816c, 0x00000000},
+	{0x000081c0, 0x00000000},
+	{0x000081c4, 0x33332210},
+	{0x000081ec, 0x00000000},
+	{0x000081f0, 0x00000000},
+	{0x000081f4, 0x00000000},
+	{0x000081f8, 0x00000000},
+	{0x000081fc, 0x00000000},
+	{0x00008240, 0x00100000},
+	{0x00008244, 0x0010f3d7},
+	{0x00008248, 0x00000852},
+	{0x0000824c, 0x0001e7ae},
+	{0x00008250, 0x00000000},
+	{0x00008254, 0x00000000},
+	{0x00008258, 0x00000000},
+	{0x0000825c, 0x40000000},
+	{0x00008260, 0x00080922},
+	{0x00008264, 0x9d400010},
+	{0x00008268, 0xffffffff},
+	{0x0000826c, 0x0000ffff},
+	{0x00008270, 0x00000000},
+	{0x00008274, 0x40000000},
+	{0x00008278, 0x003e4180},
+	{0x0000827c, 0x00000004},
+	{0x00008284, 0x0000002c},
+	{0x00008288, 0x0000002c},
+	{0x0000828c, 0x000000ff},
+	{0x00008294, 0x00000000},
+	{0x00008298, 0x00000000},
+	{0x0000829c, 0x00000000},
+	{0x00008300, 0x00001d40},
+	{0x00008314, 0x00000000},
+	{0x0000831c, 0x0000010d},
+	{0x00008328, 0x00000000},
+	{0x0000832c, 0x0000001f},
+	{0x00008330, 0x00000302},
+	{0x00008334, 0x00000700},
+	{0x00008338, 0xffff0000},
+	{0x0000833c, 0x02400000},
+	{0x00008340, 0x000107ff},
+	{0x00008344, 0xaa48107b},
+	{0x00008348, 0x008f0000},
+	{0x0000835c, 0x00000000},
+	{0x00008360, 0xffffffff},
+	{0x00008364, 0xffffffff},
+	{0x00008368, 0x00000000},
+	{0x00008370, 0x00000000},
+	{0x00008374, 0x000000ff},
+	{0x00008378, 0x00000000},
+	{0x0000837c, 0x00000000},
+	{0x00008380, 0xffffffff},
+	{0x00008384, 0xffffffff},
+	{0x00008390, 0xffffffff},
+	{0x00008394, 0xffffffff},
+	{0x00008398, 0x00000000},
+	{0x0000839c, 0x00000000},
+	{0x000083a0, 0x00000000},
+	{0x000083a4, 0x0000fa14},
+	{0x000083a8, 0x000f0c00},
+	{0x000083ac, 0x33332210},
+	{0x000083b0, 0x33332210},
+	{0x000083b4, 0x33332210},
+	{0x000083b8, 0x33332210},
+	{0x000083bc, 0x00000000},
+	{0x000083c0, 0x00000000},
+	{0x000083c4, 0x00000000},
+	{0x000083c8, 0x00000000},
+	{0x000083cc, 0x00000200},
+	{0x000083d0, 0x8c7901ff},
+};
+
+static const u32 qca953x_1p0_baseband_core[][2] = {
+	/* Addr      allmodes  */
+	{0x00009800, 0xafe68e30},
+	{0x00009804, 0xfd14e000},
+	{0x00009808, 0x9c0a9f6b},
+	{0x0000980c, 0x04900000},
+	{0x00009814, 0x0280c00a},
+	{0x00009818, 0x00000000},
+	{0x0000981c, 0x00020028},
+	{0x00009834, 0x6400a190},
+	{0x00009838, 0x0108ecff},
+	{0x0000983c, 0x14000600},
+	{0x00009880, 0x201fff00},
+	{0x00009884, 0x00001042},
+	{0x000098a4, 0x00200400},
+	{0x000098b0, 0x32840bbe},
+	{0x000098bc, 0x00000002},
+	{0x000098d0, 0x004b6a8e},
+	{0x000098d4, 0x00000820},
+	{0x000098dc, 0x00000000},
+	{0x000098f0, 0x00000000},
+	{0x000098f4, 0x00000000},
+	{0x00009c04, 0xff55ff55},
+	{0x00009c08, 0x0320ff55},
+	{0x00009c0c, 0x00000000},
+	{0x00009c10, 0x00000000},
+	{0x00009c14, 0x00046384},
+	{0x00009c18, 0x05b6b440},
+	{0x00009c1c, 0x00b6b440},
+	{0x00009d00, 0xc080a333},
+	{0x00009d04, 0x40206c10},
+	{0x00009d08, 0x009c4060},
+	{0x00009d0c, 0x9883800a},
+	{0x00009d10, 0x01884061},
+	{0x00009d14, 0x00c0040b},
+	{0x00009d18, 0x00000000},
+	{0x00009e08, 0x0038230c},
+	{0x00009e24, 0x990bb515},
+	{0x00009e28, 0x0c6f0000},
+	{0x00009e30, 0x06336f77},
+	{0x00009e34, 0x6af6532f},
+	{0x00009e38, 0x0cc80c00},
+	{0x00009e40, 0x0d261820},
+	{0x00009e4c, 0x00001004},
+	{0x00009e50, 0x00ff03f1},
+	{0x00009fc0, 0x813e4788},
+	{0x00009fc4, 0x0001efb5},
+	{0x00009fcc, 0x40000014},
+	{0x00009fd0, 0x01193b91},
+	{0x0000a20c, 0x00000000},
+	{0x0000a220, 0x00000000},
+	{0x0000a224, 0x00000000},
+	{0x0000a228, 0x10002310},
+	{0x0000a23c, 0x00000000},
+	{0x0000a244, 0x0c000000},
+	{0x0000a248, 0x00000140},
+	{0x0000a2a0, 0x00000007},
+	{0x0000a2c0, 0x00000007},
+	{0x0000a2c8, 0x00000000},
+	{0x0000a2d4, 0x00000000},
+	{0x0000a2ec, 0x00000000},
+	{0x0000a2f0, 0x00000000},
+	{0x0000a2f4, 0x00000000},
+	{0x0000a2f8, 0x00000000},
+	{0x0000a344, 0x00000000},
+	{0x0000a34c, 0x00000000},
+	{0x0000a350, 0x0000a000},
+	{0x0000a364, 0x00000000},
+	{0x0000a370, 0x00000000},
+	{0x0000a390, 0x00000001},
+	{0x0000a394, 0x00000444},
+	{0x0000a398, 0x1f020503},
+	{0x0000a39c, 0x29180c03},
+	{0x0000a3a0, 0x9a8b6844},
+	{0x0000a3a4, 0x000000ff},
+	{0x0000a3a8, 0x6a6a6a6a},
+	{0x0000a3ac, 0x6a6a6a6a},
+	{0x0000a3b0, 0x00c8641a},
+	{0x0000a3b4, 0x0000001a},
+	{0x0000a3b8, 0x0088642a},
+	{0x0000a3bc, 0x000001fa},
+	{0x0000a3c0, 0x20202020},
+	{0x0000a3c4, 0x22222220},
+	{0x0000a3c8, 0x20200020},
+	{0x0000a3cc, 0x20202020},
+	{0x0000a3d0, 0x20202020},
+	{0x0000a3d4, 0x20202020},
+	{0x0000a3d8, 0x20202020},
+	{0x0000a3dc, 0x20202020},
+	{0x0000a3e0, 0x20202020},
+	{0x0000a3e4, 0x20202020},
+	{0x0000a3e8, 0x20202020},
+	{0x0000a3ec, 0x20202020},
+	{0x0000a3f0, 0x00000000},
+	{0x0000a3f4, 0x00000000},
+	{0x0000a3f8, 0x0c9bd380},
+	{0x0000a3fc, 0x000f0f01},
+	{0x0000a400, 0x8fa91f01},
+	{0x0000a404, 0x00000000},
+	{0x0000a408, 0x0e79e5c6},
+	{0x0000a40c, 0x00820820},
+	{0x0000a414, 0x1ce42108},
+	{0x0000a418, 0x2d001dce},
+	{0x0000a41c, 0x1ce73908},
+	{0x0000a420, 0x000001ce},
+	{0x0000a424, 0x1ce738e7},
+	{0x0000a428, 0x000001ce},
+	{0x0000a42c, 0x1ce739ce},
+	{0x0000a430, 0x1ce739ce},
+	{0x0000a434, 0x00000000},
+	{0x0000a438, 0x00001801},
+	{0x0000a43c, 0x00100000},
+	{0x0000a444, 0x00000000},
+	{0x0000a448, 0x05000080},
+	{0x0000a44c, 0x00000001},
+	{0x0000a450, 0x00010000},
+	{0x0000a458, 0x00000000},
+	{0x0000a644, 0xbfad9d74},
+	{0x0000a648, 0x0048060a},
+	{0x0000a64c, 0x00003c37},
+	{0x0000a670, 0x03020100},
+	{0x0000a674, 0x09080504},
+	{0x0000a678, 0x0d0c0b0a},
+	{0x0000a67c, 0x13121110},
+	{0x0000a680, 0x31301514},
+	{0x0000a684, 0x35343332},
+	{0x0000a688, 0x00000036},
+	{0x0000a690, 0x08000838},
+	{0x0000a7cc, 0x00000000},
+	{0x0000a7d0, 0x00000000},
+	{0x0000a7d4, 0x00000004},
+	{0x0000a7dc, 0x00000000},
+	{0x0000a8d0, 0x004b6a8e},
+	{0x0000a8d4, 0x00000820},
+	{0x0000a8dc, 0x00000000},
+	{0x0000a8f0, 0x00000000},
+	{0x0000a8f4, 0x00000000},
+	{0x0000b2d0, 0x00000080},
+	{0x0000b2d4, 0x00000000},
+	{0x0000b2ec, 0x00000000},
+	{0x0000b2f0, 0x00000000},
+	{0x0000b2f4, 0x00000000},
+	{0x0000b2f8, 0x00000000},
+	{0x0000b408, 0x0e79e5c0},
+	{0x0000b40c, 0x00820820},
+	{0x0000b420, 0x00000000},
+};
+
+static const u32 qca953x_1p0_baseband_postamble[][5] = {
+	/* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
+	{0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8011, 0xd00a8011},
+	{0x00009820, 0x206a022e, 0x206a022e, 0x206a012e, 0x206a012e},
+	{0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0},
+	{0x00009828, 0x06903081, 0x06903081, 0x06903881, 0x06903881},
+	{0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
+	{0x00009830, 0x0000059c, 0x0000059c, 0x0000119c, 0x0000119c},
+	{0x00009c00, 0x000000c4, 0x000000c4, 0x000000c4, 0x000000c4},
+	{0x00009e00, 0x0372111a, 0x0372111a, 0x037216a0, 0x037216a0},
+	{0x00009e04, 0x001c2020, 0x001c2020, 0x001c2020, 0x001c2020},
+	{0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2},
+	{0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec84d2e},
+	{0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3379605e, 0x33795d5e},
+	{0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
+	{0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
+	{0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
+	{0x00009e3c, 0xcfa10820, 0xcfa10820, 0xcfa10822, 0xcfa10822},
+	{0x00009e44, 0xfe321e27, 0xfe321e27, 0xfe291e27, 0xfe291e27},
+	{0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
+	{0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
+	{0x0000a204, 0x005c0ec0, 0x005c0ec4, 0x005c0ec4, 0x005c0ec0},
+	{0x0000a208, 0x00000104, 0x00000104, 0x00000004, 0x00000004},
+	{0x0000a22c, 0x07e26a2f, 0x07e26a2f, 0x01026a2f, 0x01026a2f},
+	{0x0000a230, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b},
+	{0x0000a234, 0x00000fff, 0x10000fff, 0x10000fff, 0x00000fff},
+	{0x0000a238, 0xffb01018, 0xffb01018, 0xffb01018, 0xffb01018},
+	{0x0000a250, 0x00000000, 0x00000000, 0x00000210, 0x00000108},
+	{0x0000a254, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898},
+	{0x0000a258, 0x02020002, 0x02020002, 0x02020002, 0x02020002},
+	{0x0000a25c, 0x01000e0e, 0x01000e0e, 0x01010e0e, 0x01010e0e},
+	{0x0000a260, 0x0a021501, 0x0a021501, 0x3a021501, 0x3a021501},
+	{0x0000a264, 0x00000e0e, 0x00000e0e, 0x01000e0e, 0x01000e0e},
+	{0x0000a280, 0x00000007, 0x00000007, 0x0000000b, 0x0000000b},
+	{0x0000a284, 0x00000000, 0x00000000, 0x00000010, 0x00000010},
+	{0x0000a288, 0x00000110, 0x00000110, 0x00000110, 0x00000110},
+	{0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222},
+	{0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
+	{0x0000a2cc, 0x18c50033, 0x18c43433, 0x18c41033, 0x18c44c33},
+	{0x0000a2d0, 0x00041982, 0x00041982, 0x00041982, 0x00041982},
+	{0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b},
+	{0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000a830, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
+	{0x0000ae04, 0x001c0000, 0x001c0000, 0x001c0000, 0x001c0000},
+	{0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000ae1c, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
+	{0x0000ae20, 0x000001b5, 0x000001b5, 0x000001ce, 0x000001ce},
+	{0x0000b284, 0x00000000, 0x00000000, 0x00000010, 0x00000010},
+};
+
+static const u32 qca953x_1p0_radio_core[][2] = {
+	/* Addr      allmodes  */
+	{0x00016000, 0x36db6db6},
+	{0x00016004, 0x6db6db40},
+	{0x00016008, 0x73f00000},
+	{0x0001600c, 0x00000000},
+	{0x00016040, 0x3f80fff8},
+	{0x0001604c, 0x000f0278},
+	{0x00016050, 0x8036db6c},
+	{0x00016054, 0x6db60000},
+	{0x00016080, 0x00080000},
+	{0x00016084, 0x0e48048c},
+	{0x00016088, 0x14214514},
+	{0x0001608c, 0x119f080a},
+	{0x00016090, 0x24926490},
+	{0x00016094, 0x00000000},
+	{0x000160a0, 0xc2108ffe},
+	{0x000160a4, 0x812fc370},
+	{0x000160a8, 0x423c8000},
+	{0x000160b4, 0x92480080},
+	{0x000160c0, 0x006db6d8},
+	{0x000160c4, 0x24b6db6c},
+	{0x000160c8, 0x6db6db6c},
+	{0x000160cc, 0x6db6fb7c},
+	{0x000160d0, 0x6db6da44},
+	{0x00016100, 0x07ff8001},
+	{0x00016108, 0x00080010},
+	{0x00016144, 0x01884080},
+	{0x00016148, 0x000080d8},
+	{0x00016280, 0x01000901},
+	{0x00016284, 0x15d30000},
+	{0x00016288, 0x00318000},
+	{0x0001628c, 0x50000000},
+	{0x00016380, 0x00000000},
+	{0x00016384, 0x00000000},
+	{0x00016388, 0x00800700},
+	{0x0001638c, 0x00800700},
+	{0x00016390, 0x00800700},
+	{0x00016394, 0x00000000},
+	{0x00016398, 0x00000000},
+	{0x0001639c, 0x00000000},
+	{0x000163a0, 0x00000001},
+	{0x000163a4, 0x00000001},
+	{0x000163a8, 0x00000000},
+	{0x000163ac, 0x00000000},
+	{0x000163b0, 0x00000000},
+	{0x000163b4, 0x00000000},
+	{0x000163b8, 0x00000000},
+	{0x000163bc, 0x00000000},
+	{0x000163c0, 0x000000a0},
+	{0x000163c4, 0x000c0000},
+	{0x000163c8, 0x14021402},
+	{0x000163cc, 0x00001402},
+	{0x000163d0, 0x00000000},
+	{0x000163d4, 0x00000000},
+	{0x00016400, 0x36db6db6},
+	{0x00016404, 0x6db6db40},
+	{0x00016408, 0x73f00000},
+	{0x0001640c, 0x00000000},
+	{0x00016440, 0x3f80fff8},
+	{0x0001644c, 0x000f0278},
+	{0x00016450, 0x8036db6c},
+	{0x00016454, 0x6db60000},
+	{0x00016500, 0x07ff8001},
+	{0x00016508, 0x00080010},
+	{0x00016544, 0x01884080},
+	{0x00016548, 0x000080d8},
+	{0x00016780, 0x00000000},
+	{0x00016784, 0x00000000},
+	{0x00016788, 0x00800700},
+	{0x0001678c, 0x00800700},
+	{0x00016790, 0x00800700},
+	{0x00016794, 0x00000000},
+	{0x00016798, 0x00000000},
+	{0x0001679c, 0x00000000},
+	{0x000167a0, 0x00000001},
+	{0x000167a4, 0x00000001},
+	{0x000167a8, 0x00000000},
+	{0x000167ac, 0x00000000},
+	{0x000167b0, 0x00000000},
+	{0x000167b4, 0x00000000},
+	{0x000167b8, 0x00000000},
+	{0x000167bc, 0x00000000},
+	{0x000167c0, 0x000000a0},
+	{0x000167c4, 0x000c0000},
+	{0x000167c8, 0x14021402},
+	{0x000167cc, 0x00001402},
+	{0x000167d0, 0x00000000},
+	{0x000167d4, 0x00000000},
+};
+
+static const u32 qca953x_1p0_radio_postamble[][5] = {
+	/* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
+	{0x00016098, 0xd2dd5554, 0xd2dd5554, 0xc4128f5c, 0xc4128f5c},
+	{0x0001609c, 0x0a566f3a, 0x0a566f3a, 0x0fd08f25, 0x0fd08f25},
+	{0x000160ac, 0xa4647c00, 0xa4647c00, 0x24646800, 0x24646800},
+	{0x000160b0, 0x01885f52, 0x01885f52, 0x00fe7f46, 0x00fe7f46},
+	{0x00016104, 0xb7a00001, 0xb7a00001, 0xfff80005, 0xfff80005},
+	{0x0001610c, 0xc0000000, 0xc0000000, 0x00000000, 0x00000000},
+	{0x00016140, 0x10804008, 0x10804008, 0x50804000, 0x50804000},
+	{0x00016504, 0xb7a00001, 0xb7a00001, 0xfff80001, 0xfff80001},
+	{0x0001650c, 0xc0000000, 0xc0000000, 0x00000000, 0x00000000},
+	{0x00016540, 0x10804008, 0x10804008, 0x50804000, 0x50804000},
+};
+
+static const u32 qca953x_1p0_soc_preamble[][2] = {
+	/* Addr      allmodes  */
+	{0x00007000, 0x00000000},
+	{0x00007004, 0x00000000},
+	{0x00007008, 0x00000000},
+	{0x0000700c, 0x00000000},
+	{0x0000701c, 0x00000000},
+	{0x00007020, 0x00000000},
+	{0x00007024, 0x00000000},
+	{0x00007028, 0x00000000},
+	{0x0000702c, 0x00000000},
+	{0x00007030, 0x00000000},
+	{0x00007034, 0x00000002},
+	{0x00007038, 0x000004c2},
+	{0x00007048, 0x00000000},
+};
+
+static const u32 qca953x_1p0_common_rx_gain_bounds[][5] = {
+	/* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
+	{0x00009e44, 0xfe321e27, 0xfe321e27, 0xfe291e27, 0xfe291e27},
+	{0x00009e48, 0x5030201a, 0x5030201a, 0x50302018, 0x50302018},
+};
+
+static const u32 qca953x_1p0_common_wo_xlna_rx_gain_bounds[][5] = {
+	/* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
+	{0x00009e44, 0xfe321e27, 0xfe321e27, 0xfe291e27, 0xfe291e27},
+	{0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
+};
+
+static const u32 qca953x_1p0_modes_xpa_tx_gain_table[][2] = {
+	/* Addr      allmodes  */
+	{0x0000a2dc, 0xfffd5aaa},
+	{0x0000a2e0, 0xfffe9ccc},
+	{0x0000a2e4, 0xffffe0f0},
+	{0x0000a2e8, 0xfffcff00},
+	{0x0000a410, 0x000050da},
+	{0x0000a500, 0x00000000},
+	{0x0000a504, 0x04000002},
+	{0x0000a508, 0x08000004},
+	{0x0000a50c, 0x0c000006},
+	{0x0000a510, 0x0f00000a},
+	{0x0000a514, 0x1300000c},
+	{0x0000a518, 0x1700000e},
+	{0x0000a51c, 0x1b000064},
+	{0x0000a520, 0x1f000242},
+	{0x0000a524, 0x23000229},
+	{0x0000a528, 0x270002a2},
+	{0x0000a52c, 0x2c001203},
+	{0x0000a530, 0x30001803},
+	{0x0000a534, 0x33000881},
+	{0x0000a538, 0x38001809},
+	{0x0000a53c, 0x3a000814},
+	{0x0000a540, 0x3f001a0c},
+	{0x0000a544, 0x43001a0e},
+	{0x0000a548, 0x46001812},
+	{0x0000a54c, 0x49001884},
+	{0x0000a550, 0x4d001e84},
+	{0x0000a554, 0x50001e69},
+	{0x0000a558, 0x550006f4},
+	{0x0000a55c, 0x59000ad3},
+	{0x0000a560, 0x5e000ad5},
+	{0x0000a564, 0x61001ced},
+	{0x0000a568, 0x660018d4},
+	{0x0000a56c, 0x660018d4},
+	{0x0000a570, 0x660018d4},
+	{0x0000a574, 0x660018d4},
+	{0x0000a578, 0x660018d4},
+	{0x0000a57c, 0x660018d4},
+	{0x0000a600, 0x00000000},
+	{0x0000a604, 0x00000000},
+	{0x0000a608, 0x00000000},
+	{0x0000a60c, 0x03804000},
+	{0x0000a610, 0x0300ca02},
+	{0x0000a614, 0x00000e04},
+	{0x0000a618, 0x03014000},
+	{0x0000a61c, 0x00000000},
+	{0x0000a620, 0x00000000},
+	{0x0000a624, 0x03014000},
+	{0x0000a628, 0x03804c05},
+	{0x0000a62c, 0x0701de06},
+	{0x0000a630, 0x07819c07},
+	{0x0000a634, 0x0701dc07},
+	{0x0000a638, 0x0701dc07},
+	{0x0000a63c, 0x0701dc07},
+	{0x0000b2dc, 0xfffd5aaa},
+	{0x0000b2e0, 0xfffe9ccc},
+	{0x0000b2e4, 0xffffe0f0},
+	{0x0000b2e8, 0xfffcff00},
+	{0x00016044, 0x010002d4},
+	{0x00016048, 0x66482400},
+	{0x00016280, 0x01000015},
+	{0x00016444, 0x010002d4},
+	{0x00016448, 0x66482400},
+};
+
+static const u32 qca953x_1p0_modes_no_xpa_tx_gain_table[][2] = {
+	/* Addr      allmodes  */
+	{0x0000a2dc, 0xffd5f552},
+	{0x0000a2e0, 0xffe60664},
+	{0x0000a2e4, 0xfff80780},
+	{0x0000a2e8, 0xfffff800},
+	{0x0000a410, 0x000050d6},
+	{0x0000a500, 0x00060020},
+	{0x0000a504, 0x04060060},
+	{0x0000a508, 0x080600a0},
+	{0x0000a50c, 0x0c068020},
+	{0x0000a510, 0x10068060},
+	{0x0000a514, 0x140680a0},
+	{0x0000a518, 0x18090040},
+	{0x0000a51c, 0x1b090080},
+	{0x0000a520, 0x1f0900c0},
+	{0x0000a524, 0x240c0041},
+	{0x0000a528, 0x280d0021},
+	{0x0000a52c, 0x2d0f0061},
+	{0x0000a530, 0x310f00a1},
+	{0x0000a534, 0x350e00a2},
+	{0x0000a538, 0x360e80a2},
+	{0x0000a53c, 0x380f00a2},
+	{0x0000a540, 0x3b0e00a3},
+	{0x0000a544, 0x3d110083},
+	{0x0000a548, 0x3e1100a3},
+	{0x0000a54c, 0x401100e3},
+	{0x0000a550, 0x421380e3},
+	{0x0000a554, 0x431780e3},
+	{0x0000a558, 0x461f80e3},
+	{0x0000a55c, 0x461f80e3},
+	{0x0000a560, 0x461f80e3},
+	{0x0000a564, 0x461f80e3},
+	{0x0000a568, 0x461f80e3},
+	{0x0000a56c, 0x461f80e3},
+	{0x0000a570, 0x461f80e3},
+	{0x0000a574, 0x461f80e3},
+	{0x0000a578, 0x461f80e3},
+	{0x0000a57c, 0x461f80e3},
+	{0x0000a600, 0x00000000},
+	{0x0000a604, 0x00000000},
+	{0x0000a608, 0x00000000},
+	{0x0000a60c, 0x00804201},
+	{0x0000a610, 0x01008201},
+	{0x0000a614, 0x0180c402},
+	{0x0000a618, 0x0180c603},
+	{0x0000a61c, 0x0180c603},
+	{0x0000a620, 0x01c10603},
+	{0x0000a624, 0x01c10704},
+	{0x0000a628, 0x02c18b05},
+	{0x0000a62c, 0x0301cc07},
+	{0x0000a630, 0x0301cc07},
+	{0x0000a634, 0x0301cc07},
+	{0x0000a638, 0x0301cc07},
+	{0x0000a63c, 0x0301cc07},
+	{0x0000b2dc, 0xffd5f552},
+	{0x0000b2e0, 0xffe60664},
+	{0x0000b2e4, 0xfff80780},
+	{0x0000b2e8, 0xfffff800},
+	{0x00016044, 0x049242db},
+	{0x00016048, 0x6c927a70},
+	{0x00016444, 0x049242db},
+	{0x00016448, 0x6c927a70},
+};
+
+static const u32 qca953x_1p1_modes_no_xpa_tx_gain_table[][2] = {
+	/* Addr      allmodes  */
+	{0x0000a2dc, 0xffd5f552},
+	{0x0000a2e0, 0xffe60664},
+	{0x0000a2e4, 0xfff80780},
+	{0x0000a2e8, 0xfffff800},
+	{0x0000a410, 0x000050de},
+	{0x0000a500, 0x00000061},
+	{0x0000a504, 0x04000063},
+	{0x0000a508, 0x08000065},
+	{0x0000a50c, 0x0c000261},
+	{0x0000a510, 0x10000263},
+	{0x0000a514, 0x14000265},
+	{0x0000a518, 0x18000482},
+	{0x0000a51c, 0x1b000484},
+	{0x0000a520, 0x1f000486},
+	{0x0000a524, 0x240008c2},
+	{0x0000a528, 0x28000cc1},
+	{0x0000a52c, 0x2d000ce3},
+	{0x0000a530, 0x31000ce5},
+	{0x0000a534, 0x350010e5},
+	{0x0000a538, 0x360012e5},
+	{0x0000a53c, 0x380014e5},
+	{0x0000a540, 0x3b0018e5},
+	{0x0000a544, 0x3d001d04},
+	{0x0000a548, 0x3e001d05},
+	{0x0000a54c, 0x40001d07},
+	{0x0000a550, 0x42001f27},
+	{0x0000a554, 0x43001f67},
+	{0x0000a558, 0x46001fe7},
+	{0x0000a55c, 0x47001f2b},
+	{0x0000a560, 0x49001f0d},
+	{0x0000a564, 0x4b001ed2},
+	{0x0000a568, 0x4c001ed4},
+	{0x0000a56c, 0x4e001f15},
+	{0x0000a570, 0x4f001ff6},
+	{0x0000a574, 0x4f001ff6},
+	{0x0000a578, 0x4f001ff6},
+	{0x0000a57c, 0x4f001ff6},
+	{0x0000a600, 0x00000000},
+	{0x0000a604, 0x00000000},
+	{0x0000a608, 0x00000000},
+	{0x0000a60c, 0x00804201},
+	{0x0000a610, 0x01008201},
+	{0x0000a614, 0x0180c402},
+	{0x0000a618, 0x0180c603},
+	{0x0000a61c, 0x0180c603},
+	{0x0000a620, 0x01c10603},
+	{0x0000a624, 0x01c10704},
+	{0x0000a628, 0x02c18b05},
+	{0x0000a62c, 0x02c14c07},
+	{0x0000a630, 0x01008704},
+	{0x0000a634, 0x01c10402},
+	{0x0000a638, 0x0301cc07},
+	{0x0000a63c, 0x0301cc07},
+	{0x0000b2dc, 0xffd5f552},
+	{0x0000b2e0, 0xffe60664},
+	{0x0000b2e4, 0xfff80780},
+	{0x0000b2e8, 0xfffff800},
+	{0x00016044, 0x049242db},
+	{0x00016048, 0x6c927a70},
+	{0x00016444, 0x049242db},
+	{0x00016448, 0x6c927a70},
+};
+
+#endif /* INITVALS_953X_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar955x_1p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar955x_1p0_initvals.h
index ccc5b6c99add..74d8bc05b317 100644
--- a/drivers/net/wireless/ath/ath9k/ar955x_1p0_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar955x_1p0_initvals.h
@@ -20,6 +20,14 @@
 
 /* AR955X 1.0 */
 
+#define ar955x_1p0_soc_postamble ar9300_2p2_soc_postamble
+
+#define ar955x_1p0_common_rx_gain_table ar9300Common_rx_gain_table_2p2
+
+#define ar955x_1p0_common_wo_xlna_rx_gain_table ar9300Common_wo_xlna_rx_gain_table_2p2
+
+#define ar955x_1p0_baseband_core_txfir_coeff_japan_2484 ar9300_2p2_baseband_core_txfir_coeff_japan_2484
+
 static const u32 ar955x_1p0_radio_postamble[][5] = {
 	/* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
 	{0x00016098, 0xd2dd5554, 0xd2dd5554, 0xd28b3330, 0xd28b3330},
@@ -37,13 +45,6 @@ static const u32 ar955x_1p0_radio_postamble[][5] = {
 	{0x00016940, 0x10804008, 0x10804008, 0x10804008, 0x10804008},
 };
 
-static const u32 ar955x_1p0_baseband_core_txfir_coeff_japan_2484[][2] = {
-	/* Addr      allmodes  */
-	{0x0000a398, 0x00000000},
-	{0x0000a39c, 0x6f7f0301},
-	{0x0000a3a0, 0xca9228ee},
-};
-
 static const u32 ar955x_1p0_baseband_postamble[][5] = {
 	/* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
 	{0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8011, 0xd00a8011},
@@ -473,266 +474,6 @@ static const u32 ar955x_1p0_mac_core[][2] = {
 	{0x000083d0, 0x8c7901ff},
 };
 
-static const u32 ar955x_1p0_common_rx_gain_table[][2] = {
-	/* Addr      allmodes  */
-	{0x0000a000, 0x00010000},
-	{0x0000a004, 0x00030002},
-	{0x0000a008, 0x00050004},
-	{0x0000a00c, 0x00810080},
-	{0x0000a010, 0x00830082},
-	{0x0000a014, 0x01810180},
-	{0x0000a018, 0x01830182},
-	{0x0000a01c, 0x01850184},
-	{0x0000a020, 0x01890188},
-	{0x0000a024, 0x018b018a},
-	{0x0000a028, 0x018d018c},
-	{0x0000a02c, 0x01910190},
-	{0x0000a030, 0x01930192},
-	{0x0000a034, 0x01950194},
-	{0x0000a038, 0x038a0196},
-	{0x0000a03c, 0x038c038b},
-	{0x0000a040, 0x0390038d},
-	{0x0000a044, 0x03920391},
-	{0x0000a048, 0x03940393},
-	{0x0000a04c, 0x03960395},
-	{0x0000a050, 0x00000000},
-	{0x0000a054, 0x00000000},
-	{0x0000a058, 0x00000000},
-	{0x0000a05c, 0x00000000},
-	{0x0000a060, 0x00000000},
-	{0x0000a064, 0x00000000},
-	{0x0000a068, 0x00000000},
-	{0x0000a06c, 0x00000000},
-	{0x0000a070, 0x00000000},
-	{0x0000a074, 0x00000000},
-	{0x0000a078, 0x00000000},
-	{0x0000a07c, 0x00000000},
-	{0x0000a080, 0x22222229},
-	{0x0000a084, 0x1d1d1d1d},
-	{0x0000a088, 0x1d1d1d1d},
-	{0x0000a08c, 0x1d1d1d1d},
-	{0x0000a090, 0x171d1d1d},
-	{0x0000a094, 0x11111717},
-	{0x0000a098, 0x00030311},
-	{0x0000a09c, 0x00000000},
-	{0x0000a0a0, 0x00000000},
-	{0x0000a0a4, 0x00000000},
-	{0x0000a0a8, 0x00000000},
-	{0x0000a0ac, 0x00000000},
-	{0x0000a0b0, 0x00000000},
-	{0x0000a0b4, 0x00000000},
-	{0x0000a0b8, 0x00000000},
-	{0x0000a0bc, 0x00000000},
-	{0x0000a0c0, 0x001f0000},
-	{0x0000a0c4, 0x01000101},
-	{0x0000a0c8, 0x011e011f},
-	{0x0000a0cc, 0x011c011d},
-	{0x0000a0d0, 0x02030204},
-	{0x0000a0d4, 0x02010202},
-	{0x0000a0d8, 0x021f0200},
-	{0x0000a0dc, 0x0302021e},
-	{0x0000a0e0, 0x03000301},
-	{0x0000a0e4, 0x031e031f},
-	{0x0000a0e8, 0x0402031d},
-	{0x0000a0ec, 0x04000401},
-	{0x0000a0f0, 0x041e041f},
-	{0x0000a0f4, 0x0502041d},
-	{0x0000a0f8, 0x05000501},
-	{0x0000a0fc, 0x051e051f},
-	{0x0000a100, 0x06010602},
-	{0x0000a104, 0x061f0600},
-	{0x0000a108, 0x061d061e},
-	{0x0000a10c, 0x07020703},
-	{0x0000a110, 0x07000701},
-	{0x0000a114, 0x00000000},
-	{0x0000a118, 0x00000000},
-	{0x0000a11c, 0x00000000},
-	{0x0000a120, 0x00000000},
-	{0x0000a124, 0x00000000},
-	{0x0000a128, 0x00000000},
-	{0x0000a12c, 0x00000000},
-	{0x0000a130, 0x00000000},
-	{0x0000a134, 0x00000000},
-	{0x0000a138, 0x00000000},
-	{0x0000a13c, 0x00000000},
-	{0x0000a140, 0x001f0000},
-	{0x0000a144, 0x01000101},
-	{0x0000a148, 0x011e011f},
-	{0x0000a14c, 0x011c011d},
-	{0x0000a150, 0x02030204},
-	{0x0000a154, 0x02010202},
-	{0x0000a158, 0x021f0200},
-	{0x0000a15c, 0x0302021e},
-	{0x0000a160, 0x03000301},
-	{0x0000a164, 0x031e031f},
-	{0x0000a168, 0x0402031d},
-	{0x0000a16c, 0x04000401},
-	{0x0000a170, 0x041e041f},
-	{0x0000a174, 0x0502041d},
-	{0x0000a178, 0x05000501},
-	{0x0000a17c, 0x051e051f},
-	{0x0000a180, 0x06010602},
-	{0x0000a184, 0x061f0600},
-	{0x0000a188, 0x061d061e},
-	{0x0000a18c, 0x07020703},
-	{0x0000a190, 0x07000701},
-	{0x0000a194, 0x00000000},
-	{0x0000a198, 0x00000000},
-	{0x0000a19c, 0x00000000},
-	{0x0000a1a0, 0x00000000},
-	{0x0000a1a4, 0x00000000},
-	{0x0000a1a8, 0x00000000},
-	{0x0000a1ac, 0x00000000},
-	{0x0000a1b0, 0x00000000},
-	{0x0000a1b4, 0x00000000},
-	{0x0000a1b8, 0x00000000},
-	{0x0000a1bc, 0x00000000},
-	{0x0000a1c0, 0x00000000},
-	{0x0000a1c4, 0x00000000},
-	{0x0000a1c8, 0x00000000},
-	{0x0000a1cc, 0x00000000},
-	{0x0000a1d0, 0x00000000},
-	{0x0000a1d4, 0x00000000},
-	{0x0000a1d8, 0x00000000},
-	{0x0000a1dc, 0x00000000},
-	{0x0000a1e0, 0x00000000},
-	{0x0000a1e4, 0x00000000},
-	{0x0000a1e8, 0x00000000},
-	{0x0000a1ec, 0x00000000},
-	{0x0000a1f0, 0x00000396},
-	{0x0000a1f4, 0x00000396},
-	{0x0000a1f8, 0x00000396},
-	{0x0000a1fc, 0x00000196},
-	{0x0000b000, 0x00010000},
-	{0x0000b004, 0x00030002},
-	{0x0000b008, 0x00050004},
-	{0x0000b00c, 0x00810080},
-	{0x0000b010, 0x00830082},
-	{0x0000b014, 0x01810180},
-	{0x0000b018, 0x01830182},
-	{0x0000b01c, 0x01850184},
-	{0x0000b020, 0x02810280},
-	{0x0000b024, 0x02830282},
-	{0x0000b028, 0x02850284},
-	{0x0000b02c, 0x02890288},
-	{0x0000b030, 0x028b028a},
-	{0x0000b034, 0x0388028c},
-	{0x0000b038, 0x038a0389},
-	{0x0000b03c, 0x038c038b},
-	{0x0000b040, 0x0390038d},
-	{0x0000b044, 0x03920391},
-	{0x0000b048, 0x03940393},
-	{0x0000b04c, 0x03960395},
-	{0x0000b050, 0x00000000},
-	{0x0000b054, 0x00000000},
-	{0x0000b058, 0x00000000},
-	{0x0000b05c, 0x00000000},
-	{0x0000b060, 0x00000000},
-	{0x0000b064, 0x00000000},
-	{0x0000b068, 0x00000000},
-	{0x0000b06c, 0x00000000},
-	{0x0000b070, 0x00000000},
-	{0x0000b074, 0x00000000},
-	{0x0000b078, 0x00000000},
-	{0x0000b07c, 0x00000000},
-	{0x0000b080, 0x23232323},
-	{0x0000b084, 0x21232323},
-	{0x0000b088, 0x19191c1e},
-	{0x0000b08c, 0x12141417},
-	{0x0000b090, 0x07070e0e},
-	{0x0000b094, 0x03030305},
-	{0x0000b098, 0x00000003},
-	{0x0000b09c, 0x00000000},
-	{0x0000b0a0, 0x00000000},
-	{0x0000b0a4, 0x00000000},
-	{0x0000b0a8, 0x00000000},
-	{0x0000b0ac, 0x00000000},
-	{0x0000b0b0, 0x00000000},
-	{0x0000b0b4, 0x00000000},
-	{0x0000b0b8, 0x00000000},
-	{0x0000b0bc, 0x00000000},
-	{0x0000b0c0, 0x003f0020},
-	{0x0000b0c4, 0x00400041},
-	{0x0000b0c8, 0x0140005f},
-	{0x0000b0cc, 0x0160015f},
-	{0x0000b0d0, 0x017e017f},
-	{0x0000b0d4, 0x02410242},
-	{0x0000b0d8, 0x025f0240},
-	{0x0000b0dc, 0x027f0260},
-	{0x0000b0e0, 0x0341027e},
-	{0x0000b0e4, 0x035f0340},
-	{0x0000b0e8, 0x037f0360},
-	{0x0000b0ec, 0x04400441},
-	{0x0000b0f0, 0x0460045f},
-	{0x0000b0f4, 0x0541047f},
-	{0x0000b0f8, 0x055f0540},
-	{0x0000b0fc, 0x057f0560},
-	{0x0000b100, 0x06400641},
-	{0x0000b104, 0x0660065f},
-	{0x0000b108, 0x067e067f},
-	{0x0000b10c, 0x07410742},
-	{0x0000b110, 0x075f0740},
-	{0x0000b114, 0x077f0760},
-	{0x0000b118, 0x07800781},
-	{0x0000b11c, 0x07a0079f},
-	{0x0000b120, 0x07c107bf},
-	{0x0000b124, 0x000007c0},
-	{0x0000b128, 0x00000000},
-	{0x0000b12c, 0x00000000},
-	{0x0000b130, 0x00000000},
-	{0x0000b134, 0x00000000},
-	{0x0000b138, 0x00000000},
-	{0x0000b13c, 0x00000000},
-	{0x0000b140, 0x003f0020},
-	{0x0000b144, 0x00400041},
-	{0x0000b148, 0x0140005f},
-	{0x0000b14c, 0x0160015f},
-	{0x0000b150, 0x017e017f},
-	{0x0000b154, 0x02410242},
-	{0x0000b158, 0x025f0240},
-	{0x0000b15c, 0x027f0260},
-	{0x0000b160, 0x0341027e},
-	{0x0000b164, 0x035f0340},
-	{0x0000b168, 0x037f0360},
-	{0x0000b16c, 0x04400441},
-	{0x0000b170, 0x0460045f},
-	{0x0000b174, 0x0541047f},
-	{0x0000b178, 0x055f0540},
-	{0x0000b17c, 0x057f0560},
-	{0x0000b180, 0x06400641},
-	{0x0000b184, 0x0660065f},
-	{0x0000b188, 0x067e067f},
-	{0x0000b18c, 0x07410742},
-	{0x0000b190, 0x075f0740},
-	{0x0000b194, 0x077f0760},
-	{0x0000b198, 0x07800781},
-	{0x0000b19c, 0x07a0079f},
-	{0x0000b1a0, 0x07c107bf},
-	{0x0000b1a4, 0x000007c0},
-	{0x0000b1a8, 0x00000000},
-	{0x0000b1ac, 0x00000000},
-	{0x0000b1b0, 0x00000000},
-	{0x0000b1b4, 0x00000000},
-	{0x0000b1b8, 0x00000000},
-	{0x0000b1bc, 0x00000000},
-	{0x0000b1c0, 0x00000000},
-	{0x0000b1c4, 0x00000000},
-	{0x0000b1c8, 0x00000000},
-	{0x0000b1cc, 0x00000000},
-	{0x0000b1d0, 0x00000000},
-	{0x0000b1d4, 0x00000000},
-	{0x0000b1d8, 0x00000000},
-	{0x0000b1dc, 0x00000000},
-	{0x0000b1e0, 0x00000000},
-	{0x0000b1e4, 0x00000000},
-	{0x0000b1e8, 0x00000000},
-	{0x0000b1ec, 0x00000000},
-	{0x0000b1f0, 0x00000396},
-	{0x0000b1f4, 0x00000396},
-	{0x0000b1f8, 0x00000396},
-	{0x0000b1fc, 0x00000196},
-};
-
 static const u32 ar955x_1p0_baseband_core[][2] = {
 	/* Addr      allmodes  */
 	{0x00009800, 0xafe68e30},
@@ -891,266 +632,6 @@ static const u32 ar955x_1p0_baseband_core[][2] = {
 	{0x0000c420, 0x00000000},
 };
 
-static const u32 ar955x_1p0_common_wo_xlna_rx_gain_table[][2] = {
-	/* Addr      allmodes  */
-	{0x0000a000, 0x00010000},
-	{0x0000a004, 0x00030002},
-	{0x0000a008, 0x00050004},
-	{0x0000a00c, 0x00810080},
-	{0x0000a010, 0x00830082},
-	{0x0000a014, 0x01810180},
-	{0x0000a018, 0x01830182},
-	{0x0000a01c, 0x01850184},
-	{0x0000a020, 0x01890188},
-	{0x0000a024, 0x018b018a},
-	{0x0000a028, 0x018d018c},
-	{0x0000a02c, 0x03820190},
-	{0x0000a030, 0x03840383},
-	{0x0000a034, 0x03880385},
-	{0x0000a038, 0x038a0389},
-	{0x0000a03c, 0x038c038b},
-	{0x0000a040, 0x0390038d},
-	{0x0000a044, 0x03920391},
-	{0x0000a048, 0x03940393},
-	{0x0000a04c, 0x03960395},
-	{0x0000a050, 0x00000000},
-	{0x0000a054, 0x00000000},
-	{0x0000a058, 0x00000000},
-	{0x0000a05c, 0x00000000},
-	{0x0000a060, 0x00000000},
-	{0x0000a064, 0x00000000},
-	{0x0000a068, 0x00000000},
-	{0x0000a06c, 0x00000000},
-	{0x0000a070, 0x00000000},
-	{0x0000a074, 0x00000000},
-	{0x0000a078, 0x00000000},
-	{0x0000a07c, 0x00000000},
-	{0x0000a080, 0x29292929},
-	{0x0000a084, 0x29292929},
-	{0x0000a088, 0x29292929},
-	{0x0000a08c, 0x29292929},
-	{0x0000a090, 0x22292929},
-	{0x0000a094, 0x1d1d2222},
-	{0x0000a098, 0x0c111117},
-	{0x0000a09c, 0x00030303},
-	{0x0000a0a0, 0x00000000},
-	{0x0000a0a4, 0x00000000},
-	{0x0000a0a8, 0x00000000},
-	{0x0000a0ac, 0x00000000},
-	{0x0000a0b0, 0x00000000},
-	{0x0000a0b4, 0x00000000},
-	{0x0000a0b8, 0x00000000},
-	{0x0000a0bc, 0x00000000},
-	{0x0000a0c0, 0x001f0000},
-	{0x0000a0c4, 0x01000101},
-	{0x0000a0c8, 0x011e011f},
-	{0x0000a0cc, 0x011c011d},
-	{0x0000a0d0, 0x02030204},
-	{0x0000a0d4, 0x02010202},
-	{0x0000a0d8, 0x021f0200},
-	{0x0000a0dc, 0x0302021e},
-	{0x0000a0e0, 0x03000301},
-	{0x0000a0e4, 0x031e031f},
-	{0x0000a0e8, 0x0402031d},
-	{0x0000a0ec, 0x04000401},
-	{0x0000a0f0, 0x041e041f},
-	{0x0000a0f4, 0x0502041d},
-	{0x0000a0f8, 0x05000501},
-	{0x0000a0fc, 0x051e051f},
-	{0x0000a100, 0x06010602},
-	{0x0000a104, 0x061f0600},
-	{0x0000a108, 0x061d061e},
-	{0x0000a10c, 0x07020703},
-	{0x0000a110, 0x07000701},
-	{0x0000a114, 0x00000000},
-	{0x0000a118, 0x00000000},
-	{0x0000a11c, 0x00000000},
-	{0x0000a120, 0x00000000},
-	{0x0000a124, 0x00000000},
-	{0x0000a128, 0x00000000},
-	{0x0000a12c, 0x00000000},
-	{0x0000a130, 0x00000000},
-	{0x0000a134, 0x00000000},
-	{0x0000a138, 0x00000000},
-	{0x0000a13c, 0x00000000},
-	{0x0000a140, 0x001f0000},
-	{0x0000a144, 0x01000101},
-	{0x0000a148, 0x011e011f},
-	{0x0000a14c, 0x011c011d},
-	{0x0000a150, 0x02030204},
-	{0x0000a154, 0x02010202},
-	{0x0000a158, 0x021f0200},
-	{0x0000a15c, 0x0302021e},
-	{0x0000a160, 0x03000301},
-	{0x0000a164, 0x031e031f},
-	{0x0000a168, 0x0402031d},
-	{0x0000a16c, 0x04000401},
-	{0x0000a170, 0x041e041f},
-	{0x0000a174, 0x0502041d},
-	{0x0000a178, 0x05000501},
-	{0x0000a17c, 0x051e051f},
-	{0x0000a180, 0x06010602},
-	{0x0000a184, 0x061f0600},
-	{0x0000a188, 0x061d061e},
-	{0x0000a18c, 0x07020703},
-	{0x0000a190, 0x07000701},
-	{0x0000a194, 0x00000000},
-	{0x0000a198, 0x00000000},
-	{0x0000a19c, 0x00000000},
-	{0x0000a1a0, 0x00000000},
-	{0x0000a1a4, 0x00000000},
-	{0x0000a1a8, 0x00000000},
-	{0x0000a1ac, 0x00000000},
-	{0x0000a1b0, 0x00000000},
-	{0x0000a1b4, 0x00000000},
-	{0x0000a1b8, 0x00000000},
-	{0x0000a1bc, 0x00000000},
-	{0x0000a1c0, 0x00000000},
-	{0x0000a1c4, 0x00000000},
-	{0x0000a1c8, 0x00000000},
-	{0x0000a1cc, 0x00000000},
-	{0x0000a1d0, 0x00000000},
-	{0x0000a1d4, 0x00000000},
-	{0x0000a1d8, 0x00000000},
-	{0x0000a1dc, 0x00000000},
-	{0x0000a1e0, 0x00000000},
-	{0x0000a1e4, 0x00000000},
-	{0x0000a1e8, 0x00000000},
-	{0x0000a1ec, 0x00000000},
-	{0x0000a1f0, 0x00000396},
-	{0x0000a1f4, 0x00000396},
-	{0x0000a1f8, 0x00000396},
-	{0x0000a1fc, 0x00000196},
-	{0x0000b000, 0x00010000},
-	{0x0000b004, 0x00030002},
-	{0x0000b008, 0x00050004},
-	{0x0000b00c, 0x00810080},
-	{0x0000b010, 0x00830082},
-	{0x0000b014, 0x01810180},
-	{0x0000b018, 0x01830182},
-	{0x0000b01c, 0x01850184},
-	{0x0000b020, 0x02810280},
-	{0x0000b024, 0x02830282},
-	{0x0000b028, 0x02850284},
-	{0x0000b02c, 0x02890288},
-	{0x0000b030, 0x028b028a},
-	{0x0000b034, 0x0388028c},
-	{0x0000b038, 0x038a0389},
-	{0x0000b03c, 0x038c038b},
-	{0x0000b040, 0x0390038d},
-	{0x0000b044, 0x03920391},
-	{0x0000b048, 0x03940393},
-	{0x0000b04c, 0x03960395},
-	{0x0000b050, 0x00000000},
-	{0x0000b054, 0x00000000},
-	{0x0000b058, 0x00000000},
-	{0x0000b05c, 0x00000000},
-	{0x0000b060, 0x00000000},
-	{0x0000b064, 0x00000000},
-	{0x0000b068, 0x00000000},
-	{0x0000b06c, 0x00000000},
-	{0x0000b070, 0x00000000},
-	{0x0000b074, 0x00000000},
-	{0x0000b078, 0x00000000},
-	{0x0000b07c, 0x00000000},
-	{0x0000b080, 0x32323232},
-	{0x0000b084, 0x2f2f3232},
-	{0x0000b088, 0x23282a2d},
-	{0x0000b08c, 0x1c1e2123},
-	{0x0000b090, 0x14171919},
-	{0x0000b094, 0x0e0e1214},
-	{0x0000b098, 0x03050707},
-	{0x0000b09c, 0x00030303},
-	{0x0000b0a0, 0x00000000},
-	{0x0000b0a4, 0x00000000},
-	{0x0000b0a8, 0x00000000},
-	{0x0000b0ac, 0x00000000},
-	{0x0000b0b0, 0x00000000},
-	{0x0000b0b4, 0x00000000},
-	{0x0000b0b8, 0x00000000},
-	{0x0000b0bc, 0x00000000},
-	{0x0000b0c0, 0x003f0020},
-	{0x0000b0c4, 0x00400041},
-	{0x0000b0c8, 0x0140005f},
-	{0x0000b0cc, 0x0160015f},
-	{0x0000b0d0, 0x017e017f},
-	{0x0000b0d4, 0x02410242},
-	{0x0000b0d8, 0x025f0240},
-	{0x0000b0dc, 0x027f0260},
-	{0x0000b0e0, 0x0341027e},
-	{0x0000b0e4, 0x035f0340},
-	{0x0000b0e8, 0x037f0360},
-	{0x0000b0ec, 0x04400441},
-	{0x0000b0f0, 0x0460045f},
-	{0x0000b0f4, 0x0541047f},
-	{0x0000b0f8, 0x055f0540},
-	{0x0000b0fc, 0x057f0560},
-	{0x0000b100, 0x06400641},
-	{0x0000b104, 0x0660065f},
-	{0x0000b108, 0x067e067f},
-	{0x0000b10c, 0x07410742},
-	{0x0000b110, 0x075f0740},
-	{0x0000b114, 0x077f0760},
-	{0x0000b118, 0x07800781},
-	{0x0000b11c, 0x07a0079f},
-	{0x0000b120, 0x07c107bf},
-	{0x0000b124, 0x000007c0},
-	{0x0000b128, 0x00000000},
-	{0x0000b12c, 0x00000000},
-	{0x0000b130, 0x00000000},
-	{0x0000b134, 0x00000000},
-	{0x0000b138, 0x00000000},
-	{0x0000b13c, 0x00000000},
-	{0x0000b140, 0x003f0020},
-	{0x0000b144, 0x00400041},
-	{0x0000b148, 0x0140005f},
-	{0x0000b14c, 0x0160015f},
-	{0x0000b150, 0x017e017f},
-	{0x0000b154, 0x02410242},
-	{0x0000b158, 0x025f0240},
-	{0x0000b15c, 0x027f0260},
-	{0x0000b160, 0x0341027e},
-	{0x0000b164, 0x035f0340},
-	{0x0000b168, 0x037f0360},
-	{0x0000b16c, 0x04400441},
-	{0x0000b170, 0x0460045f},
-	{0x0000b174, 0x0541047f},
-	{0x0000b178, 0x055f0540},
-	{0x0000b17c, 0x057f0560},
-	{0x0000b180, 0x06400641},
-	{0x0000b184, 0x0660065f},
-	{0x0000b188, 0x067e067f},
-	{0x0000b18c, 0x07410742},
-	{0x0000b190, 0x075f0740},
-	{0x0000b194, 0x077f0760},
-	{0x0000b198, 0x07800781},
-	{0x0000b19c, 0x07a0079f},
-	{0x0000b1a0, 0x07c107bf},
-	{0x0000b1a4, 0x000007c0},
-	{0x0000b1a8, 0x00000000},
-	{0x0000b1ac, 0x00000000},
-	{0x0000b1b0, 0x00000000},
-	{0x0000b1b4, 0x00000000},
-	{0x0000b1b8, 0x00000000},
-	{0x0000b1bc, 0x00000000},
-	{0x0000b1c0, 0x00000000},
-	{0x0000b1c4, 0x00000000},
-	{0x0000b1c8, 0x00000000},
-	{0x0000b1cc, 0x00000000},
-	{0x0000b1d0, 0x00000000},
-	{0x0000b1d4, 0x00000000},
-	{0x0000b1d8, 0x00000000},
-	{0x0000b1dc, 0x00000000},
-	{0x0000b1e0, 0x00000000},
-	{0x0000b1e4, 0x00000000},
-	{0x0000b1e8, 0x00000000},
-	{0x0000b1ec, 0x00000000},
-	{0x0000b1f0, 0x00000396},
-	{0x0000b1f4, 0x00000396},
-	{0x0000b1f8, 0x00000396},
-	{0x0000b1fc, 0x00000196},
-};
-
 static const u32 ar955x_1p0_soc_preamble[][2] = {
 	/* Addr      allmodes  */
 	{0x00007000, 0x00000000},
@@ -1263,11 +744,6 @@ static const u32 ar955x_1p0_modes_no_xpa_tx_gain_table[][9] = {
 	{0x00016848, 0x66482401, 0x66482401, 0x66482401, 0x66482401, 0x66482401, 0x66482401, 0x66482401, 0x66482401},
 };
 
-static const u32 ar955x_1p0_soc_postamble[][5] = {
-	/* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
-	{0x00007010, 0x00000023, 0x00000023, 0x00000023, 0x00000023},
-};
-
 static const u32 ar955x_1p0_modes_fast_clock[][3] = {
 	/* Addr      5G_HT20     5G_HT40   */
 	{0x00001030, 0x00000268, 0x000004d0},
diff --git a/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h
index a8c757b6124f..10d4a6cb1c3b 100644
--- a/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h
@@ -20,6 +20,12 @@
 
 /* AR9565 1.0 */
 
+#define ar9565_1p0_mac_postamble ar9331_1p1_mac_postamble
+
+#define ar9565_1p0_Modes_lowest_ob_db_tx_gain_table ar9565_1p0_modes_low_ob_db_tx_gain_table
+
+#define ar9565_1p0_baseband_core_txfir_coeff_japan_2484 ar9300_2p2_baseband_core_txfir_coeff_japan_2484
+
 static const u32 ar9565_1p0_mac_core[][2] = {
 	/* Addr      allmodes  */
 	{0x00000008, 0x00000000},
@@ -182,18 +188,6 @@ static const u32 ar9565_1p0_mac_core[][2] = {
 	{0x000083d0, 0x800301ff},
 };
 
-static const u32 ar9565_1p0_mac_postamble[][5] = {
-	/* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
-	{0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
-	{0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c},
-	{0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38},
-	{0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00},
-	{0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b},
-	{0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810},
-	{0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a},
-	{0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440},
-};
-
 static const u32 ar9565_1p0_baseband_core[][2] = {
 	/* Addr      allmodes  */
 	{0x00009800, 0xafe68e30},
@@ -711,66 +705,6 @@ static const u32 ar9565_1p0_Common_rx_gain_table[][2] = {
 	{0x0000b1fc, 0x00000196},
 };
 
-static const u32 ar9565_1p0_Modes_lowest_ob_db_tx_gain_table[][5] = {
-	/* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
-	{0x0000a2dc, 0xfc0a9380, 0xfc0a9380, 0xfdab5b52, 0xfdab5b52},
-	{0x0000a2e0, 0xffecec00, 0xffecec00, 0xfd339c84, 0xfd339c84},
-	{0x0000a2e4, 0xfc0f0000, 0xfc0f0000, 0xfec3e000, 0xfec3e000},
-	{0x0000a2e8, 0xfc100000, 0xfc100000, 0xfffc0000, 0xfffc0000},
-	{0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
-	{0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
-	{0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
-	{0x0000a508, 0x0a000020, 0x0a000020, 0x08000004, 0x08000004},
-	{0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200},
-	{0x0000a510, 0x16000220, 0x16000220, 0x0f000202, 0x0f000202},
-	{0x0000a514, 0x1c000223, 0x1c000223, 0x12000400, 0x12000400},
-	{0x0000a518, 0x21020220, 0x21020220, 0x16000402, 0x16000402},
-	{0x0000a51c, 0x27020223, 0x27020223, 0x19000404, 0x19000404},
-	{0x0000a520, 0x2b022220, 0x2b022220, 0x1c000603, 0x1c000603},
-	{0x0000a524, 0x2f022222, 0x2f022222, 0x21000a02, 0x21000a02},
-	{0x0000a528, 0x34022225, 0x34022225, 0x25000a04, 0x25000a04},
-	{0x0000a52c, 0x3a02222a, 0x3a02222a, 0x28000a20, 0x28000a20},
-	{0x0000a530, 0x3e02222c, 0x3e02222c, 0x2c000e20, 0x2c000e20},
-	{0x0000a534, 0x4202242a, 0x4202242a, 0x30000e22, 0x30000e22},
-	{0x0000a538, 0x4702244a, 0x4702244a, 0x34000e24, 0x34000e24},
-	{0x0000a53c, 0x4b02244c, 0x4b02244c, 0x38001640, 0x38001640},
-	{0x0000a540, 0x4e02246c, 0x4e02246c, 0x3c001660, 0x3c001660},
-	{0x0000a544, 0x5302266c, 0x5302266c, 0x3f001861, 0x3f001861},
-	{0x0000a548, 0x5702286c, 0x5702286c, 0x43001a81, 0x43001a81},
-	{0x0000a54c, 0x5c04286b, 0x5c04286b, 0x47001a83, 0x47001a83},
-	{0x0000a550, 0x61042a6c, 0x61042a6c, 0x4a001c84, 0x4a001c84},
-	{0x0000a554, 0x66062a6c, 0x66062a6c, 0x4e001ce3, 0x4e001ce3},
-	{0x0000a558, 0x6b062e6c, 0x6b062e6c, 0x52001ce5, 0x52001ce5},
-	{0x0000a55c, 0x7006308c, 0x7006308c, 0x56001ce9, 0x56001ce9},
-	{0x0000a560, 0x730a308a, 0x730a308a, 0x5a001ceb, 0x5a001ceb},
-	{0x0000a564, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
-	{0x0000a568, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
-	{0x0000a56c, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
-	{0x0000a570, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
-	{0x0000a574, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
-	{0x0000a578, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
-	{0x0000a57c, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
-	{0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
-	{0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
-	{0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
-	{0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
-	{0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
-	{0x0000a614, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
-	{0x0000a618, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
-	{0x0000a61c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
-	{0x0000a620, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
-	{0x0000a624, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
-	{0x0000a628, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
-	{0x0000a62c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
-	{0x0000a630, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
-	{0x0000a634, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
-	{0x0000a638, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
-	{0x0000a63c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
-	{0x00016044, 0x012482d4, 0x012482d4, 0x012482d4, 0x012482d4},
-	{0x00016048, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
-	{0x00016054, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
-};
-
 static const u32 ar9565_1p0_pciephy_clkreq_disable_L1[][2] = {
 	/* Addr      allmodes  */
 	{0x00018c00, 0x18212ede},
@@ -1231,11 +1165,4 @@ static const u32 ar9565_1p0_modes_high_power_tx_gain_table[][5] = {
 	{0x00016054, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
 };
 
-static const u32 ar9565_1p0_baseband_core_txfir_coeff_japan_2484[][2] = {
-	/* Addr      allmodes  */
-	{0x0000a398, 0x00000000},
-	{0x0000a39c, 0x6f7f0301},
-	{0x0000a3a0, 0xca9228ee},
-};
-
 #endif /* INITVALS_9565_1P0_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar9565_1p1_initvals.h b/drivers/net/wireless/ath/ath9k/ar9565_1p1_initvals.h
new file mode 100644
index 000000000000..56810539971e
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9565_1p1_initvals.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2010-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2012 Qualcomm Atheros Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef INITVALS_9565_1P1_H
+#define INITVALS_9565_1P1_H
+
+/* AR9565 1.1 */
+
+#define ar9565_1p1_mac_core ar9565_1p0_mac_core
+
+#define ar9565_1p1_mac_postamble ar9565_1p0_mac_postamble
+
+#define ar9565_1p1_baseband_core ar9565_1p0_baseband_core
+
+#define ar9565_1p1_baseband_postamble ar9565_1p0_baseband_postamble
+
+#define ar9565_1p1_radio_core ar9565_1p0_radio_core
+
+#define ar9565_1p1_soc_preamble ar9565_1p0_soc_preamble
+
+#define ar9565_1p1_soc_postamble ar9565_1p0_soc_postamble
+
+#define ar9565_1p1_Common_rx_gain_table ar9565_1p0_Common_rx_gain_table
+
+#define ar9565_1p1_Modes_lowest_ob_db_tx_gain_table ar9565_1p0_Modes_lowest_ob_db_tx_gain_table
+
+#define ar9565_1p1_pciephy_clkreq_disable_L1 ar9565_1p0_pciephy_clkreq_disable_L1
+
+#define ar9565_1p1_modes_fast_clock ar9565_1p0_modes_fast_clock
+
+#define ar9565_1p1_common_wo_xlna_rx_gain_table ar9565_1p0_common_wo_xlna_rx_gain_table
+
+#define ar9565_1p1_modes_low_ob_db_tx_gain_table ar9565_1p0_modes_low_ob_db_tx_gain_table
+
+#define ar9565_1p1_modes_high_ob_db_tx_gain_table ar9565_1p0_modes_high_ob_db_tx_gain_table
+
+#define ar9565_1p1_modes_high_power_tx_gain_table ar9565_1p0_modes_high_power_tx_gain_table
+
+#define ar9565_1p1_baseband_core_txfir_coeff_japan_2484 ar9565_1p0_baseband_core_txfir_coeff_japan_2484
+
+static const u32 ar9565_1p1_radio_postamble[][5] = {
+	/* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
+	{0x0001609c, 0x0b8ee524, 0x0b8ee524, 0x0b8ee524, 0x0b8ee524},
+	{0x000160ac, 0xa4646c08, 0xa4646c08, 0x24645808, 0x24645808},
+	{0x000160b0, 0x01d67f70, 0x01d67f70, 0x01d67f70, 0x01d67f70},
+	{0x0001610c, 0x40000000, 0x40000000, 0x40000000, 0x40000000},
+	{0x00016140, 0x10804008, 0x10804008, 0x50804008, 0x50804008},
+};
+
+#endif /* INITVALS_9565_1P1_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h
index bdee2ed67219..e6aec2c0207f 100644
--- a/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h
@@ -20,18 +20,34 @@
 
 /* AR9580 1.0 */
 
+#define ar9580_1p0_soc_preamble ar9300_2p2_soc_preamble
+
+#define ar9580_1p0_soc_postamble ar9300_2p2_soc_postamble
+
+#define ar9580_1p0_radio_core ar9300_2p2_radio_core
+
+#define ar9580_1p0_mac_postamble ar9300_2p2_mac_postamble
+
+#define ar9580_1p0_wo_xlna_rx_gain_table ar9300Common_wo_xlna_rx_gain_table_2p2
+
+#define ar9580_1p0_type5_tx_gain_table ar9300Modes_type5_tx_gain_table_2p2
+
+#define ar9580_1p0_high_ob_db_tx_gain_table ar9300Modes_high_ob_db_tx_gain_table_2p2
+
 #define ar9580_1p0_modes_fast_clock ar9300Modes_fast_clock_2p2
 
+#define ar9580_1p0_baseband_core_txfir_coeff_japan_2484 ar9300_2p2_baseband_core_txfir_coeff_japan_2484
+
 static const u32 ar9580_1p0_radio_postamble[][5] = {
 	/* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
 	{0x0001609c, 0x0dd08f29, 0x0dd08f29, 0x0b283f31, 0x0b283f31},
 	{0x000160ac, 0xa4653c00, 0xa4653c00, 0x24652800, 0x24652800},
 	{0x000160b0, 0x03284f3e, 0x03284f3e, 0x05d08f20, 0x05d08f20},
-	{0x0001610c, 0x08000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0001610c, 0xc8000000, 0xc0000000, 0xc0000000, 0xc0000000},
 	{0x00016140, 0x10804008, 0x10804008, 0x50804008, 0x50804008},
-	{0x0001650c, 0x08000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0001650c, 0xc8000000, 0xc0000000, 0xc0000000, 0xc0000000},
 	{0x00016540, 0x10804008, 0x10804008, 0x50804008, 0x50804008},
-	{0x0001690c, 0x08000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0001690c, 0xc8000000, 0xc0000000, 0xc0000000, 0xc0000000},
 	{0x00016940, 0x10804008, 0x10804008, 0x50804008, 0x50804008},
 };
 
@@ -41,12 +57,10 @@ static const u32 ar9580_1p0_baseband_core[][2] = {
 	{0x00009804, 0xfd14e000},
 	{0x00009808, 0x9c0a9f6b},
 	{0x0000980c, 0x04900000},
-	{0x00009814, 0x3280c00a},
-	{0x00009818, 0x00000000},
 	{0x0000981c, 0x00020028},
-	{0x00009834, 0x6400a290},
+	{0x00009834, 0x6400a190},
 	{0x00009838, 0x0108ecff},
-	{0x0000983c, 0x0d000600},
+	{0x0000983c, 0x14000600},
 	{0x00009880, 0x201fff00},
 	{0x00009884, 0x00001042},
 	{0x000098a4, 0x00200400},
@@ -67,7 +81,7 @@ static const u32 ar9580_1p0_baseband_core[][2] = {
 	{0x00009d04, 0x40206c10},
 	{0x00009d08, 0x009c4060},
 	{0x00009d0c, 0x9883800a},
-	{0x00009d10, 0x01834061},
+	{0x00009d10, 0x01884061},
 	{0x00009d14, 0x00c0040b},
 	{0x00009d18, 0x00000000},
 	{0x00009e08, 0x0038230c},
@@ -198,8 +212,6 @@ static const u32 ar9580_1p0_baseband_core[][2] = {
 	{0x0000c420, 0x00000000},
 };
 
-#define ar9580_1p0_mac_postamble ar9300_2p2_mac_postamble
-
 static const u32 ar9580_1p0_low_ob_db_tx_gain_table[][5] = {
 	/* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
 	{0x0000a2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
@@ -306,7 +318,112 @@ static const u32 ar9580_1p0_low_ob_db_tx_gain_table[][5] = {
 	{0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
 };
 
-#define ar9580_1p0_high_power_tx_gain_table ar9580_1p0_low_ob_db_tx_gain_table
+static const u32 ar9580_1p0_high_power_tx_gain_table[][5] = {
+	/* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
+	{0x0000a2dc, 0x000cfff0, 0x000cfff0, 0x03aaa352, 0x03aaa352},
+	{0x0000a2e0, 0x000f0000, 0x000f0000, 0x03ccc584, 0x03ccc584},
+	{0x0000a2e4, 0x03f00000, 0x03f00000, 0x03f0f800, 0x03f0f800},
+	{0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+	{0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
+	{0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
+	{0x0000a508, 0x0a000020, 0x0a000020, 0x08000004, 0x08000004},
+	{0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200},
+	{0x0000a510, 0x15000028, 0x15000028, 0x0f000202, 0x0f000202},
+	{0x0000a514, 0x1b00002b, 0x1b00002b, 0x12000400, 0x12000400},
+	{0x0000a518, 0x1f020028, 0x1f020028, 0x16000402, 0x16000402},
+	{0x0000a51c, 0x2502002b, 0x2502002b, 0x19000404, 0x19000404},
+	{0x0000a520, 0x2a04002a, 0x2a04002a, 0x1c000603, 0x1c000603},
+	{0x0000a524, 0x2e06002a, 0x2e06002a, 0x21000a02, 0x21000a02},
+	{0x0000a528, 0x3302202d, 0x3302202d, 0x25000a04, 0x25000a04},
+	{0x0000a52c, 0x3804202c, 0x3804202c, 0x28000a20, 0x28000a20},
+	{0x0000a530, 0x3c06202c, 0x3c06202c, 0x2c000e20, 0x2c000e20},
+	{0x0000a534, 0x4108202d, 0x4108202d, 0x30000e22, 0x30000e22},
+	{0x0000a538, 0x4506402d, 0x4506402d, 0x34000e24, 0x34000e24},
+	{0x0000a53c, 0x4906222d, 0x4906222d, 0x38001640, 0x38001640},
+	{0x0000a540, 0x4d062231, 0x4d062231, 0x3c001660, 0x3c001660},
+	{0x0000a544, 0x50082231, 0x50082231, 0x3f001861, 0x3f001861},
+	{0x0000a548, 0x5608422e, 0x5608422e, 0x43001a81, 0x43001a81},
+	{0x0000a54c, 0x5e08442e, 0x5e08442e, 0x47001a83, 0x47001a83},
+	{0x0000a550, 0x620a4431, 0x620a4431, 0x4a001c84, 0x4a001c84},
+	{0x0000a554, 0x640a4432, 0x640a4432, 0x4e001ce3, 0x4e001ce3},
+	{0x0000a558, 0x680a4434, 0x680a4434, 0x52001ce5, 0x52001ce5},
+	{0x0000a55c, 0x6c0a6434, 0x6c0a6434, 0x56001ce9, 0x56001ce9},
+	{0x0000a560, 0x6f0a6633, 0x6f0a6633, 0x5a001ceb, 0x5a001ceb},
+	{0x0000a564, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+	{0x0000a568, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+	{0x0000a56c, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+	{0x0000a570, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+	{0x0000a574, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+	{0x0000a578, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+	{0x0000a57c, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+	{0x0000a580, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
+	{0x0000a584, 0x06800003, 0x06800003, 0x04800002, 0x04800002},
+	{0x0000a588, 0x0a800020, 0x0a800020, 0x08800004, 0x08800004},
+	{0x0000a58c, 0x10800023, 0x10800023, 0x0b800200, 0x0b800200},
+	{0x0000a590, 0x15800028, 0x15800028, 0x0f800202, 0x0f800202},
+	{0x0000a594, 0x1b80002b, 0x1b80002b, 0x12800400, 0x12800400},
+	{0x0000a598, 0x1f820028, 0x1f820028, 0x16800402, 0x16800402},
+	{0x0000a59c, 0x2582002b, 0x2582002b, 0x19800404, 0x19800404},
+	{0x0000a5a0, 0x2a84002a, 0x2a84002a, 0x1c800603, 0x1c800603},
+	{0x0000a5a4, 0x2e86002a, 0x2e86002a, 0x21800a02, 0x21800a02},
+	{0x0000a5a8, 0x3382202d, 0x3382202d, 0x25800a04, 0x25800a04},
+	{0x0000a5ac, 0x3884202c, 0x3884202c, 0x28800a20, 0x28800a20},
+	{0x0000a5b0, 0x3c86202c, 0x3c86202c, 0x2c800e20, 0x2c800e20},
+	{0x0000a5b4, 0x4188202d, 0x4188202d, 0x30800e22, 0x30800e22},
+	{0x0000a5b8, 0x4586402d, 0x4586402d, 0x34800e24, 0x34800e24},
+	{0x0000a5bc, 0x4986222d, 0x4986222d, 0x38801640, 0x38801640},
+	{0x0000a5c0, 0x4d862231, 0x4d862231, 0x3c801660, 0x3c801660},
+	{0x0000a5c4, 0x50882231, 0x50882231, 0x3f801861, 0x3f801861},
+	{0x0000a5c8, 0x5688422e, 0x5688422e, 0x43801a81, 0x43801a81},
+	{0x0000a5cc, 0x5a88442e, 0x5a88442e, 0x47801a83, 0x47801a83},
+	{0x0000a5d0, 0x5e8a4431, 0x5e8a4431, 0x4a801c84, 0x4a801c84},
+	{0x0000a5d4, 0x648a4432, 0x648a4432, 0x4e801ce3, 0x4e801ce3},
+	{0x0000a5d8, 0x688a4434, 0x688a4434, 0x52801ce5, 0x52801ce5},
+	{0x0000a5dc, 0x6c8a6434, 0x6c8a6434, 0x56801ce9, 0x56801ce9},
+	{0x0000a5e0, 0x6f8a6633, 0x6f8a6633, 0x5a801ceb, 0x5a801ceb},
+	{0x0000a5e4, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec},
+	{0x0000a5e8, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec},
+	{0x0000a5ec, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec},
+	{0x0000a5f0, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec},
+	{0x0000a5f4, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec},
+	{0x0000a5f8, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec},
+	{0x0000a5fc, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec},
+	{0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+	{0x0000a608, 0x01804601, 0x01804601, 0x00000000, 0x00000000},
+	{0x0000a60c, 0x01804601, 0x01804601, 0x00000000, 0x00000000},
+	{0x0000a610, 0x01804601, 0x01804601, 0x00000000, 0x00000000},
+	{0x0000a614, 0x01804601, 0x01804601, 0x01404000, 0x01404000},
+	{0x0000a618, 0x01804601, 0x01804601, 0x01404501, 0x01404501},
+	{0x0000a61c, 0x01804601, 0x01804601, 0x02008501, 0x02008501},
+	{0x0000a620, 0x03408d02, 0x03408d02, 0x0280ca03, 0x0280ca03},
+	{0x0000a624, 0x0300cc03, 0x0300cc03, 0x03010c04, 0x03010c04},
+	{0x0000a628, 0x03410d04, 0x03410d04, 0x04014c04, 0x04014c04},
+	{0x0000a62c, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005},
+	{0x0000a630, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005},
+	{0x0000a634, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005},
+	{0x0000a638, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005},
+	{0x0000a63c, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005},
+	{0x0000b2dc, 0x000cfff0, 0x000cfff0, 0x03aaa352, 0x03aaa352},
+	{0x0000b2e0, 0x000f0000, 0x000f0000, 0x03ccc584, 0x03ccc584},
+	{0x0000b2e4, 0x03f00000, 0x03f00000, 0x03f0f800, 0x03f0f800},
+	{0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+	{0x0000c2dc, 0x000cfff0, 0x000cfff0, 0x03aaa352, 0x03aaa352},
+	{0x0000c2e0, 0x000f0000, 0x000f0000, 0x03ccc584, 0x03ccc584},
+	{0x0000c2e4, 0x03f00000, 0x03f00000, 0x03f0f800, 0x03f0f800},
+	{0x0000c2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+	{0x00016044, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
+	{0x00016048, 0x65240001, 0x65240001, 0x66480001, 0x66480001},
+	{0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+	{0x00016288, 0x05a2040a, 0x05a2040a, 0x05a20408, 0x05a20408},
+	{0x00016444, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
+	{0x00016448, 0x65240001, 0x65240001, 0x66480001, 0x66480001},
+	{0x00016468, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+	{0x00016844, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
+	{0x00016848, 0x65240001, 0x65240001, 0x66480001, 0x66480001},
+	{0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+};
 
 static const u32 ar9580_1p0_lowest_ob_db_tx_gain_table[][5] = {
 	/* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
@@ -414,8 +531,6 @@ static const u32 ar9580_1p0_lowest_ob_db_tx_gain_table[][5] = {
 	{0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
 };
 
-#define ar9580_1p0_baseband_core_txfir_coeff_japan_2484 ar9462_2p0_baseband_core_txfir_coeff_japan_2484
-
 static const u32 ar9580_1p0_mac_core[][2] = {
 	/* Addr      allmodes  */
 	{0x00000008, 0x00000000},
@@ -679,14 +794,6 @@ static const u32 ar9580_1p0_mixed_ob_db_tx_gain_table[][5] = {
 	{0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
 };
 
-#define ar9580_1p0_wo_xlna_rx_gain_table ar9300Common_wo_xlna_rx_gain_table_2p2
-
-#define ar9580_1p0_soc_postamble ar9300_2p2_soc_postamble
-
-#define ar9580_1p0_high_ob_db_tx_gain_table ar9300Modes_high_ob_db_tx_gain_table_2p2
-
-#define ar9580_1p0_type5_tx_gain_table ar9300Modes_type5_tx_gain_table_2p2
-
 static const u32 ar9580_1p0_type6_tx_gain_table[][5] = {
 	/* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
 	{0x0000a2dc, 0x000cfff0, 0x000cfff0, 0x03aaa352, 0x03aaa352},
@@ -761,165 +868,271 @@ static const u32 ar9580_1p0_type6_tx_gain_table[][5] = {
 	{0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
 };
 
-static const u32 ar9580_1p0_soc_preamble[][2] = {
+static const u32 ar9580_1p0_rx_gain_table[][2] = {
 	/* Addr      allmodes  */
-	{0x000040a4, 0x00a0c1c9},
-	{0x00007008, 0x00000000},
-	{0x00007020, 0x00000000},
-	{0x00007034, 0x00000002},
-	{0x00007038, 0x000004c2},
-	{0x00007048, 0x00000008},
-};
-
-#define ar9580_1p0_rx_gain_table ar9462_common_rx_gain_table_2p0
-
-static const u32 ar9580_1p0_radio_core[][2] = {
-	/* Addr      allmodes  */
-	{0x00016000, 0x36db6db6},
-	{0x00016004, 0x6db6db40},
-	{0x00016008, 0x73f00000},
-	{0x0001600c, 0x00000000},
-	{0x00016040, 0x7f80fff8},
-	{0x0001604c, 0x76d005b5},
-	{0x00016050, 0x556cf031},
-	{0x00016054, 0x13449440},
-	{0x00016058, 0x0c51c92c},
-	{0x0001605c, 0x3db7fffc},
-	{0x00016060, 0xfffffffc},
-	{0x00016064, 0x000f0278},
-	{0x0001606c, 0x6db60000},
-	{0x00016080, 0x00000000},
-	{0x00016084, 0x0e48048c},
-	{0x00016088, 0x54214514},
-	{0x0001608c, 0x119f481e},
-	{0x00016090, 0x24926490},
-	{0x00016098, 0xd2888888},
-	{0x000160a0, 0x0a108ffe},
-	{0x000160a4, 0x812fc370},
-	{0x000160a8, 0x423c8000},
-	{0x000160b4, 0x92480080},
-	{0x000160c0, 0x00adb6d0},
-	{0x000160c4, 0x6db6db60},
-	{0x000160c8, 0x6db6db6c},
-	{0x000160cc, 0x01e6c000},
-	{0x00016100, 0x3fffbe01},
-	{0x00016104, 0xfff80000},
-	{0x00016108, 0x00080010},
-	{0x00016144, 0x02084080},
-	{0x00016148, 0x00000000},
-	{0x00016280, 0x058a0001},
-	{0x00016284, 0x3d840208},
-	{0x00016288, 0x05a20408},
-	{0x0001628c, 0x00038c07},
-	{0x00016290, 0x00000004},
-	{0x00016294, 0x458aa14f},
-	{0x00016380, 0x00000000},
-	{0x00016384, 0x00000000},
-	{0x00016388, 0x00800700},
-	{0x0001638c, 0x00800700},
-	{0x00016390, 0x00800700},
-	{0x00016394, 0x00000000},
-	{0x00016398, 0x00000000},
-	{0x0001639c, 0x00000000},
-	{0x000163a0, 0x00000001},
-	{0x000163a4, 0x00000001},
-	{0x000163a8, 0x00000000},
-	{0x000163ac, 0x00000000},
-	{0x000163b0, 0x00000000},
-	{0x000163b4, 0x00000000},
-	{0x000163b8, 0x00000000},
-	{0x000163bc, 0x00000000},
-	{0x000163c0, 0x000000a0},
-	{0x000163c4, 0x000c0000},
-	{0x000163c8, 0x14021402},
-	{0x000163cc, 0x00001402},
-	{0x000163d0, 0x00000000},
-	{0x000163d4, 0x00000000},
-	{0x00016400, 0x36db6db6},
-	{0x00016404, 0x6db6db40},
-	{0x00016408, 0x73f00000},
-	{0x0001640c, 0x00000000},
-	{0x00016440, 0x7f80fff8},
-	{0x0001644c, 0x76d005b5},
-	{0x00016450, 0x556cf031},
-	{0x00016454, 0x13449440},
-	{0x00016458, 0x0c51c92c},
-	{0x0001645c, 0x3db7fffc},
-	{0x00016460, 0xfffffffc},
-	{0x00016464, 0x000f0278},
-	{0x0001646c, 0x6db60000},
-	{0x00016500, 0x3fffbe01},
-	{0x00016504, 0xfff80000},
-	{0x00016508, 0x00080010},
-	{0x00016544, 0x02084080},
-	{0x00016548, 0x00000000},
-	{0x00016780, 0x00000000},
-	{0x00016784, 0x00000000},
-	{0x00016788, 0x00800700},
-	{0x0001678c, 0x00800700},
-	{0x00016790, 0x00800700},
-	{0x00016794, 0x00000000},
-	{0x00016798, 0x00000000},
-	{0x0001679c, 0x00000000},
-	{0x000167a0, 0x00000001},
-	{0x000167a4, 0x00000001},
-	{0x000167a8, 0x00000000},
-	{0x000167ac, 0x00000000},
-	{0x000167b0, 0x00000000},
-	{0x000167b4, 0x00000000},
-	{0x000167b8, 0x00000000},
-	{0x000167bc, 0x00000000},
-	{0x000167c0, 0x000000a0},
-	{0x000167c4, 0x000c0000},
-	{0x000167c8, 0x14021402},
-	{0x000167cc, 0x00001402},
-	{0x000167d0, 0x00000000},
-	{0x000167d4, 0x00000000},
-	{0x00016800, 0x36db6db6},
-	{0x00016804, 0x6db6db40},
-	{0x00016808, 0x73f00000},
-	{0x0001680c, 0x00000000},
-	{0x00016840, 0x7f80fff8},
-	{0x0001684c, 0x76d005b5},
-	{0x00016850, 0x556cf031},
-	{0x00016854, 0x13449440},
-	{0x00016858, 0x0c51c92c},
-	{0x0001685c, 0x3db7fffc},
-	{0x00016860, 0xfffffffc},
-	{0x00016864, 0x000f0278},
-	{0x0001686c, 0x6db60000},
-	{0x00016900, 0x3fffbe01},
-	{0x00016904, 0xfff80000},
-	{0x00016908, 0x00080010},
-	{0x00016944, 0x02084080},
-	{0x00016948, 0x00000000},
-	{0x00016b80, 0x00000000},
-	{0x00016b84, 0x00000000},
-	{0x00016b88, 0x00800700},
-	{0x00016b8c, 0x00800700},
-	{0x00016b90, 0x00800700},
-	{0x00016b94, 0x00000000},
-	{0x00016b98, 0x00000000},
-	{0x00016b9c, 0x00000000},
-	{0x00016ba0, 0x00000001},
-	{0x00016ba4, 0x00000001},
-	{0x00016ba8, 0x00000000},
-	{0x00016bac, 0x00000000},
-	{0x00016bb0, 0x00000000},
-	{0x00016bb4, 0x00000000},
-	{0x00016bb8, 0x00000000},
-	{0x00016bbc, 0x00000000},
-	{0x00016bc0, 0x000000a0},
-	{0x00016bc4, 0x000c0000},
-	{0x00016bc8, 0x14021402},
-	{0x00016bcc, 0x00001402},
-	{0x00016bd0, 0x00000000},
-	{0x00016bd4, 0x00000000},
+	{0x0000a000, 0x00010000},
+	{0x0000a004, 0x00030002},
+	{0x0000a008, 0x00050004},
+	{0x0000a00c, 0x00810080},
+	{0x0000a010, 0x00830082},
+	{0x0000a014, 0x01810180},
+	{0x0000a018, 0x01830182},
+	{0x0000a01c, 0x01850184},
+	{0x0000a020, 0x01890188},
+	{0x0000a024, 0x018b018a},
+	{0x0000a028, 0x018d018c},
+	{0x0000a02c, 0x01910190},
+	{0x0000a030, 0x01930192},
+	{0x0000a034, 0x01950194},
+	{0x0000a038, 0x038a0196},
+	{0x0000a03c, 0x038c038b},
+	{0x0000a040, 0x0390038d},
+	{0x0000a044, 0x03920391},
+	{0x0000a048, 0x03940393},
+	{0x0000a04c, 0x03960395},
+	{0x0000a050, 0x00000000},
+	{0x0000a054, 0x00000000},
+	{0x0000a058, 0x00000000},
+	{0x0000a05c, 0x00000000},
+	{0x0000a060, 0x00000000},
+	{0x0000a064, 0x00000000},
+	{0x0000a068, 0x00000000},
+	{0x0000a06c, 0x00000000},
+	{0x0000a070, 0x00000000},
+	{0x0000a074, 0x00000000},
+	{0x0000a078, 0x00000000},
+	{0x0000a07c, 0x00000000},
+	{0x0000a080, 0x22222229},
+	{0x0000a084, 0x1d1d1d1d},
+	{0x0000a088, 0x1d1d1d1d},
+	{0x0000a08c, 0x1d1d1d1d},
+	{0x0000a090, 0x171d1d1d},
+	{0x0000a094, 0x11111717},
+	{0x0000a098, 0x00030311},
+	{0x0000a09c, 0x00000000},
+	{0x0000a0a0, 0x00000000},
+	{0x0000a0a4, 0x00000000},
+	{0x0000a0a8, 0x00000000},
+	{0x0000a0ac, 0x00000000},
+	{0x0000a0b0, 0x00000000},
+	{0x0000a0b4, 0x00000000},
+	{0x0000a0b8, 0x00000000},
+	{0x0000a0bc, 0x00000000},
+	{0x0000a0c0, 0x001f0000},
+	{0x0000a0c4, 0x01000101},
+	{0x0000a0c8, 0x011e011f},
+	{0x0000a0cc, 0x011c011d},
+	{0x0000a0d0, 0x02030204},
+	{0x0000a0d4, 0x02010202},
+	{0x0000a0d8, 0x021f0200},
+	{0x0000a0dc, 0x0302021e},
+	{0x0000a0e0, 0x03000301},
+	{0x0000a0e4, 0x031e031f},
+	{0x0000a0e8, 0x0402031d},
+	{0x0000a0ec, 0x04000401},
+	{0x0000a0f0, 0x041e041f},
+	{0x0000a0f4, 0x0502041d},
+	{0x0000a0f8, 0x05000501},
+	{0x0000a0fc, 0x051e051f},
+	{0x0000a100, 0x06010602},
+	{0x0000a104, 0x061f0600},
+	{0x0000a108, 0x061d061e},
+	{0x0000a10c, 0x07020703},
+	{0x0000a110, 0x07000701},
+	{0x0000a114, 0x00000000},
+	{0x0000a118, 0x00000000},
+	{0x0000a11c, 0x00000000},
+	{0x0000a120, 0x00000000},
+	{0x0000a124, 0x00000000},
+	{0x0000a128, 0x00000000},
+	{0x0000a12c, 0x00000000},
+	{0x0000a130, 0x00000000},
+	{0x0000a134, 0x00000000},
+	{0x0000a138, 0x00000000},
+	{0x0000a13c, 0x00000000},
+	{0x0000a140, 0x001f0000},
+	{0x0000a144, 0x01000101},
+	{0x0000a148, 0x011e011f},
+	{0x0000a14c, 0x011c011d},
+	{0x0000a150, 0x02030204},
+	{0x0000a154, 0x02010202},
+	{0x0000a158, 0x021f0200},
+	{0x0000a15c, 0x0302021e},
+	{0x0000a160, 0x03000301},
+	{0x0000a164, 0x031e031f},
+	{0x0000a168, 0x0402031d},
+	{0x0000a16c, 0x04000401},
+	{0x0000a170, 0x041e041f},
+	{0x0000a174, 0x0502041d},
+	{0x0000a178, 0x05000501},
+	{0x0000a17c, 0x051e051f},
+	{0x0000a180, 0x06010602},
+	{0x0000a184, 0x061f0600},
+	{0x0000a188, 0x061d061e},
+	{0x0000a18c, 0x07020703},
+	{0x0000a190, 0x07000701},
+	{0x0000a194, 0x00000000},
+	{0x0000a198, 0x00000000},
+	{0x0000a19c, 0x00000000},
+	{0x0000a1a0, 0x00000000},
+	{0x0000a1a4, 0x00000000},
+	{0x0000a1a8, 0x00000000},
+	{0x0000a1ac, 0x00000000},
+	{0x0000a1b0, 0x00000000},
+	{0x0000a1b4, 0x00000000},
+	{0x0000a1b8, 0x00000000},
+	{0x0000a1bc, 0x00000000},
+	{0x0000a1c0, 0x00000000},
+	{0x0000a1c4, 0x00000000},
+	{0x0000a1c8, 0x00000000},
+	{0x0000a1cc, 0x00000000},
+	{0x0000a1d0, 0x00000000},
+	{0x0000a1d4, 0x00000000},
+	{0x0000a1d8, 0x00000000},
+	{0x0000a1dc, 0x00000000},
+	{0x0000a1e0, 0x00000000},
+	{0x0000a1e4, 0x00000000},
+	{0x0000a1e8, 0x00000000},
+	{0x0000a1ec, 0x00000000},
+	{0x0000a1f0, 0x00000396},
+	{0x0000a1f4, 0x00000396},
+	{0x0000a1f8, 0x00000396},
+	{0x0000a1fc, 0x00000196},
+	{0x0000b000, 0x00010000},
+	{0x0000b004, 0x00030002},
+	{0x0000b008, 0x00050004},
+	{0x0000b00c, 0x00810080},
+	{0x0000b010, 0x00830082},
+	{0x0000b014, 0x01810180},
+	{0x0000b018, 0x01830182},
+	{0x0000b01c, 0x01850184},
+	{0x0000b020, 0x02810280},
+	{0x0000b024, 0x02830282},
+	{0x0000b028, 0x02850284},
+	{0x0000b02c, 0x02890288},
+	{0x0000b030, 0x028b028a},
+	{0x0000b034, 0x0388028c},
+	{0x0000b038, 0x038a0389},
+	{0x0000b03c, 0x038c038b},
+	{0x0000b040, 0x0390038d},
+	{0x0000b044, 0x03920391},
+	{0x0000b048, 0x03940393},
+	{0x0000b04c, 0x03960395},
+	{0x0000b050, 0x00000000},
+	{0x0000b054, 0x00000000},
+	{0x0000b058, 0x00000000},
+	{0x0000b05c, 0x00000000},
+	{0x0000b060, 0x00000000},
+	{0x0000b064, 0x00000000},
+	{0x0000b068, 0x00000000},
+	{0x0000b06c, 0x00000000},
+	{0x0000b070, 0x00000000},
+	{0x0000b074, 0x00000000},
+	{0x0000b078, 0x00000000},
+	{0x0000b07c, 0x00000000},
+	{0x0000b080, 0x23232323},
+	{0x0000b084, 0x21232323},
+	{0x0000b088, 0x19191c1e},
+	{0x0000b08c, 0x12141417},
+	{0x0000b090, 0x07070e0e},
+	{0x0000b094, 0x03030305},
+	{0x0000b098, 0x00000003},
+	{0x0000b09c, 0x00000000},
+	{0x0000b0a0, 0x00000000},
+	{0x0000b0a4, 0x00000000},
+	{0x0000b0a8, 0x00000000},
+	{0x0000b0ac, 0x00000000},
+	{0x0000b0b0, 0x00000000},
+	{0x0000b0b4, 0x00000000},
+	{0x0000b0b8, 0x00000000},
+	{0x0000b0bc, 0x00000000},
+	{0x0000b0c0, 0x003f0020},
+	{0x0000b0c4, 0x00400041},
+	{0x0000b0c8, 0x0140005f},
+	{0x0000b0cc, 0x0160015f},
+	{0x0000b0d0, 0x017e017f},
+	{0x0000b0d4, 0x02410242},
+	{0x0000b0d8, 0x025f0240},
+	{0x0000b0dc, 0x027f0260},
+	{0x0000b0e0, 0x0341027e},
+	{0x0000b0e4, 0x035f0340},
+	{0x0000b0e8, 0x037f0360},
+	{0x0000b0ec, 0x04400441},
+	{0x0000b0f0, 0x0460045f},
+	{0x0000b0f4, 0x0541047f},
+	{0x0000b0f8, 0x055f0540},
+	{0x0000b0fc, 0x057f0560},
+	{0x0000b100, 0x06400641},
+	{0x0000b104, 0x0660065f},
+	{0x0000b108, 0x067e067f},
+	{0x0000b10c, 0x07410742},
+	{0x0000b110, 0x075f0740},
+	{0x0000b114, 0x077f0760},
+	{0x0000b118, 0x07800781},
+	{0x0000b11c, 0x07a0079f},
+	{0x0000b120, 0x07c107bf},
+	{0x0000b124, 0x000007c0},
+	{0x0000b128, 0x00000000},
+	{0x0000b12c, 0x00000000},
+	{0x0000b130, 0x00000000},
+	{0x0000b134, 0x00000000},
+	{0x0000b138, 0x00000000},
+	{0x0000b13c, 0x00000000},
+	{0x0000b140, 0x003f0020},
+	{0x0000b144, 0x00400041},
+	{0x0000b148, 0x0140005f},
+	{0x0000b14c, 0x0160015f},
+	{0x0000b150, 0x017e017f},
+	{0x0000b154, 0x02410242},
+	{0x0000b158, 0x025f0240},
+	{0x0000b15c, 0x027f0260},
+	{0x0000b160, 0x0341027e},
+	{0x0000b164, 0x035f0340},
+	{0x0000b168, 0x037f0360},
+	{0x0000b16c, 0x04400441},
+	{0x0000b170, 0x0460045f},
+	{0x0000b174, 0x0541047f},
+	{0x0000b178, 0x055f0540},
+	{0x0000b17c, 0x057f0560},
+	{0x0000b180, 0x06400641},
+	{0x0000b184, 0x0660065f},
+	{0x0000b188, 0x067e067f},
+	{0x0000b18c, 0x07410742},
+	{0x0000b190, 0x075f0740},
+	{0x0000b194, 0x077f0760},
+	{0x0000b198, 0x07800781},
+	{0x0000b19c, 0x07a0079f},
+	{0x0000b1a0, 0x07c107bf},
+	{0x0000b1a4, 0x000007c0},
+	{0x0000b1a8, 0x00000000},
+	{0x0000b1ac, 0x00000000},
+	{0x0000b1b0, 0x00000000},
+	{0x0000b1b4, 0x00000000},
+	{0x0000b1b8, 0x00000000},
+	{0x0000b1bc, 0x00000000},
+	{0x0000b1c0, 0x00000000},
+	{0x0000b1c4, 0x00000000},
+	{0x0000b1c8, 0x00000000},
+	{0x0000b1cc, 0x00000000},
+	{0x0000b1d0, 0x00000000},
+	{0x0000b1d4, 0x00000000},
+	{0x0000b1d8, 0x00000000},
+	{0x0000b1dc, 0x00000000},
+	{0x0000b1e0, 0x00000000},
+	{0x0000b1e4, 0x00000000},
+	{0x0000b1e8, 0x00000000},
+	{0x0000b1ec, 0x00000000},
+	{0x0000b1f0, 0x00000396},
+	{0x0000b1f4, 0x00000396},
+	{0x0000b1f8, 0x00000396},
+	{0x0000b1fc, 0x00000196},
 };
 
 static const u32 ar9580_1p0_baseband_postamble[][5] = {
 	/* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
 	{0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8011, 0xd00a8011},
+	{0x00009814, 0x3280c00a, 0x3280c00a, 0x3280c00a, 0x3280c00a},
+	{0x00009818, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
 	{0x00009820, 0x206a022e, 0x206a022e, 0x206a012e, 0x206a012e},
 	{0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0},
 	{0x00009828, 0x06903081, 0x06903081, 0x06903881, 0x06903881},
@@ -956,7 +1169,7 @@ static const u32 ar9580_1p0_baseband_postamble[][5] = {
 	{0x0000a288, 0x00000110, 0x00000110, 0x00000110, 0x00000110},
 	{0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222},
 	{0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
-	{0x0000a2d0, 0x00041981, 0x00041981, 0x00041981, 0x00041982},
+	{0x0000a2d0, 0x00041983, 0x00041983, 0x00041981, 0x00041982},
 	{0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b},
 	{0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
 	{0x0000a830, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
@@ -994,4 +1207,13 @@ static const u32 ar9580_1p0_pcie_phy_pll_on_clkreq[][2] = {
 	{0x00004044, 0x00000000},
 };
 
+static const u32 ar9580_1p0_baseband_postamble_dfs_channel[][3] = {
+	/* Addr      5G          2G        */
+	{0x00009814, 0x3400c00f, 0x3400c00f},
+	{0x00009824, 0x5ac668d0, 0x5ac668d0},
+	{0x00009828, 0x06903080, 0x06903080},
+	{0x00009e0c, 0x6d4000e2, 0x6d4000e2},
+	{0x00009e14, 0x37b9625e, 0x37b9625e},
+};
+
 #endif /* INITVALS_9580_1P0_H */
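The five-column tables above carry one value per channel mode (5G_HT20, 5G_HT40, 2G_HT40, 2G_HT20) alongside each address; a minimal sketch of selecting one column at load time, assuming hypothetical column indices that simply mirror the table layout and the same kind of stub register writer as in the earlier sketch:

/* Illustrative sketch only: write one mode column of an
 * {addr, 5G_HT20, 5G_HT40, 2G_HT40, 2G_HT20} table. None of these names are
 * driver symbols; the enum just mirrors the column order shown above. */
#include <stddef.h>
#include <stdint.h>

typedef uint32_t u32;

enum mode_column {
	COL_5G_HT20 = 1,
	COL_5G_HT40 = 2,
	COL_2G_HT40 = 3,
	COL_2G_HT20 = 4,
};

static void reg_write(u32 addr, u32 val)
{
	(void)addr; /* stand-in for an MMIO write */
	(void)val;
}

static void write_mode_column(const u32 (*tbl)[5], size_t rows,
			      enum mode_column col)
{
	size_t i;

	for (i = 0; i < rows; i++)
		reg_write(tbl[i][0], tbl[i][col]);
}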
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 60a5da53668f..b5ac32cfbeb8 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -27,40 +27,15 @@
 #include "common.h"
 #include "mci.h"
 #include "dfs.h"
-
-/*
- * Header for the ath9k.ko driver core *only* -- hw code nor any other driver
- * should rely on this file or its contents.
- */
+#include "spectral.h"
 
 struct ath_node;
+struct ath_rate_table;
 
-/* Macro to expand scalars to 64-bit objects */
-
-#define	ito64(x) (sizeof(x) == 1) ?			\
-	(((unsigned long long int)(x)) & (0xff)) :	\
-	(sizeof(x) == 2) ?				\
-	(((unsigned long long int)(x)) & 0xffff) :	\
-	((sizeof(x) == 4) ?				\
-	 (((unsigned long long int)(x)) & 0xffffffff) : \
-	 (unsigned long long int)(x))
-
-/* increment with wrap-around */
-#define INCR(_l, _sz)   do {			\
-		(_l)++;				\
-		(_l) &= ((_sz) - 1);		\
-	} while (0)
-
-/* decrement with wrap-around */
-#define DECR(_l,  _sz)  do {			\
-		(_l)--;				\
-		(_l) &= ((_sz) - 1);		\
-	} while (0)
-
-#define TSF_TO_TU(_h,_l) \
-	((((u32)(_h)) << 22) | (((u32)(_l)) >> 10))
-
-#define	ATH_TXQ_SETUP(sc, i)        ((sc)->tx.txqsetup & (1<<i))
+extern struct ieee80211_ops ath9k_ops;
+extern int ath9k_modparam_nohwcrypt;
+extern int led_blink;
+extern bool is_ath9k_unloaded;
 
 struct ath_config {
 	u16 txpowlimit;
@@ -70,6 +45,17 @@ struct ath_config {
 /* Descriptor Management */
 /*************************/
 
+#define ATH_TXSTATUS_RING_SIZE 512
+
+/* Macro to expand scalars to 64-bit objects */
+#define	ito64(x) (sizeof(x) == 1) ?			\
+	(((unsigned long long int)(x)) & (0xff)) :	\
+	(sizeof(x) == 2) ?				\
+	(((unsigned long long int)(x)) & 0xffff) :	\
+	((sizeof(x) == 4) ?				\
+	 (((unsigned long long int)(x)) & 0xffffffff) : \
+	 (unsigned long long int)(x))
+
 #define ATH_TXBUF_RESET(_bf) do {				\
 		(_bf)->bf_lastbf = NULL;			\
 		(_bf)->bf_next = NULL;				\
@@ -77,23 +63,6 @@ struct ath_config {
 		       sizeof(struct ath_buf_state));		\
 	} while (0)
 
-/**
- * enum buffer_type - Buffer type flags
- *
- * @BUF_AMPDU: This buffer is an ampdu, as part of an aggregate (during TX)
- * @BUF_AGGR: Indicates whether the buffer can be aggregated
- *	(used in aggregation scheduling)
- */
-enum buffer_type {
-	BUF_AMPDU		= BIT(0),
-	BUF_AGGR		= BIT(1),
-};
-
-#define bf_isampdu(bf)		(bf->bf_state.bf_type & BUF_AMPDU)
-#define bf_isaggr(bf)		(bf->bf_state.bf_type & BUF_AGGR)
-
-#define ATH_TXSTATUS_RING_SIZE 512
-
 #define	DS2PHYS(_dd, _ds)						\
 	((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
 #define ATH_DESC_4KB_BOUND_CHECK(_daddr) ((((_daddr) & 0xFFF) > 0xF7F) ? 1 : 0)
@@ -113,11 +82,20 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
 /* RX / TX */
 /***********/
 
+#define	ATH_TXQ_SETUP(sc, i) ((sc)->tx.txqsetup & (1<<i))
+
+/* increment with wrap-around */
+#define INCR(_l, _sz)   do {			\
+		(_l)++;				\
+		(_l) &= ((_sz) - 1);		\
+	} while (0)
+
 #define ATH_RXBUF               512
 #define ATH_TXBUF               512
 #define ATH_TXBUF_RESERVE       5
 #define ATH_MAX_QDEPTH          (ATH_TXBUF / 4 - ATH_TXBUF_RESERVE)
 #define ATH_TXMAXTRY            13
+#define ATH_MAX_SW_RETRIES      30
 
 #define TID_TO_WME_AC(_tid)				\
 	((((_tid) == 0) || ((_tid) == 3)) ? IEEE80211_AC_BE :	\
@@ -133,6 +111,9 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
 #define ATH_AGGR_MIN_QDEPTH        2
 /* minimum h/w qdepth for non-aggregated traffic */
 #define ATH_NON_AGGR_MIN_QDEPTH    8
+#define ATH_TX_COMPLETE_POLL_INT   1000
+#define ATH_TXFIFO_DEPTH           8
+#define ATH_TX_ERROR               0x01
 
 #define IEEE80211_SEQ_SEQ_SHIFT    4
 #define IEEE80211_SEQ_MAX          4096
@@ -165,11 +146,10 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
 
 #define ATH_AN_2_TID(_an, _tidno)  (&(_an)->tid[(_tidno)])
 
-#define IS_CCK_RATE(rate) ((rate >= 0x18) && (rate <= 0x1e))
+#define IS_HT_RATE(rate)   (rate & 0x80)
+#define IS_CCK_RATE(rate)  ((rate >= 0x18) && (rate <= 0x1e))
+#define IS_OFDM_RATE(rate) ((rate >= 0x8) && (rate <= 0xf))
 
-#define ATH_TX_COMPLETE_POLL_INT	1000
-
-#define ATH_TXFIFO_DEPTH 8
 struct ath_txq {
 	int mac80211_qnum; /* mac80211 queue number, -1 means not mac80211 Q */
 	u32 axq_qnum; /* ath9k hardware queue number */
@@ -214,6 +194,21 @@ struct ath_rxbuf {
 	dma_addr_t bf_buf_addr;
 };
 
+/**
+ * enum buffer_type - Buffer type flags
+ *
+ * @BUF_AMPDU: This buffer is an ampdu, as part of an aggregate (during TX)
+ * @BUF_AGGR: Indicates whether the buffer can be aggregated
+ *	(used in aggregation scheduling)
+ */
+enum buffer_type {
+	BUF_AMPDU		= BIT(0),
+	BUF_AGGR		= BIT(1),
+};
+
+#define bf_isampdu(bf)		(bf->bf_state.bf_type & BUF_AMPDU)
+#define bf_isaggr(bf)		(bf->bf_state.bf_type & BUF_AGGR)
+
 struct ath_buf_state {
 	u8 bf_type;
 	u8 bfs_paprd;
@@ -269,6 +264,10 @@ struct ath_node {
 
 	bool sleeping;
 	bool no_ps_filter;
+
+#ifdef CONFIG_ATH9K_STATION_STATISTICS
+	struct ath_rx_rate_stats rx_rate_stats;
+#endif
 };
 
 struct ath_tx_control {
@@ -278,7 +277,6 @@ struct ath_tx_control {
 	struct ieee80211_sta *sta;
 };
 
-#define ATH_TX_ERROR        0x01
 
 /**
  * @txq_map:  Index is mac80211 queue number.  This is
@@ -372,6 +370,22 @@ struct ath_vif {
 	struct ath_buf *av_bcbuf;
 };
 
+struct ath9k_vif_iter_data {
+	u8 hw_macaddr[ETH_ALEN]; /* address of the first vif */
+	u8 mask[ETH_ALEN]; /* bssid mask */
+	bool has_hw_macaddr;
+
+	int naps;      /* number of AP vifs */
+	int nmeshes;   /* number of mesh vifs */
+	int nstations; /* number of station vifs */
+	int nwds;      /* number of WDS vifs */
+	int nadhocs;   /* number of adhoc vifs */
+};
+
+void ath9k_calculate_iter_data(struct ieee80211_hw *hw,
+			       struct ieee80211_vif *vif,
+			       struct ath9k_vif_iter_data *iter_data);
+
 /*******************/
 /* Beacon Handling */
 /*******************/
@@ -387,6 +401,9 @@ struct ath_vif {
 #define ATH_DEFAULT_BMISS_LIMIT 	10
 #define IEEE80211_MS_TO_TU(x)           (((x) * 1000) / 1024)
 
+#define TSF_TO_TU(_h,_l) \
+	((((u32)(_h)) << 22) | (((u32)(_l)) >> 10))
+
 struct ath_beacon_config {
 	int beacon_interval;
 	u16 listen_interval;
@@ -420,12 +437,10 @@ struct ath_beacon {
 };
 
 void ath9k_beacon_tasklet(unsigned long data);
-bool ath9k_allow_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif);
 void ath9k_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif,
 			 u32 changed);
 void ath9k_beacon_assign_slot(struct ath_softc *sc, struct ieee80211_vif *vif);
 void ath9k_beacon_remove_slot(struct ath_softc *sc, struct ieee80211_vif *vif);
-void ath9k_set_tsfadjust(struct ath_softc *sc, struct ieee80211_vif *vif);
 void ath9k_set_beacon(struct ath_softc *sc);
 bool ath9k_csa_is_finished(struct ath_softc *sc);
 
@@ -440,17 +455,14 @@ bool ath9k_csa_is_finished(struct ath_softc *sc);
 #define ATH_LONG_CALINTERVAL_INT  1000    /* 1000 ms */
 #define ATH_LONG_CALINTERVAL      30000   /* 30 seconds */
 #define ATH_RESTART_CALINTERVAL   1200000 /* 20 minutes */
-#define ATH_ANI_MAX_SKIP_COUNT  10
-
-#define ATH_PAPRD_TIMEOUT	100 /* msecs */
-#define ATH_PLL_WORK_INTERVAL   100
+#define ATH_ANI_MAX_SKIP_COUNT    10
+#define ATH_PAPRD_TIMEOUT         100 /* msecs */
+#define ATH_PLL_WORK_INTERVAL     100
 
 void ath_tx_complete_poll_work(struct work_struct *work);
 void ath_reset_work(struct work_struct *work);
-void ath_hw_check(struct work_struct *work);
+bool ath_hw_check(struct ath_softc *sc);
 void ath_hw_pll_work(struct work_struct *work);
-void ath_rx_poll(unsigned long data);
-void ath_start_rx_poll(struct ath_softc *sc, u8 nbeacon);
 void ath_paprd_calibrate(struct work_struct *work);
 void ath_ani_calibrate(unsigned long data);
 void ath_start_ani(struct ath_softc *sc);
@@ -459,6 +471,7 @@ void ath_check_ani(struct ath_softc *sc);
 int ath_update_survey_stats(struct ath_softc *sc);
 void ath_update_survey_nf(struct ath_softc *sc, int channel);
 void ath9k_queue_reset(struct ath_softc *sc, enum ath_reset_type type);
+void ath_ps_full_sleep(unsigned long data);
 
 /**********/
 /* BTCOEX */
@@ -476,20 +489,19 @@ enum bt_op_flags {
 };
 
 struct ath_btcoex {
-	bool hw_timer_enabled;
 	spinlock_t btcoex_lock;
 	struct timer_list period_timer; /* Timer for BT period */
+	struct timer_list no_stomp_timer;
 	u32 bt_priority_cnt;
 	unsigned long bt_priority_time;
 	unsigned long op_flags;
 	int bt_stomp_type; /* Types of BT stomping */
-	u32 btcoex_no_stomp; /* in usec */
+	u32 btcoex_no_stomp; /* in msec */
 	u32 btcoex_period; /* in msec */
-	u32 btscan_no_stomp; /* in usec */
+	u32 btscan_no_stomp; /* in msec */
 	u32 duty_cycle;
 	u32 bt_wait_time;
 	int rssi_count;
-	struct ath_gen_timer *no_stomp_timer; /* Timer for no BT stomping */
 	struct ath_mci_profile mci;
 	u8 stomp_audio;
 };
@@ -537,12 +549,6 @@ static inline int ath9k_dump_btcoex(struct ath_softc *sc, u8 *buf, u32 size)
 }
 #endif /* CONFIG_ATH9K_BTCOEX_SUPPORT */
 
-struct ath9k_wow_pattern {
-	u8 pattern_bytes[MAX_PATTERN_SIZE];
-	u8 mask_bytes[MAX_PATTERN_SIZE];
-	u32 pattern_len;
-};
-
 /********************/
 /*   LED Control    */
 /********************/
@@ -570,6 +576,40 @@ static inline void ath_fill_led_pin(struct ath_softc *sc)
 }
 #endif
 
+/************************/
+/* Wake on Wireless LAN */
+/************************/
+
+struct ath9k_wow_pattern {
+	u8 pattern_bytes[MAX_PATTERN_SIZE];
+	u8 mask_bytes[MAX_PATTERN_SIZE];
+	u32 pattern_len;
+};
+
+#ifdef CONFIG_ATH9K_WOW
+void ath9k_init_wow(struct ieee80211_hw *hw);
+int ath9k_suspend(struct ieee80211_hw *hw,
+		  struct cfg80211_wowlan *wowlan);
+int ath9k_resume(struct ieee80211_hw *hw);
+void ath9k_set_wakeup(struct ieee80211_hw *hw, bool enabled);
+#else
+static inline void ath9k_init_wow(struct ieee80211_hw *hw)
+{
+}
+static inline int ath9k_suspend(struct ieee80211_hw *hw,
+				struct cfg80211_wowlan *wowlan)
+{
+	return 0;
+}
+static inline int ath9k_resume(struct ieee80211_hw *hw)
+{
+	return 0;
+}
+static inline void ath9k_set_wakeup(struct ieee80211_hw *hw, bool enabled)
+{
+}
+#endif /* CONFIG_ATH9K_WOW */
+
 /*******************************/
 /* Antenna diversity/combining */
 /*******************************/
@@ -642,19 +682,16 @@ void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs);
 #define ATH9K_PCI_AR9565_1ANT     0x0080
 #define ATH9K_PCI_AR9565_2ANT     0x0100
 #define ATH9K_PCI_NO_PLL_PWRSAVE  0x0200
+#define ATH9K_PCI_KILLER          0x0400
 
 /*
  * Default cache line size, in bytes.
  * Used when PCI device not fully initialized by bootrom/BIOS
 */
 #define DEFAULT_CACHELINE       32
-#define ATH_REGCLASSIDS_MAX     10
 #define ATH_CABQ_READY_TIME     80      /* % of beacon interval */
-#define ATH_MAX_SW_RETRIES      30
-#define ATH_CHAN_MAX            255
-
 #define ATH_TXPOWER_MAX         100     /* .5 dBm units */
-#define ATH_RATE_DUMMY_MARKER   0
+#define MAX_GTT_CNT             5
 
 enum sc_op_flags {
 	SC_OP_INVALID,
@@ -673,37 +710,6 @@ enum sc_op_flags {
 #define PS_BEACON_SYNC            BIT(4)
 #define PS_WAIT_FOR_ANI           BIT(5)
 
-struct ath_rate_table;
-
-struct ath9k_vif_iter_data {
-	u8 hw_macaddr[ETH_ALEN]; /* address of the first vif */
-	u8 mask[ETH_ALEN]; /* bssid mask */
-	bool has_hw_macaddr;
-
-	int naps;      /* number of AP vifs */
-	int nmeshes;   /* number of mesh vifs */
-	int nstations; /* number of station vifs */
-	int nwds;      /* number of WDS vifs */
-	int nadhocs;   /* number of adhoc vifs */
-};
-
-/* enum spectral_mode:
- *
- * @SPECTRAL_DISABLED: spectral mode is disabled
- * @SPECTRAL_BACKGROUND: hardware sends samples when it is not busy with
- *	something else.
- * @SPECTRAL_MANUAL: spectral scan is enabled, triggering for samples
- *	is performed manually.
- * @SPECTRAL_CHANSCAN: Like manual, but also triggered when changing channels
- *	during a channel scan.
- */
-enum spectral_mode {
-	SPECTRAL_DISABLED = 0,
-	SPECTRAL_BACKGROUND,
-	SPECTRAL_MANUAL,
-	SPECTRAL_CHANSCAN,
-};
-
 struct ath_softc {
 	struct ieee80211_hw *hw;
 	struct device *dev;
@@ -721,14 +727,14 @@ struct ath_softc {
 	spinlock_t sc_pcu_lock;
 	struct mutex mutex;
 	struct work_struct paprd_work;
-	struct work_struct hw_check_work;
 	struct work_struct hw_reset_work;
 	struct completion paprd_complete;
+	wait_queue_head_t tx_wait;
 
-	unsigned int hw_busy_count;
 	unsigned long sc_flags;
 	unsigned long driver_data;
 
+	u8 gtt_cnt;
 	u32 intrstatus;
 	u16 ps_flags; /* PS_* */
 	u16 curtxpow;
@@ -759,7 +765,7 @@ struct ath_softc {
 	struct ath_beacon_config cur_beacon_conf;
 	struct delayed_work tx_complete_work;
 	struct delayed_work hw_pll_work;
-	struct timer_list rx_poll_timer;
+	struct timer_list sleep_timer;
 
 #ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
 	struct ath_btcoex btcoex;
@@ -784,199 +790,54 @@ struct ath_softc {
 	bool tx99_state;
 	s16 tx99_power;
 
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_ATH9K_WOW
 	atomic_t wow_got_bmiss_intr;
 	atomic_t wow_sleep_proc_intr; /* in the middle of WoW sleep ? */
 	u32 wow_intr_before_sleep;
 #endif
 };
 
-#define SPECTRAL_SCAN_BITMASK		0x10
-/* Radar info packet format, used for DFS and spectral formats. */
-struct ath_radar_info {
-	u8 pulse_length_pri;
-	u8 pulse_length_ext;
-	u8 pulse_bw_info;
-} __packed;
-
-/* The HT20 spectral data has 4 bytes of additional information at it's end.
- *
- * [7:0]: all bins {max_magnitude[1:0], bitmap_weight[5:0]}
- * [7:0]: all bins  max_magnitude[9:2]
- * [7:0]: all bins {max_index[5:0], max_magnitude[11:10]}
- * [3:0]: max_exp (shift amount to size max bin to 8-bit unsigned)
- */
-struct ath_ht20_mag_info {
-	u8 all_bins[3];
-	u8 max_exp;
-} __packed;
-
-#define SPECTRAL_HT20_NUM_BINS		56
-
-/* WARNING: don't actually use this struct! MAC may vary the amount of
- * data by -1/+2. This struct is for reference only.
- */
-struct ath_ht20_fft_packet {
-	u8 data[SPECTRAL_HT20_NUM_BINS];
-	struct ath_ht20_mag_info mag_info;
-	struct ath_radar_info radar_info;
-} __packed;
-
-#define SPECTRAL_HT20_TOTAL_DATA_LEN	(sizeof(struct ath_ht20_fft_packet))
-
-/* Dynamic 20/40 mode:
- *
- * [7:0]: lower bins {max_magnitude[1:0], bitmap_weight[5:0]}
- * [7:0]: lower bins  max_magnitude[9:2]
- * [7:0]: lower bins {max_index[5:0], max_magnitude[11:10]}
- * [7:0]: upper bins {max_magnitude[1:0], bitmap_weight[5:0]}
- * [7:0]: upper bins  max_magnitude[9:2]
- * [7:0]: upper bins {max_index[5:0], max_magnitude[11:10]}
- * [3:0]: max_exp (shift amount to size max bin to 8-bit unsigned)
- */
-struct ath_ht20_40_mag_info {
-	u8 lower_bins[3];
-	u8 upper_bins[3];
-	u8 max_exp;
-} __packed;
-
-#define SPECTRAL_HT20_40_NUM_BINS		128
-
-/* WARNING: don't actually use this struct! MAC may vary the amount of
- * data. This struct is for reference only.
- */
-struct ath_ht20_40_fft_packet {
-	u8 data[SPECTRAL_HT20_40_NUM_BINS];
-	struct ath_ht20_40_mag_info mag_info;
-	struct ath_radar_info radar_info;
-} __packed;
-
-
-#define SPECTRAL_HT20_40_TOTAL_DATA_LEN	(sizeof(struct ath_ht20_40_fft_packet))
-
-/* grabs the max magnitude from the all/upper/lower bins */
-static inline u16 spectral_max_magnitude(u8 *bins)
-{
-	return (bins[0] & 0xc0) >> 6 |
-	       (bins[1] & 0xff) << 2 |
-	       (bins[2] & 0x03) << 10;
-}
+/********/
+/* TX99 */
+/********/
 
-/* return the max magnitude from the all/upper/lower bins */
-static inline u8 spectral_max_index(u8 *bins)
+#ifdef CONFIG_ATH9K_TX99
+void ath9k_tx99_init_debug(struct ath_softc *sc);
+int ath9k_tx99_send(struct ath_softc *sc, struct sk_buff *skb,
+		    struct ath_tx_control *txctl);
+#else
+static inline void ath9k_tx99_init_debug(struct ath_softc *sc)
 {
-	s8 m = (bins[2] & 0xfc) >> 2;
-
-	/* TODO: this still doesn't always report the right values ... */
-	if (m > 32)
-		m |= 0xe0;
-	else
-		m &= ~0xe0;
-
-	return m + 29;
 }
-
-/* return the bitmap weight from the all/upper/lower bins */
-static inline u8 spectral_bitmap_weight(u8 *bins)
+static inline int ath9k_tx99_send(struct ath_softc *sc,
+				  struct sk_buff *skb,
+				  struct ath_tx_control *txctl)
 {
-	return bins[0] & 0x3f;
+	return 0;
 }
-
-/* FFT sample format given to userspace via debugfs.
- *
- * Please keep the type/length at the front position and change
- * other fields after adding another sample type
- *
- * TODO: this might need rework when switching to nl80211-based
- * interface.
- */
-enum ath_fft_sample_type {
-	ATH_FFT_SAMPLE_HT20 = 1,
-	ATH_FFT_SAMPLE_HT20_40,
-};
-
-struct fft_sample_tlv {
-	u8 type;	/* see ath_fft_sample */
-	__be16 length;
-	/* type dependent data follows */
-} __packed;
-
-struct fft_sample_ht20 {
-	struct fft_sample_tlv tlv;
-
-	u8 max_exp;
-
-	__be16 freq;
-	s8 rssi;
-	s8 noise;
-
-	__be16 max_magnitude;
-	u8 max_index;
-	u8 bitmap_weight;
-
-	__be64 tsf;
-
-	u8 data[SPECTRAL_HT20_NUM_BINS];
-} __packed;
-
-struct fft_sample_ht20_40 {
-	struct fft_sample_tlv tlv;
-
-	u8 channel_type;
-	__be16 freq;
-
-	s8 lower_rssi;
-	s8 upper_rssi;
-
-	__be64 tsf;
-
-	s8 lower_noise;
-	s8 upper_noise;
-
-	__be16 lower_max_magnitude;
-	__be16 upper_max_magnitude;
-
-	u8 lower_max_index;
-	u8 upper_max_index;
-
-	u8 lower_bitmap_weight;
-	u8 upper_bitmap_weight;
-
-	u8 max_exp;
-
-	u8 data[SPECTRAL_HT20_40_NUM_BINS];
-} __packed;
-
-int ath9k_tx99_init(struct ath_softc *sc);
-void ath9k_tx99_deinit(struct ath_softc *sc);
-int ath9k_tx99_send(struct ath_softc *sc, struct sk_buff *skb,
-		    struct ath_tx_control *txctl);
-
-void ath9k_tasklet(unsigned long data);
-int ath_cabq_update(struct ath_softc *);
+#endif /* CONFIG_ATH9K_TX99 */
 
 static inline void ath_read_cachesize(struct ath_common *common, int *csz)
 {
 	common->bus_ops->read_cachesize(common, csz);
 }
 
-extern struct ieee80211_ops ath9k_ops;
-extern int ath9k_modparam_nohwcrypt;
-extern int led_blink;
-extern bool is_ath9k_unloaded;
-
+void ath9k_tasklet(unsigned long data);
+int ath_cabq_update(struct ath_softc *);
 u8 ath9k_parse_mpdudensity(u8 mpdudensity);
 irqreturn_t ath_isr(int irq, void *dev);
+int ath_reset(struct ath_softc *sc);
+void ath_cancel_work(struct ath_softc *sc);
+void ath_restart_work(struct ath_softc *sc);
 int ath9k_init_device(u16 devid, struct ath_softc *sc,
 		    const struct ath_bus_ops *bus_ops);
 void ath9k_deinit_device(struct ath_softc *sc);
-void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw);
 void ath9k_reload_chainmask_settings(struct ath_softc *sc);
-
-void ath9k_spectral_scan_trigger(struct ieee80211_hw *hw);
-int ath9k_spectral_scan_config(struct ieee80211_hw *hw,
-			       enum spectral_mode spectral_mode);
-
+u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate);
+void ath_start_rfkill_poll(struct ath_softc *sc);
+void ath9k_rfkill_poll_state(struct ieee80211_hw *hw);
+void ath9k_ps_wakeup(struct ath_softc *sc);
+void ath9k_ps_restore(struct ath_softc *sc);
 
 #ifdef CONFIG_ATH9K_PCI
 int ath_pci_init(void);
@@ -994,15 +855,4 @@ static inline int ath_ahb_init(void) { return 0; };
 static inline void ath_ahb_exit(void) {};
 #endif
 
-void ath9k_ps_wakeup(struct ath_softc *sc);
-void ath9k_ps_restore(struct ath_softc *sc);
-
-u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate);
-
-void ath_start_rfkill_poll(struct ath_softc *sc);
-void ath9k_rfkill_poll_state(struct ieee80211_hw *hw);
-void ath9k_calculate_iter_data(struct ieee80211_hw *hw,
-			       struct ieee80211_vif *vif,
-			       struct ath9k_vif_iter_data *iter_data);
-
 #endif /* ATH9K_H */
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index 17be35392bb4..2e8bba0eb361 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -274,18 +274,19 @@ static int ath9k_beacon_choose_slot(struct ath_softc *sc)
 	return slot;
 }
 
-void ath9k_set_tsfadjust(struct ath_softc *sc, struct ieee80211_vif *vif)
+static void ath9k_set_tsfadjust(struct ath_softc *sc, struct ieee80211_vif *vif)
 {
 	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
 	struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
 	struct ath_vif *avp = (void *)vif->drv_priv;
-	u64 tsfadjust;
+	u32 tsfadjust;
 
 	if (avp->av_bslot == 0)
 		return;
 
-	tsfadjust = cur_conf->beacon_interval * avp->av_bslot / ATH_BCBUF;
-	avp->tsf_adjust = cpu_to_le64(TU_TO_USEC(tsfadjust));
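+	/* Offset this slot's TSF by bslot * (beacon interval / ATH_BCBUF),
+	 * computed in microseconds. */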
+	tsfadjust = cur_conf->beacon_interval * avp->av_bslot;
+	tsfadjust = TU_TO_USEC(tsfadjust) / ATH_BCBUF;
+	avp->tsf_adjust = cpu_to_le64(tsfadjust);
 
 	ath_dbg(common, CONFIG, "tsfadjust is: %llu for bslot: %d\n",
 		(unsigned long long)tsfadjust, avp->av_bslot);
@@ -336,8 +337,14 @@ void ath9k_beacon_tasklet(unsigned long data)
 
 		ath9k_hw_check_nav(ah);
 
-		if (!ath9k_hw_check_alive(ah))
-			ieee80211_queue_work(sc->hw, &sc->hw_check_work);
+		/*
+		 * If the previous beacon has not been transmitted
+		 * and a MAC/BB hang has been identified, return
+		 * here because a chip reset would have been
+		 * initiated.
+		 */
+		if (!ath_hw_check(sc))
+			return;
 
 		if (sc->beacon.bmisscnt < BSTUCK_THRESH * sc->nbcnvifs) {
 			ath_dbg(common, BSTUCK,
@@ -431,6 +438,33 @@ static void ath9k_beacon_init(struct ath_softc *sc, u32 nexttbtt,
 	ath9k_hw_enable_interrupts(ah);
 }
 
+/* Calculate the modulo of a 64 bit TSF snapshot with a TU divisor */
+static u32 ath9k_mod_tsf64_tu(u64 tsf, u32 div_tu)
+{
+	u32 tsf_mod, tsf_hi, tsf_lo, mod_hi, mod_lo;
+
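+	/*
+	 * Compute the TU remainder piecewise so the math stays within
+	 * 32 bits: reduce the high word, fold in the low word, then
+	 * scale the remainder back to microseconds and re-attach the
+	 * sub-TU bits (tsf_mod).
+	 */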
+	tsf_mod = tsf & (BIT(10) - 1);
+	tsf_hi = tsf >> 32;
+	tsf_lo = ((u32) tsf) >> 10;
+
+	mod_hi = tsf_hi % div_tu;
+	mod_lo = ((mod_hi << 22) + tsf_lo) % div_tu;
+
+	return (mod_lo << 10) | tsf_mod;
+}
+
+static u32 ath9k_get_next_tbtt(struct ath_softc *sc, u64 tsf,
+			       unsigned int interval)
+{
+	struct ath_hw *ah = sc->sc_ah;
+	unsigned int offset;
+
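+	/*
+	 * Nudge the TSF forward by the software beacon response time,
+	 * then round up to the next beacon interval boundary.
+	 */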
+	tsf += TU_TO_USEC(FUDGE + ah->config.sw_beacon_response_time);
+	offset = ath9k_mod_tsf64_tu(tsf, interval);
+
+	return (u32) tsf + TU_TO_USEC(interval) - offset;
+}
+
 /*
  * For multi-bss ap support beacons are either staggered evenly over N slots or
  * burst together.  For the former arrange for the SWBA to be delivered for each
@@ -446,7 +480,8 @@ static void ath9k_beacon_config_ap(struct ath_softc *sc,
 	/* NB: the beacon interval is kept internally in TU's */
 	intval = TU_TO_USEC(conf->beacon_interval);
 	intval /= ATH_BCBUF;
-	nexttbtt = intval;
+	nexttbtt = ath9k_get_next_tbtt(sc, ath9k_hw_gettsf64(ah),
+				       conf->beacon_interval);
 
 	if (conf->enable_beacon)
 		ah->imask |= ATH9K_INT_SWBA;
@@ -458,7 +493,7 @@ static void ath9k_beacon_config_ap(struct ath_softc *sc,
 		(conf->enable_beacon) ? "Enable" : "Disable",
 		nexttbtt, intval, conf->beacon_interval);
 
-	ath9k_beacon_init(sc, nexttbtt, intval, true);
+	ath9k_beacon_init(sc, nexttbtt, intval, false);
 }
 
 /*
@@ -475,11 +510,9 @@ static void ath9k_beacon_config_sta(struct ath_softc *sc,
 	struct ath_hw *ah = sc->sc_ah;
 	struct ath_common *common = ath9k_hw_common(ah);
 	struct ath9k_beacon_state bs;
-	int dtimperiod, dtimcount, sleepduration;
-	int cfpperiod, cfpcount;
-	u32 nexttbtt = 0, intval, tsftu;
+	int dtim_intval, sleepduration;
+	u32 nexttbtt = 0, intval;
 	u64 tsf;
-	int num_beacons, offset, dtim_dec_count, cfp_dec_count;
 
 	/* No need to configure beacon if we are not associated */
 	if (!test_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags)) {
@@ -492,53 +525,25 @@ static void ath9k_beacon_config_sta(struct ath_softc *sc,
 	intval = conf->beacon_interval;
 
 	/*
-	 * Setup dtim and cfp parameters according to
+	 * Setup dtim parameters according to
 	 * last beacon we received (which may be none).
 	 */
-	dtimperiod = conf->dtim_period;
-	dtimcount = conf->dtim_count;
-	if (dtimcount >= dtimperiod)	/* NB: sanity check */
-		dtimcount = 0;
-	cfpperiod = 1;			/* NB: no PCF support yet */
-	cfpcount = 0;
-
+	dtim_intval = intval * conf->dtim_period;
 	sleepduration = conf->listen_interval * intval;
 
 	/*
 	 * Pull nexttbtt forward to reflect the current
-	 * TSF and calculate dtim+cfp state for the result.
+	 * TSF and calculate dtim state for the result.
 	 */
 	tsf = ath9k_hw_gettsf64(ah);
-	tsftu = TSF_TO_TU(tsf>>32, tsf) + FUDGE;
-
-	num_beacons = tsftu / intval + 1;
-	offset = tsftu % intval;
-	nexttbtt = tsftu - offset;
-	if (offset)
-		nexttbtt += intval;
-
-	/* DTIM Beacon every dtimperiod Beacon */
-	dtim_dec_count = num_beacons % dtimperiod;
-	/* CFP every cfpperiod DTIM Beacon */
-	cfp_dec_count = (num_beacons / dtimperiod) % cfpperiod;
-	if (dtim_dec_count)
-		cfp_dec_count++;
-
-	dtimcount -= dtim_dec_count;
-	if (dtimcount < 0)
-		dtimcount += dtimperiod;
-
-	cfpcount -= cfp_dec_count;
-	if (cfpcount < 0)
-		cfpcount += cfpperiod;
-
-	bs.bs_intval = intval;
+	nexttbtt = ath9k_get_next_tbtt(sc, tsf, intval);
+
+	bs.bs_intval = TU_TO_USEC(intval);
+	bs.bs_dtimperiod = conf->dtim_period * bs.bs_intval;
 	bs.bs_nexttbtt = nexttbtt;
-	bs.bs_dtimperiod = dtimperiod*intval;
-	bs.bs_nextdtim = bs.bs_nexttbtt + dtimcount*intval;
-	bs.bs_cfpperiod = cfpperiod*bs.bs_dtimperiod;
-	bs.bs_cfpnext = bs.bs_nextdtim + cfpcount*bs.bs_dtimperiod;
-	bs.bs_cfpmaxduration = 0;
+	bs.bs_nextdtim = nexttbtt;
+	if (conf->dtim_period > 1)
+		bs.bs_nextdtim = ath9k_get_next_tbtt(sc, tsf, dtim_intval);
 
 	/*
 	 * Calculate the number of consecutive beacons to miss* before taking
@@ -566,18 +571,16 @@ static void ath9k_beacon_config_sta(struct ath_softc *sc,
 	 * XXX fixed at 100ms
 	 */
 
-	bs.bs_sleepduration = roundup(IEEE80211_MS_TO_TU(100), sleepduration);
+	bs.bs_sleepduration = TU_TO_USEC(roundup(IEEE80211_MS_TO_TU(100),
+						 sleepduration));
 	if (bs.bs_sleepduration > bs.bs_dtimperiod)
 		bs.bs_sleepduration = bs.bs_dtimperiod;
 
 	/* TSF out of range threshold fixed at 1 second */
 	bs.bs_tsfoor_threshold = ATH9K_TSFOOR_THRESHOLD;
 
-	ath_dbg(common, BEACON, "tsf: %llu tsftu: %u\n", tsf, tsftu);
-	ath_dbg(common, BEACON,
-		"bmiss: %u sleep: %u cfp-period: %u maxdur: %u next: %u\n",
-		bs.bs_bmissthreshold, bs.bs_sleepduration,
-		bs.bs_cfpperiod, bs.bs_cfpmaxduration, bs.bs_cfpnext);
+	ath_dbg(common, BEACON, "bmiss: %u sleep: %u\n",
+		bs.bs_bmissthreshold, bs.bs_sleepduration);
 
 	/* Set the computed STA beacon timers */
 
@@ -600,25 +603,11 @@ static void ath9k_beacon_config_adhoc(struct ath_softc *sc,
 
 	intval = TU_TO_USEC(conf->beacon_interval);
 
-	if (conf->ibss_creator) {
+	if (conf->ibss_creator)
 		nexttbtt = intval;
-	} else {
-		u32 tbtt, offset, tsftu;
-		u64 tsf;
-
-		/*
-		 * Pull nexttbtt forward to reflect the current
-		 * sync'd TSF.
-		 */
-		tsf = ath9k_hw_gettsf64(ah);
-		tsftu = TSF_TO_TU(tsf >> 32, tsf) + FUDGE;
-		offset = tsftu % conf->beacon_interval;
-		tbtt = tsftu - offset;
-		if (offset)
-			tbtt += conf->beacon_interval;
-
-		nexttbtt = TU_TO_USEC(tbtt);
-	}
+	else
+		nexttbtt = ath9k_get_next_tbtt(sc, ath9k_hw_gettsf64(ah),
+					       conf->beacon_interval);
 
 	if (conf->enable_beacon)
 		ah->imask |= ATH9K_INT_SWBA;
@@ -640,7 +629,8 @@ static void ath9k_beacon_config_adhoc(struct ath_softc *sc,
 		set_bit(SC_OP_BEACONS, &sc->sc_flags);
 }
 
-bool ath9k_allow_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif)
+static bool ath9k_allow_beacon_config(struct ath_softc *sc,
+				      struct ieee80211_vif *vif)
 {
 	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
 	struct ath_vif *avp = (void *)vif->drv_priv;
@@ -711,12 +701,17 @@ void ath9k_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif,
 	unsigned long flags;
 	bool skip_beacon = false;
 
+	if (vif->type == NL80211_IFTYPE_AP)
+		ath9k_set_tsfadjust(sc, vif);
+
+	if (!ath9k_allow_beacon_config(sc, vif))
+		return;
+
 	if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION) {
 		ath9k_cache_beacon_config(sc, bss_conf);
 		ath9k_set_beacon(sc);
 		set_bit(SC_OP_BEACONS, &sc->sc_flags);
 		return;
-
 	}
 
 	/*
diff --git a/drivers/net/wireless/ath/ath9k/btcoex.c b/drivers/net/wireless/ath/ath9k/btcoex.c
index 9963b0bf9f72..3dfc2c7f1f07 100644
--- a/drivers/net/wireless/ath/ath9k/btcoex.c
+++ b/drivers/net/wireless/ath/ath9k/btcoex.c
@@ -66,7 +66,6 @@ void ath9k_hw_init_btcoex_hw(struct ath_hw *ah, int qnum)
 		.bt_first_slot_time = 5,
 		.bt_hold_rx_clear = true,
 	};
-	u32 i, idx;
 	bool rxclear_polarity = ath_bt_config.bt_rxclear_polarity;
 
 	if (AR_SREV_9300_20_OR_LATER(ah))
@@ -88,11 +87,6 @@ void ath9k_hw_init_btcoex_hw(struct ath_hw *ah, int qnum)
 		SM(ath_bt_config.bt_hold_rx_clear, AR_BT_HOLD_RX_CLEAR) |
 		SM(ATH_BTCOEX_BMISS_THRESH, AR_BT_BCN_MISS_THRESH) |
 		AR_BT_DISABLE_BT_ANT;
-
-	for (i = 0; i < 32; i++) {
-		idx = (debruijn32 << i) >> 27;
-		ah->hw_gen_timers.gen_timer_index[idx] = i;
-	}
 }
 EXPORT_SYMBOL(ath9k_hw_init_btcoex_hw);
 
diff --git a/drivers/net/wireless/ath/ath9k/common.c b/drivers/net/wireless/ath/ath9k/common.c
index a7e5a05b2eff..768c733cad31 100644
--- a/drivers/net/wireless/ath/ath9k/common.c
+++ b/drivers/net/wireless/ath/ath9k/common.c
@@ -98,10 +98,8 @@ struct ath9k_channel *ath9k_cmn_get_channel(struct ieee80211_hw *hw,
 {
 	struct ieee80211_channel *curchan = chandef->chan;
 	struct ath9k_channel *channel;
-	u8 chan_idx;
 
-	chan_idx = curchan->hw_value;
-	channel = &ah->channels[chan_idx];
+	channel = &ah->channels[curchan->hw_value];
 	ath9k_cmn_update_ichannel(channel, chandef);
 
 	return channel;
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index 83a2c59f680b..ab7264c1d8f7 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -17,7 +17,6 @@
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
 #include <linux/export.h>
-#include <linux/relay.h>
 #include <asm/unaligned.h>
 
 #include "ath9k.h"
@@ -27,6 +26,47 @@
 #define REG_READ_D(_ah, _reg) \
 	ath9k_hw_common(_ah)->ops->read((_ah), (_reg))
 
+void ath9k_debug_sync_cause(struct ath_softc *sc, u32 sync_cause)
+{
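+	/* Count each asserted sync-cause bit in the interrupt statistics */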
+	if (sync_cause)
+		sc->debug.stats.istats.sync_cause_all++;
+	if (sync_cause & AR_INTR_SYNC_RTC_IRQ)
+		sc->debug.stats.istats.sync_rtc_irq++;
+	if (sync_cause & AR_INTR_SYNC_MAC_IRQ)
+		sc->debug.stats.istats.sync_mac_irq++;
+	if (sync_cause & AR_INTR_SYNC_EEPROM_ILLEGAL_ACCESS)
+		sc->debug.stats.istats.eeprom_illegal_access++;
+	if (sync_cause & AR_INTR_SYNC_APB_TIMEOUT)
+		sc->debug.stats.istats.apb_timeout++;
+	if (sync_cause & AR_INTR_SYNC_PCI_MODE_CONFLICT)
+		sc->debug.stats.istats.pci_mode_conflict++;
+	if (sync_cause & AR_INTR_SYNC_HOST1_FATAL)
+		sc->debug.stats.istats.host1_fatal++;
+	if (sync_cause & AR_INTR_SYNC_HOST1_PERR)
+		sc->debug.stats.istats.host1_perr++;
+	if (sync_cause & AR_INTR_SYNC_TRCV_FIFO_PERR)
+		sc->debug.stats.istats.trcv_fifo_perr++;
+	if (sync_cause & AR_INTR_SYNC_RADM_CPL_EP)
+		sc->debug.stats.istats.radm_cpl_ep++;
+	if (sync_cause & AR_INTR_SYNC_RADM_CPL_DLLP_ABORT)
+		sc->debug.stats.istats.radm_cpl_dllp_abort++;
+	if (sync_cause & AR_INTR_SYNC_RADM_CPL_TLP_ABORT)
+		sc->debug.stats.istats.radm_cpl_tlp_abort++;
+	if (sync_cause & AR_INTR_SYNC_RADM_CPL_ECRC_ERR)
+		sc->debug.stats.istats.radm_cpl_ecrc_err++;
+	if (sync_cause & AR_INTR_SYNC_RADM_CPL_TIMEOUT)
+		sc->debug.stats.istats.radm_cpl_timeout++;
+	if (sync_cause & AR_INTR_SYNC_LOCAL_TIMEOUT)
+		sc->debug.stats.istats.local_timeout++;
+	if (sync_cause & AR_INTR_SYNC_PM_ACCESS)
+		sc->debug.stats.istats.pm_access++;
+	if (sync_cause & AR_INTR_SYNC_MAC_AWAKE)
+		sc->debug.stats.istats.mac_awake++;
+	if (sync_cause & AR_INTR_SYNC_MAC_ASLEEP)
+		sc->debug.stats.istats.mac_asleep++;
+	if (sync_cause & AR_INTR_SYNC_MAC_SLEEP_ACCESS)
+		sc->debug.stats.istats.mac_sleep_access++;
+}
 
 static ssize_t ath9k_debugfs_read_buf(struct file *file, char __user *user_buf,
 				      size_t count, loff_t *ppos)
@@ -903,14 +943,10 @@ static const struct file_operations fops_reset = {
 static ssize_t read_file_recv(struct file *file, char __user *user_buf,
 			      size_t count, loff_t *ppos)
 {
-#define PHY_ERR(s, p) \
-	len += scnprintf(buf + len, size - len, "%22s : %10u\n", s, \
-			 sc->debug.stats.rxstats.phy_err_stats[p]);
-
 #define RXS_ERR(s, e)					    \
 	do {						    \
 		len += scnprintf(buf + len, size - len,	    \
-				 "%22s : %10u\n", s,	    \
+				 "%18s : %10u\n", s,	    \
 				 sc->debug.stats.rxstats.e);\
 	} while (0)
 
@@ -923,6 +959,12 @@ static ssize_t read_file_recv(struct file *file, char __user *user_buf,
 	if (buf == NULL)
 		return -ENOMEM;
 
+	RXS_ERR("PKTS-ALL", rx_pkts_all);
+	RXS_ERR("BYTES-ALL", rx_bytes_all);
+	RXS_ERR("BEACONS", rx_beacons);
+	RXS_ERR("FRAGS", rx_frags);
+	RXS_ERR("SPECTRAL", rx_spectral);
+
 	RXS_ERR("CRC ERR", crc_err);
 	RXS_ERR("DECRYPT CRC ERR", decrypt_crc_err);
 	RXS_ERR("PHY ERR", phy_err);
@@ -930,43 +972,10 @@ static ssize_t read_file_recv(struct file *file, char __user *user_buf,
 	RXS_ERR("PRE-DELIM CRC ERR", pre_delim_crc_err);
 	RXS_ERR("POST-DELIM CRC ERR", post_delim_crc_err);
 	RXS_ERR("DECRYPT BUSY ERR", decrypt_busy_err);
-	RXS_ERR("RX-LENGTH-ERR", rx_len_err);
-	RXS_ERR("RX-OOM-ERR", rx_oom_err);
-	RXS_ERR("RX-RATE-ERR", rx_rate_err);
-	RXS_ERR("RX-TOO-MANY-FRAGS", rx_too_many_frags_err);
-
-	PHY_ERR("UNDERRUN ERR", ATH9K_PHYERR_UNDERRUN);
-	PHY_ERR("TIMING ERR", ATH9K_PHYERR_TIMING);
-	PHY_ERR("PARITY ERR", ATH9K_PHYERR_PARITY);
-	PHY_ERR("RATE ERR", ATH9K_PHYERR_RATE);
-	PHY_ERR("LENGTH ERR", ATH9K_PHYERR_LENGTH);
-	PHY_ERR("RADAR ERR", ATH9K_PHYERR_RADAR);
-	PHY_ERR("SERVICE ERR", ATH9K_PHYERR_SERVICE);
-	PHY_ERR("TOR ERR", ATH9K_PHYERR_TOR);
-	PHY_ERR("OFDM-TIMING ERR", ATH9K_PHYERR_OFDM_TIMING);
-	PHY_ERR("OFDM-SIGNAL-PARITY ERR", ATH9K_PHYERR_OFDM_SIGNAL_PARITY);
-	PHY_ERR("OFDM-RATE ERR", ATH9K_PHYERR_OFDM_RATE_ILLEGAL);
-	PHY_ERR("OFDM-LENGTH ERR", ATH9K_PHYERR_OFDM_LENGTH_ILLEGAL);
-	PHY_ERR("OFDM-POWER-DROP ERR", ATH9K_PHYERR_OFDM_POWER_DROP);
-	PHY_ERR("OFDM-SERVICE ERR", ATH9K_PHYERR_OFDM_SERVICE);
-	PHY_ERR("OFDM-RESTART ERR", ATH9K_PHYERR_OFDM_RESTART);
-	PHY_ERR("FALSE-RADAR-EXT ERR", ATH9K_PHYERR_FALSE_RADAR_EXT);
-	PHY_ERR("CCK-TIMING ERR", ATH9K_PHYERR_CCK_TIMING);
-	PHY_ERR("CCK-HEADER-CRC ERR", ATH9K_PHYERR_CCK_HEADER_CRC);
-	PHY_ERR("CCK-RATE ERR", ATH9K_PHYERR_CCK_RATE_ILLEGAL);
-	PHY_ERR("CCK-SERVICE ERR", ATH9K_PHYERR_CCK_SERVICE);
-	PHY_ERR("CCK-RESTART ERR", ATH9K_PHYERR_CCK_RESTART);
-	PHY_ERR("CCK-LENGTH ERR", ATH9K_PHYERR_CCK_LENGTH_ILLEGAL);
-	PHY_ERR("CCK-POWER-DROP ERR", ATH9K_PHYERR_CCK_POWER_DROP);
-	PHY_ERR("HT-CRC ERR", ATH9K_PHYERR_HT_CRC_ERROR);
-	PHY_ERR("HT-LENGTH ERR", ATH9K_PHYERR_HT_LENGTH_ILLEGAL);
-	PHY_ERR("HT-RATE ERR", ATH9K_PHYERR_HT_RATE_ILLEGAL);
-
-	RXS_ERR("RX-Pkts-All", rx_pkts_all);
-	RXS_ERR("RX-Bytes-All", rx_bytes_all);
-	RXS_ERR("RX-Beacons", rx_beacons);
-	RXS_ERR("RX-Frags", rx_frags);
-	RXS_ERR("RX-Spectral", rx_spectral);
+	RXS_ERR("LENGTH-ERR", rx_len_err);
+	RXS_ERR("OOM-ERR", rx_oom_err);
+	RXS_ERR("RATE-ERR", rx_rate_err);
+	RXS_ERR("TOO-MANY-FRAGS", rx_too_many_frags_err);
 
 	if (len > size)
 		len = size;
@@ -977,7 +986,6 @@ static ssize_t read_file_recv(struct file *file, char __user *user_buf,
 	return retval;
 
 #undef RXS_ERR
-#undef PHY_ERR
 }
 
 void ath_debug_stat_rx(struct ath_softc *sc, struct ath_rx_status *rs)
@@ -1016,293 +1024,67 @@ static const struct file_operations fops_recv = {
 	.llseek = default_llseek,
 };
 
-static ssize_t read_file_spec_scan_ctl(struct file *file, char __user *user_buf,
-				       size_t count, loff_t *ppos)
-{
-	struct ath_softc *sc = file->private_data;
-	char *mode = "";
-	unsigned int len;
-
-	switch (sc->spectral_mode) {
-	case SPECTRAL_DISABLED:
-		mode = "disable";
-		break;
-	case SPECTRAL_BACKGROUND:
-		mode = "background";
-		break;
-	case SPECTRAL_CHANSCAN:
-		mode = "chanscan";
-		break;
-	case SPECTRAL_MANUAL:
-		mode = "manual";
-		break;
-	}
-	len = strlen(mode);
-	return simple_read_from_buffer(user_buf, count, ppos, mode, len);
-}
-
-static ssize_t write_file_spec_scan_ctl(struct file *file,
-					const char __user *user_buf,
-					size_t count, loff_t *ppos)
-{
-	struct ath_softc *sc = file->private_data;
-	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
-	char buf[32];
-	ssize_t len;
-
-	if (config_enabled(CONFIG_ATH9K_TX99))
-		return -EOPNOTSUPP;
-
-	len = min(count, sizeof(buf) - 1);
-	if (copy_from_user(buf, user_buf, len))
-		return -EFAULT;
-
-	buf[len] = '\0';
-
-	if (strncmp("trigger", buf, 7) == 0) {
-		ath9k_spectral_scan_trigger(sc->hw);
-	} else if (strncmp("background", buf, 9) == 0) {
-		ath9k_spectral_scan_config(sc->hw, SPECTRAL_BACKGROUND);
-		ath_dbg(common, CONFIG, "spectral scan: background mode enabled\n");
-	} else if (strncmp("chanscan", buf, 8) == 0) {
-		ath9k_spectral_scan_config(sc->hw, SPECTRAL_CHANSCAN);
-		ath_dbg(common, CONFIG, "spectral scan: channel scan mode enabled\n");
-	} else if (strncmp("manual", buf, 6) == 0) {
-		ath9k_spectral_scan_config(sc->hw, SPECTRAL_MANUAL);
-		ath_dbg(common, CONFIG, "spectral scan: manual mode enabled\n");
-	} else if (strncmp("disable", buf, 7) == 0) {
-		ath9k_spectral_scan_config(sc->hw, SPECTRAL_DISABLED);
-		ath_dbg(common, CONFIG, "spectral scan: disabled\n");
-	} else {
-		return -EINVAL;
-	}
-
-	return count;
-}
-
-static const struct file_operations fops_spec_scan_ctl = {
-	.read = read_file_spec_scan_ctl,
-	.write = write_file_spec_scan_ctl,
-	.open = simple_open,
-	.owner = THIS_MODULE,
-	.llseek = default_llseek,
-};
-
-static ssize_t read_file_spectral_short_repeat(struct file *file,
-					       char __user *user_buf,
-					       size_t count, loff_t *ppos)
-{
-	struct ath_softc *sc = file->private_data;
-	char buf[32];
-	unsigned int len;
-
-	len = sprintf(buf, "%d\n", sc->spec_config.short_repeat);
-	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
-}
-
-static ssize_t write_file_spectral_short_repeat(struct file *file,
-						const char __user *user_buf,
-						size_t count, loff_t *ppos)
-{
-	struct ath_softc *sc = file->private_data;
-	unsigned long val;
-	char buf[32];
-	ssize_t len;
-
-	len = min(count, sizeof(buf) - 1);
-	if (copy_from_user(buf, user_buf, len))
-		return -EFAULT;
-
-	buf[len] = '\0';
-	if (kstrtoul(buf, 0, &val))
-		return -EINVAL;
-
-	if (val < 0 || val > 1)
-		return -EINVAL;
-
-	sc->spec_config.short_repeat = val;
-	return count;
-}
-
-static const struct file_operations fops_spectral_short_repeat = {
-	.read = read_file_spectral_short_repeat,
-	.write = write_file_spectral_short_repeat,
-	.open = simple_open,
-	.owner = THIS_MODULE,
-	.llseek = default_llseek,
-};
-
-static ssize_t read_file_spectral_count(struct file *file,
-					char __user *user_buf,
-					size_t count, loff_t *ppos)
-{
-	struct ath_softc *sc = file->private_data;
-	char buf[32];
-	unsigned int len;
-
-	len = sprintf(buf, "%d\n", sc->spec_config.count);
-	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
-}
-
-static ssize_t write_file_spectral_count(struct file *file,
-					 const char __user *user_buf,
-					 size_t count, loff_t *ppos)
+static ssize_t read_file_phy_err(struct file *file, char __user *user_buf,
+				 size_t count, loff_t *ppos)
 {
-	struct ath_softc *sc = file->private_data;
-	unsigned long val;
-	char buf[32];
-	ssize_t len;
-
-	len = min(count, sizeof(buf) - 1);
-	if (copy_from_user(buf, user_buf, len))
-		return -EFAULT;
-
-	buf[len] = '\0';
-	if (kstrtoul(buf, 0, &val))
-		return -EINVAL;
-
-	if (val < 0 || val > 255)
-		return -EINVAL;
-
-	sc->spec_config.count = val;
-	return count;
-}
-
-static const struct file_operations fops_spectral_count = {
-	.read = read_file_spectral_count,
-	.write = write_file_spectral_count,
-	.open = simple_open,
-	.owner = THIS_MODULE,
-	.llseek = default_llseek,
-};
-
-static ssize_t read_file_spectral_period(struct file *file,
-					 char __user *user_buf,
-					 size_t count, loff_t *ppos)
-{
-	struct ath_softc *sc = file->private_data;
-	char buf[32];
-	unsigned int len;
-
-	len = sprintf(buf, "%d\n", sc->spec_config.period);
-	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
-}
-
-static ssize_t write_file_spectral_period(struct file *file,
-					  const char __user *user_buf,
-					  size_t count, loff_t *ppos)
-{
-	struct ath_softc *sc = file->private_data;
-	unsigned long val;
-	char buf[32];
-	ssize_t len;
-
-	len = min(count, sizeof(buf) - 1);
-	if (copy_from_user(buf, user_buf, len))
-		return -EFAULT;
-
-	buf[len] = '\0';
-	if (kstrtoul(buf, 0, &val))
-		return -EINVAL;
-
-	if (val < 0 || val > 255)
-		return -EINVAL;
-
-	sc->spec_config.period = val;
-	return count;
-}
-
-static const struct file_operations fops_spectral_period = {
-	.read = read_file_spectral_period,
-	.write = write_file_spectral_period,
-	.open = simple_open,
-	.owner = THIS_MODULE,
-	.llseek = default_llseek,
-};
+#define PHY_ERR(s, p) \
+	len += scnprintf(buf + len, size - len, "%22s : %10u\n", s, \
+			 sc->debug.stats.rxstats.phy_err_stats[p]);
 
-static ssize_t read_file_spectral_fft_period(struct file *file,
-					     char __user *user_buf,
-					     size_t count, loff_t *ppos)
-{
 	struct ath_softc *sc = file->private_data;
-	char buf[32];
-	unsigned int len;
+	char *buf;
+	unsigned int len = 0, size = 1600;
+	ssize_t retval = 0;
 
-	len = sprintf(buf, "%d\n", sc->spec_config.fft_period);
-	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
-}
+	buf = kzalloc(size, GFP_KERNEL);
+	if (buf == NULL)
+		return -ENOMEM;
 
-static ssize_t write_file_spectral_fft_period(struct file *file,
-					      const char __user *user_buf,
-					      size_t count, loff_t *ppos)
-{
-	struct ath_softc *sc = file->private_data;
-	unsigned long val;
-	char buf[32];
-	ssize_t len;
+	PHY_ERR("UNDERRUN ERR", ATH9K_PHYERR_UNDERRUN);
+	PHY_ERR("TIMING ERR", ATH9K_PHYERR_TIMING);
+	PHY_ERR("PARITY ERR", ATH9K_PHYERR_PARITY);
+	PHY_ERR("RATE ERR", ATH9K_PHYERR_RATE);
+	PHY_ERR("LENGTH ERR", ATH9K_PHYERR_LENGTH);
+	PHY_ERR("RADAR ERR", ATH9K_PHYERR_RADAR);
+	PHY_ERR("SERVICE ERR", ATH9K_PHYERR_SERVICE);
+	PHY_ERR("TOR ERR", ATH9K_PHYERR_TOR);
+	PHY_ERR("OFDM-TIMING ERR", ATH9K_PHYERR_OFDM_TIMING);
+	PHY_ERR("OFDM-SIGNAL-PARITY ERR", ATH9K_PHYERR_OFDM_SIGNAL_PARITY);
+	PHY_ERR("OFDM-RATE ERR", ATH9K_PHYERR_OFDM_RATE_ILLEGAL);
+	PHY_ERR("OFDM-LENGTH ERR", ATH9K_PHYERR_OFDM_LENGTH_ILLEGAL);
+	PHY_ERR("OFDM-POWER-DROP ERR", ATH9K_PHYERR_OFDM_POWER_DROP);
+	PHY_ERR("OFDM-SERVICE ERR", ATH9K_PHYERR_OFDM_SERVICE);
+	PHY_ERR("OFDM-RESTART ERR", ATH9K_PHYERR_OFDM_RESTART);
+	PHY_ERR("FALSE-RADAR-EXT ERR", ATH9K_PHYERR_FALSE_RADAR_EXT);
+	PHY_ERR("CCK-TIMING ERR", ATH9K_PHYERR_CCK_TIMING);
+	PHY_ERR("CCK-HEADER-CRC ERR", ATH9K_PHYERR_CCK_HEADER_CRC);
+	PHY_ERR("CCK-RATE ERR", ATH9K_PHYERR_CCK_RATE_ILLEGAL);
+	PHY_ERR("CCK-SERVICE ERR", ATH9K_PHYERR_CCK_SERVICE);
+	PHY_ERR("CCK-RESTART ERR", ATH9K_PHYERR_CCK_RESTART);
+	PHY_ERR("CCK-LENGTH ERR", ATH9K_PHYERR_CCK_LENGTH_ILLEGAL);
+	PHY_ERR("CCK-POWER-DROP ERR", ATH9K_PHYERR_CCK_POWER_DROP);
+	PHY_ERR("HT-CRC ERR", ATH9K_PHYERR_HT_CRC_ERROR);
+	PHY_ERR("HT-LENGTH ERR", ATH9K_PHYERR_HT_LENGTH_ILLEGAL);
+	PHY_ERR("HT-RATE ERR", ATH9K_PHYERR_HT_RATE_ILLEGAL);
 
-	len = min(count, sizeof(buf) - 1);
-	if (copy_from_user(buf, user_buf, len))
-		return -EFAULT;
+	if (len > size)
+		len = size;
 
-	buf[len] = '\0';
-	if (kstrtoul(buf, 0, &val))
-		return -EINVAL;
+	retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+	kfree(buf);
 
-	if (val < 0 || val > 15)
-		return -EINVAL;
+	return retval;
 
-	sc->spec_config.fft_period = val;
-	return count;
+#undef PHY_ERR
 }
 
-static const struct file_operations fops_spectral_fft_period = {
-	.read = read_file_spectral_fft_period,
-	.write = write_file_spectral_fft_period,
+static const struct file_operations fops_phy_err = {
+	.read = read_file_phy_err,
 	.open = simple_open,
 	.owner = THIS_MODULE,
 	.llseek = default_llseek,
 };
 
-static struct dentry *create_buf_file_handler(const char *filename,
-					      struct dentry *parent,
-					      umode_t mode,
-					      struct rchan_buf *buf,
-					      int *is_global)
-{
-	struct dentry *buf_file;
-
-	buf_file = debugfs_create_file(filename, mode, parent, buf,
-				       &relay_file_operations);
-	*is_global = 1;
-	return buf_file;
-}
-
-static int remove_buf_file_handler(struct dentry *dentry)
-{
-	debugfs_remove(dentry);
-
-	return 0;
-}
-
-void ath_debug_send_fft_sample(struct ath_softc *sc,
-			       struct fft_sample_tlv *fft_sample_tlv)
-{
-	int length;
-	if (!sc->rfs_chan_spec_scan)
-		return;
-
-	length = __be16_to_cpu(fft_sample_tlv->length) +
-		 sizeof(*fft_sample_tlv);
-	relay_write(sc->rfs_chan_spec_scan, fft_sample_tlv, length);
-}
-
-static struct rchan_callbacks rfs_spec_scan_cb = {
-	.create_buf_file = create_buf_file_handler,
-	.remove_buf_file = remove_buf_file_handler,
-};
-
-
 static ssize_t read_file_regidx(struct file *file, char __user *user_buf,
                                 size_t count, loff_t *ppos)
 {
@@ -1569,86 +1351,6 @@ static const struct file_operations fops_btcoex = {
 };
 #endif
 
-static ssize_t read_file_node_stat(struct file *file, char __user *user_buf,
-				   size_t count, loff_t *ppos)
-{
-	struct ath_node *an = file->private_data;
-	struct ath_softc *sc = an->sc;
-	struct ath_atx_tid *tid;
-	struct ath_atx_ac *ac;
-	struct ath_txq *txq;
-	u32 len = 0, size = 4096;
-	char *buf;
-	size_t retval;
-	int tidno, acno;
-
-	buf = kzalloc(size, GFP_KERNEL);
-	if (buf == NULL)
-		return -ENOMEM;
-
-	if (!an->sta->ht_cap.ht_supported) {
-		len = scnprintf(buf, size, "%s\n",
-				"HT not supported");
-		goto exit;
-	}
-
-	len = scnprintf(buf, size, "Max-AMPDU: %d\n",
-			an->maxampdu);
-	len += scnprintf(buf + len, size - len, "MPDU Density: %d\n\n",
-			 an->mpdudensity);
-
-	len += scnprintf(buf + len, size - len,
-			 "%2s%7s\n", "AC", "SCHED");
-
-	for (acno = 0, ac = &an->ac[acno];
-	     acno < IEEE80211_NUM_ACS; acno++, ac++) {
-		txq = ac->txq;
-		ath_txq_lock(sc, txq);
-		len += scnprintf(buf + len, size - len,
-				 "%2d%7d\n",
-				 acno, ac->sched);
-		ath_txq_unlock(sc, txq);
-	}
-
-	len += scnprintf(buf + len, size - len,
-			 "\n%3s%11s%10s%10s%10s%10s%9s%6s%8s\n",
-			 "TID", "SEQ_START", "SEQ_NEXT", "BAW_SIZE",
-			 "BAW_HEAD", "BAW_TAIL", "BAR_IDX", "SCHED", "PAUSED");
-
-	for (tidno = 0, tid = &an->tid[tidno];
-	     tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {
-		txq = tid->ac->txq;
-		ath_txq_lock(sc, txq);
-		len += scnprintf(buf + len, size - len,
-				 "%3d%11d%10d%10d%10d%10d%9d%6d%8d\n",
-				 tid->tidno, tid->seq_start, tid->seq_next,
-				 tid->baw_size, tid->baw_head, tid->baw_tail,
-				 tid->bar_index, tid->sched, tid->paused);
-		ath_txq_unlock(sc, txq);
-	}
-exit:
-	retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
-	kfree(buf);
-
-	return retval;
-}
-
-static const struct file_operations fops_node_stat = {
-	.read = read_file_node_stat,
-	.open = simple_open,
-	.owner = THIS_MODULE,
-	.llseek = default_llseek,
-};
-
-void ath9k_sta_add_debugfs(struct ieee80211_hw *hw,
-			   struct ieee80211_vif *vif,
-			   struct ieee80211_sta *sta,
-			   struct dentry *dir)
-{
-	struct ath_node *an = (struct ath_node *)sta->drv_priv;
-	debugfs_create_file("node_stat", S_IRUGO, dir, an, &fops_node_stat);
-}
-
 /* Ethtool support for get-stats */
 
 #define AMKSTR(nm) #nm "_BE", #nm "_BK", #nm "_VI", #nm "_VO"
@@ -1772,117 +1474,9 @@ void ath9k_get_et_stats(struct ieee80211_hw *hw,
 
 void ath9k_deinit_debug(struct ath_softc *sc)
 {
-	if (config_enabled(CONFIG_ATH9K_DEBUGFS) && sc->rfs_chan_spec_scan) {
-		relay_close(sc->rfs_chan_spec_scan);
-		sc->rfs_chan_spec_scan = NULL;
-	}
-}
-
-static ssize_t read_file_tx99(struct file *file, char __user *user_buf,
-			      size_t count, loff_t *ppos)
-{
-	struct ath_softc *sc = file->private_data;
-	char buf[3];
-	unsigned int len;
-
-	len = sprintf(buf, "%d\n", sc->tx99_state);
-	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
-}
-
-static ssize_t write_file_tx99(struct file *file, const char __user *user_buf,
-			       size_t count, loff_t *ppos)
-{
-	struct ath_softc *sc = file->private_data;
-	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
-	char buf[32];
-	bool start;
-	ssize_t len;
-	int r;
-
-	if (sc->nvifs > 1)
-		return -EOPNOTSUPP;
-
-	len = min(count, sizeof(buf) - 1);
-	if (copy_from_user(buf, user_buf, len))
-		return -EFAULT;
-
-	if (strtobool(buf, &start))
-		return -EINVAL;
-
-	if (start == sc->tx99_state) {
-		if (!start)
-			return count;
-		ath_dbg(common, XMIT, "Resetting TX99\n");
-		ath9k_tx99_deinit(sc);
-	}
-
-	if (!start) {
-		ath9k_tx99_deinit(sc);
-		return count;
-	}
-
-	r = ath9k_tx99_init(sc);
-	if (r)
-		return r;
-
-	return count;
-}
-
-static const struct file_operations fops_tx99 = {
-	.read = read_file_tx99,
-	.write = write_file_tx99,
-	.open = simple_open,
-	.owner = THIS_MODULE,
-	.llseek = default_llseek,
-};
-
-static ssize_t read_file_tx99_power(struct file *file,
-				    char __user *user_buf,
-				    size_t count, loff_t *ppos)
-{
-	struct ath_softc *sc = file->private_data;
-	char buf[32];
-	unsigned int len;
-
-	len = sprintf(buf, "%d (%d dBm)\n",
-		      sc->tx99_power,
-		      sc->tx99_power / 2);
-
-	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+	ath9k_spectral_deinit_debug(sc);
 }
 
-static ssize_t write_file_tx99_power(struct file *file,
-				     const char __user *user_buf,
-				     size_t count, loff_t *ppos)
-{
-	struct ath_softc *sc = file->private_data;
-	int r;
-	u8 tx_power;
-
-	r = kstrtou8_from_user(user_buf, count, 0, &tx_power);
-	if (r)
-		return r;
-
-	if (tx_power > MAX_RATE_POWER)
-		return -EINVAL;
-
-	sc->tx99_power = tx_power;
-
-	ath9k_ps_wakeup(sc);
-	ath9k_hw_tx99_set_txpower(sc->sc_ah, sc->tx99_power);
-	ath9k_ps_restore(sc);
-
-	return count;
-}
-
-static const struct file_operations fops_tx99_power = {
-	.read = read_file_tx99_power,
-	.write = write_file_tx99_power,
-	.open = simple_open,
-	.owner = THIS_MODULE,
-	.llseek = default_llseek,
-};
-
 int ath9k_init_debug(struct ath_hw *ah)
 {
 	struct ath_common *common = ath9k_hw_common(ah);
@@ -1899,6 +1493,8 @@ int ath9k_init_debug(struct ath_hw *ah)
 #endif
 
 	ath9k_dfs_init_debug(sc);
+	ath9k_tx99_init_debug(sc);
+	ath9k_spectral_init_debug(sc);
 
 	debugfs_create_file("dma", S_IRUSR, sc->debug.debugfs_phy, sc,
 			    &fops_dma);
@@ -1922,6 +1518,8 @@ int ath9k_init_debug(struct ath_hw *ah)
 			    &fops_reset);
 	debugfs_create_file("recv", S_IRUSR, sc->debug.debugfs_phy, sc,
 			    &fops_recv);
+	debugfs_create_file("phy_err", S_IRUSR, sc->debug.debugfs_phy, sc,
+			    &fops_phy_err);
 	debugfs_create_u8("rx_chainmask", S_IRUSR, sc->debug.debugfs_phy,
 			  &ah->rxchainmask);
 	debugfs_create_u8("tx_chainmask", S_IRUSR, sc->debug.debugfs_phy,
@@ -1945,23 +1543,6 @@ int ath9k_init_debug(struct ath_hw *ah)
 			    &fops_base_eeprom);
 	debugfs_create_file("modal_eeprom", S_IRUSR, sc->debug.debugfs_phy, sc,
 			    &fops_modal_eeprom);
-	sc->rfs_chan_spec_scan = relay_open("spectral_scan",
-					    sc->debug.debugfs_phy,
-					    1024, 256, &rfs_spec_scan_cb,
-					    NULL);
-	debugfs_create_file("spectral_scan_ctl", S_IRUSR | S_IWUSR,
-			    sc->debug.debugfs_phy, sc,
-			    &fops_spec_scan_ctl);
-	debugfs_create_file("spectral_short_repeat", S_IRUSR | S_IWUSR,
-			    sc->debug.debugfs_phy, sc,
-			    &fops_spectral_short_repeat);
-	debugfs_create_file("spectral_count", S_IRUSR | S_IWUSR,
-			    sc->debug.debugfs_phy, sc, &fops_spectral_count);
-	debugfs_create_file("spectral_period", S_IRUSR | S_IWUSR,
-			    sc->debug.debugfs_phy, sc, &fops_spectral_period);
-	debugfs_create_file("spectral_fft_period", S_IRUSR | S_IWUSR,
-			    sc->debug.debugfs_phy, sc,
-			    &fops_spectral_fft_period);
 	debugfs_create_u32("gpio_mask", S_IRUSR | S_IWUSR,
 			   sc->debug.debugfs_phy, &sc->sc_ah->gpio_mask);
 	debugfs_create_u32("gpio_val", S_IRUSR | S_IWUSR,
@@ -1974,15 +1555,6 @@ int ath9k_init_debug(struct ath_hw *ah)
 	debugfs_create_file("btcoex", S_IRUSR, sc->debug.debugfs_phy, sc,
 			    &fops_btcoex);
 #endif
-	if (config_enabled(CONFIG_ATH9K_TX99) &&
-	    AR_SREV_9300_20_OR_LATER(ah)) {
-		debugfs_create_file("tx99", S_IRUSR | S_IWUSR,
-				    sc->debug.debugfs_phy, sc,
-				    &fops_tx99);
-		debugfs_create_file("tx99_power", S_IRUSR | S_IWUSR,
-				    sc->debug.debugfs_phy, sc,
-				    &fops_tx99_power);
-	}
 
 	return 0;
 }
diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h
index d6e3fa4299a4..cc7a025d833e 100644
--- a/drivers/net/wireless/ath/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -27,11 +27,13 @@ struct fft_sample_tlv;
 
 #ifdef CONFIG_ATH9K_DEBUGFS
 #define TX_STAT_INC(q, c) sc->debug.stats.txstats[q].c++
+#define RX_STAT_INC(c) (sc->debug.stats.rxstats.c++)
 #define RESET_STAT_INC(sc, type) sc->debug.stats.reset[type]++
 #define ANT_STAT_INC(i, c) sc->debug.stats.ant_stats[i].c++
 #define ANT_LNA_INC(i, c) sc->debug.stats.ant_stats[i].lna_recv_cnt[c]++;
 #else
 #define TX_STAT_INC(q, c) do { } while (0)
+#define RX_STAT_INC(c)
 #define RESET_STAT_INC(sc, type) do { } while (0)
 #define ANT_STAT_INC(i, c) do { } while (0)
 #define ANT_LNA_INC(i, c) do { } while (0)
@@ -42,6 +44,7 @@ enum ath_reset_type {
 	RESET_TYPE_BB_WATCHDOG,
 	RESET_TYPE_FATAL_INT,
 	RESET_TYPE_TX_ERROR,
+	RESET_TYPE_TX_GTT,
 	RESET_TYPE_TX_HANG,
 	RESET_TYPE_PLL_HANG,
 	RESET_TYPE_MAC_HANG,
@@ -201,7 +204,23 @@ struct ath_tx_stats {
 				 TXSTATS[PR_QNUM(IEEE80211_AC_VO)].elem); \
 	} while(0)
 
-#define RX_STAT_INC(c) (sc->debug.stats.rxstats.c++)
+struct ath_rx_rate_stats {
+	struct {
+		u32 ht20_cnt;
+		u32 ht40_cnt;
+		u32 sgi_cnt;
+		u32 lgi_cnt;
+	} ht_stats[24];
+
+	struct {
+		u32 ofdm_cnt;
+	} ofdm_stats[8];
+
+	struct {
+		u32 cck_lp_cnt;
+		u32 cck_sp_cnt;
+	} cck_stats[4];
+};
 
 /**
  * struct ath_rx_stats - RX Statistics
@@ -292,14 +311,12 @@ void ath9k_sta_add_debugfs(struct ieee80211_hw *hw,
 			   struct ieee80211_vif *vif,
 			   struct ieee80211_sta *sta,
 			   struct dentry *dir);
-void ath_debug_send_fft_sample(struct ath_softc *sc,
-			       struct fft_sample_tlv *fft_sample);
 void ath9k_debug_stat_ant(struct ath_softc *sc,
 			  struct ath_hw_antcomb_conf *div_ant_conf,
 			  int main_rssi_avg, int alt_rssi_avg);
-#else
+void ath9k_debug_sync_cause(struct ath_softc *sc, u32 sync_cause);
 
-#define RX_STAT_INC(c) /* NOP */
+#else
 
 static inline int ath9k_init_debug(struct ath_hw *ah)
 {
@@ -331,6 +348,23 @@ static inline void ath9k_debug_stat_ant(struct ath_softc *sc,
 
 }
 
+static inline void
+ath9k_debug_sync_cause(struct ath_softc *sc, u32 sync_cause)
+{
+}
+
 #endif /* CONFIG_ATH9K_DEBUGFS */
 
+#ifdef CONFIG_ATH9K_STATION_STATISTICS
+void ath_debug_rate_stats(struct ath_softc *sc,
+			  struct ath_rx_status *rs,
+			  struct sk_buff *skb);
+#else
+static inline void ath_debug_rate_stats(struct ath_softc *sc,
+					struct ath_rx_status *rs,
+					struct sk_buff *skb)
+{
+}
+#endif /* CONFIG_ATH9K_STATION_STATISTICS */
+
 #endif /* DEBUG_H */
diff --git a/drivers/net/wireless/ath/ath9k/debug_sta.c b/drivers/net/wireless/ath/ath9k/debug_sta.c
new file mode 100644
index 000000000000..d76e6e0120d2
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/debug_sta.c
@@ -0,0 +1,269 @@
+/*
+ * Copyright (c) 2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "ath9k.h"
+
+/*************/
+/* node_aggr */
+/*************/
+
+static ssize_t read_file_node_aggr(struct file *file, char __user *user_buf,
+				   size_t count, loff_t *ppos)
+{
+	struct ath_node *an = file->private_data;
+	struct ath_softc *sc = an->sc;
+	struct ath_atx_tid *tid;
+	struct ath_atx_ac *ac;
+	struct ath_txq *txq;
+	u32 len = 0, size = 4096;
+	char *buf;
+	size_t retval;
+	int tidno, acno;
+
+	buf = kzalloc(size, GFP_KERNEL);
+	if (buf == NULL)
+		return -ENOMEM;
+
+	if (!an->sta->ht_cap.ht_supported) {
+		len = scnprintf(buf, size, "%s\n",
+				"HT not supported");
+		goto exit;
+	}
+
+	len = scnprintf(buf, size, "Max-AMPDU: %d\n",
+			an->maxampdu);
+	len += scnprintf(buf + len, size - len, "MPDU Density: %d\n\n",
+			 an->mpdudensity);
+
+	len += scnprintf(buf + len, size - len,
+			 "%2s%7s\n", "AC", "SCHED");
+
+	for (acno = 0, ac = &an->ac[acno];
+	     acno < IEEE80211_NUM_ACS; acno++, ac++) {
+		txq = ac->txq;
+		ath_txq_lock(sc, txq);
+		len += scnprintf(buf + len, size - len,
+				 "%2d%7d\n",
+				 acno, ac->sched);
+		ath_txq_unlock(sc, txq);
+	}
+
+	len += scnprintf(buf + len, size - len,
+			 "\n%3s%11s%10s%10s%10s%10s%9s%6s%8s\n",
+			 "TID", "SEQ_START", "SEQ_NEXT", "BAW_SIZE",
+			 "BAW_HEAD", "BAW_TAIL", "BAR_IDX", "SCHED", "PAUSED");
+
+	for (tidno = 0, tid = &an->tid[tidno];
+	     tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {
+		txq = tid->ac->txq;
+		ath_txq_lock(sc, txq);
+		if (tid->active) {
+			len += scnprintf(buf + len, size - len,
+					 "%3d%11d%10d%10d%10d%10d%9d%6d%8d\n",
+					 tid->tidno,
+					 tid->seq_start,
+					 tid->seq_next,
+					 tid->baw_size,
+					 tid->baw_head,
+					 tid->baw_tail,
+					 tid->bar_index,
+					 tid->sched,
+					 tid->paused);
+		}
+		ath_txq_unlock(sc, txq);
+	}
+exit:
+	retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+	kfree(buf);
+
+	return retval;
+}
+
+static const struct file_operations fops_node_aggr = {
+	.read = read_file_node_aggr,
+	.open = simple_open,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
+/*************/
+/* node_recv */
+/*************/
+
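+/* Per-station RX rate counters, bucketed by the mac80211 rate index */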
+void ath_debug_rate_stats(struct ath_softc *sc,
+			  struct ath_rx_status *rs,
+			  struct sk_buff *skb)
+{
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+	struct ath_hw *ah = sc->sc_ah;
+	struct ieee80211_rx_status *rxs;
+	struct ath_rx_rate_stats *rstats;
+	struct ieee80211_sta *sta;
+	struct ath_node *an;
+
+	if (!ieee80211_is_data(hdr->frame_control))
+		return;
+
+	rcu_read_lock();
+
+	sta = ieee80211_find_sta_by_ifaddr(sc->hw, hdr->addr2, NULL);
+	if (!sta)
+		goto exit;
+
+	an = (struct ath_node *) sta->drv_priv;
+	rstats = &an->rx_rate_stats;
+	rxs = IEEE80211_SKB_RXCB(skb);
+
+	if (IS_HT_RATE(rs->rs_rate)) {
+		if (rxs->rate_idx >= ARRAY_SIZE(rstats->ht_stats))
+			goto exit;
+
+		if (rxs->flag & RX_FLAG_40MHZ)
+			rstats->ht_stats[rxs->rate_idx].ht40_cnt++;
+		else
+			rstats->ht_stats[rxs->rate_idx].ht20_cnt++;
+
+		if (rxs->flag & RX_FLAG_SHORT_GI)
+			rstats->ht_stats[rxs->rate_idx].sgi_cnt++;
+		else
+			rstats->ht_stats[rxs->rate_idx].lgi_cnt++;
+
+		goto exit;
+	}
+
+	if (IS_CCK_RATE(rs->rs_rate)) {
+		if (rxs->flag & RX_FLAG_SHORTPRE)
+			rstats->cck_stats[rxs->rate_idx].cck_sp_cnt++;
+		else
+			rstats->cck_stats[rxs->rate_idx].cck_lp_cnt++;
+
+		goto exit;
+	}
+
+	if (IS_OFDM_RATE(rs->rs_rate)) {
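+		/* On 2 GHz the rate table lists the four CCK rates first,
+		 * so shift the index down to the OFDM-only slots. */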
+		if (ah->curchan->chan->band == IEEE80211_BAND_2GHZ)
+			rstats->ofdm_stats[rxs->rate_idx - 4].ofdm_cnt++;
+		else
+			rstats->ofdm_stats[rxs->rate_idx].ofdm_cnt++;
+	}
+exit:
+	rcu_read_unlock();
+}
+
+#define PRINT_CCK_RATE(str, i, sp)					\
+	do {								\
+		len += scnprintf(buf + len, size - len,			\
+			 "%11s : %10u\n",				\
+			 str,						\
+			 (sp) ? rstats->cck_stats[i].cck_sp_cnt :	\
+			 rstats->cck_stats[i].cck_lp_cnt);		\
+	} while (0)
+
+#define PRINT_OFDM_RATE(str, i)					\
+	do {							\
+		len += scnprintf(buf + len, size - len,		\
+			 "%11s : %10u\n",			\
+			 str,					\
+			 rstats->ofdm_stats[i].ofdm_cnt);	\
+	} while (0)
+
+static ssize_t read_file_node_recv(struct file *file, char __user *user_buf,
+				   size_t count, loff_t *ppos)
+{
+	struct ath_node *an = file->private_data;
+	struct ath_softc *sc = an->sc;
+	struct ath_hw *ah = sc->sc_ah;
+	struct ath_rx_rate_stats *rstats;
+	struct ieee80211_sta *sta = an->sta;
+	enum ieee80211_band band;
+	u32 len = 0, size = 4096;
+	char *buf;
+	size_t retval;
+	int i;
+
+	buf = kzalloc(size, GFP_KERNEL);
+	if (buf == NULL)
+		return -ENOMEM;
+
+	band = ah->curchan->chan->band;
+	rstats = &an->rx_rate_stats;
+
+	if (!sta->ht_cap.ht_supported)
+		goto legacy;
+
+	len += scnprintf(buf + len, size - len,
+			 "%24s%10s%10s%10s\n",
+			 "HT20", "HT40", "SGI", "LGI");
+
+	for (i = 0; i < 24; i++) {
+		len += scnprintf(buf + len, size - len,
+				 "%8s%3u : %10u%10u%10u%10u\n",
+				 "MCS", i,
+				 rstats->ht_stats[i].ht20_cnt,
+				 rstats->ht_stats[i].ht40_cnt,
+				 rstats->ht_stats[i].sgi_cnt,
+				 rstats->ht_stats[i].lgi_cnt);
+	}
+
+	len += scnprintf(buf + len, size - len, "\n");
+
+legacy:
+	if (band == IEEE80211_BAND_2GHZ) {
+		PRINT_CCK_RATE("CCK-1M/LP", 0, false);
+		PRINT_CCK_RATE("CCK-2M/LP", 1, false);
+		PRINT_CCK_RATE("CCK-5.5M/LP", 2, false);
+		PRINT_CCK_RATE("CCK-11M/LP", 3, false);
+
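+		/* 1 Mbps CCK has no short preamble variant */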
+		PRINT_CCK_RATE("CCK-2M/SP", 1, true);
+		PRINT_CCK_RATE("CCK-5.5M/SP", 2, true);
+		PRINT_CCK_RATE("CCK-11M/SP", 3, true);
+	}
+
+	PRINT_OFDM_RATE("OFDM-6M", 0);
+	PRINT_OFDM_RATE("OFDM-9M", 1);
+	PRINT_OFDM_RATE("OFDM-12M", 2);
+	PRINT_OFDM_RATE("OFDM-18M", 3);
+	PRINT_OFDM_RATE("OFDM-24M", 4);
+	PRINT_OFDM_RATE("OFDM-36M", 5);
+	PRINT_OFDM_RATE("OFDM-48M", 6);
+	PRINT_OFDM_RATE("OFDM-54M", 7);
+
+	retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+	kfree(buf);
+
+	return retval;
+}
+
+#undef PRINT_OFDM_RATE
+#undef PRINT_CCK_RATE
+
+static const struct file_operations fops_node_recv = {
+	.read = read_file_node_recv,
+	.open = simple_open,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
+void ath9k_sta_add_debugfs(struct ieee80211_hw *hw,
+			   struct ieee80211_vif *vif,
+			   struct ieee80211_sta *sta,
+			   struct dentry *dir)
+{
+	struct ath_node *an = (struct ath_node *)sta->drv_priv;
+
+	debugfs_create_file("node_aggr", S_IRUGO, dir, an, &fops_node_aggr);
+	debugfs_create_file("node_recv", S_IRUGO, dir, an, &fops_node_recv);
+}
diff --git a/drivers/net/wireless/ath/ath9k/dfs.c b/drivers/net/wireless/ath/ath9k/dfs.c
index 7187d3671512..857bb28b3894 100644
--- a/drivers/net/wireless/ath/ath9k/dfs.c
+++ b/drivers/net/wireless/ath/ath9k/dfs.c
@@ -158,8 +158,8 @@ void ath9k_dfs_process_phyerr(struct ath_softc *sc, void *data,
 		return;
 	}
 
-	ard.rssi = rs->rs_rssi_ctl0;
-	ard.ext_rssi = rs->rs_rssi_ext0;
+	ard.rssi = rs->rs_rssi_ctl[0];
+	ard.ext_rssi = rs->rs_rssi_ext[0];
 
 	/*
 	 * hardware stores this as 8 bit signed value.
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_4k.c b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
index b4091716e9b3..07b806c56c56 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_4k.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
@@ -1085,31 +1085,7 @@ static void ath9k_hw_4k_set_board_values(struct ath_hw *ah,
 
 static u16 ath9k_hw_4k_get_spur_channel(struct ath_hw *ah, u16 i, bool is2GHz)
 {
-#define EEP_MAP4K_SPURCHAN \
-	(ah->eeprom.map4k.modalHeader.spurChans[i].spurChan)
-	struct ath_common *common = ath9k_hw_common(ah);
-
-	u16 spur_val = AR_NO_SPUR;
-
-	ath_dbg(common, ANI, "Getting spur idx:%d is2Ghz:%d val:%x\n",
-		i, is2GHz, ah->config.spurchans[i][is2GHz]);
-
-	switch (ah->config.spurmode) {
-	case SPUR_DISABLE:
-		break;
-	case SPUR_ENABLE_IOCTL:
-		spur_val = ah->config.spurchans[i][is2GHz];
-		ath_dbg(common, ANI, "Getting spur val from new loc. %d\n",
-			spur_val);
-		break;
-	case SPUR_ENABLE_EEPROM:
-		spur_val = EEP_MAP4K_SPURCHAN;
-		break;
-	}
-
-	return spur_val;
-
-#undef EEP_MAP4K_SPURCHAN
+	return ah->eeprom.map4k.modalHeader.spurChans[i].spurChan;
 }
 
 const struct eeprom_ops eep_4k_ops = {
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_9287.c b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
index e1d0c217c104..5ba1385c9838 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_9287.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
@@ -1004,31 +1004,7 @@ static void ath9k_hw_ar9287_set_board_values(struct ath_hw *ah,
 static u16 ath9k_hw_ar9287_get_spur_channel(struct ath_hw *ah,
 					    u16 i, bool is2GHz)
 {
-#define EEP_MAP9287_SPURCHAN \
-	(ah->eeprom.map9287.modalHeader.spurChans[i].spurChan)
-
-	struct ath_common *common = ath9k_hw_common(ah);
-	u16 spur_val = AR_NO_SPUR;
-
-	ath_dbg(common, ANI, "Getting spur idx:%d is2Ghz:%d val:%x\n",
-		i, is2GHz, ah->config.spurchans[i][is2GHz]);
-
-	switch (ah->config.spurmode) {
-	case SPUR_DISABLE:
-		break;
-	case SPUR_ENABLE_IOCTL:
-		spur_val = ah->config.spurchans[i][is2GHz];
-		ath_dbg(common, ANI, "Getting spur val from new loc. %d\n",
-			spur_val);
-		break;
-	case SPUR_ENABLE_EEPROM:
-		spur_val = EEP_MAP9287_SPURCHAN;
-		break;
-	}
-
-	return spur_val;
-
-#undef EEP_MAP9287_SPURCHAN
+	return ah->eeprom.map9287.modalHeader.spurChans[i].spurChan;
 }
 
 const struct eeprom_ops eep_ar9287_ops = {
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_def.c b/drivers/net/wireless/ath/ath9k/eeprom_def.c
index 39107e31e79a..3218ca994746 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_def.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_def.c
@@ -1348,31 +1348,7 @@ static void ath9k_hw_def_set_txpower(struct ath_hw *ah,
 
 static u16 ath9k_hw_def_get_spur_channel(struct ath_hw *ah, u16 i, bool is2GHz)
 {
-#define EEP_DEF_SPURCHAN \
-	(ah->eeprom.def.modalHeader[is2GHz].spurChans[i].spurChan)
-	struct ath_common *common = ath9k_hw_common(ah);
-
-	u16 spur_val = AR_NO_SPUR;
-
-	ath_dbg(common, ANI, "Getting spur idx:%d is2Ghz:%d val:%x\n",
-		i, is2GHz, ah->config.spurchans[i][is2GHz]);
-
-	switch (ah->config.spurmode) {
-	case SPUR_DISABLE:
-		break;
-	case SPUR_ENABLE_IOCTL:
-		spur_val = ah->config.spurchans[i][is2GHz];
-		ath_dbg(common, ANI, "Getting spur val from new loc. %d\n",
-			spur_val);
-		break;
-	case SPUR_ENABLE_EEPROM:
-		spur_val = EEP_DEF_SPURCHAN;
-		break;
-	}
-
-	return spur_val;
-
-#undef EEP_DEF_SPURCHAN
+	return ah->eeprom.def.modalHeader[is2GHz].spurChans[i].spurChan;
 }
 
 const struct eeprom_ops eep_def_ops = {
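All three EEPROM families now return the stored spur channel unconditionally, dropping the SPUR_DISABLE/SPUR_ENABLE_IOCTL switch. The only consumer is the PHY spur-mitigation loop; from memory (so treat the exact caller shape as an assumption) it walks the AR_EEPROM_MODAL_SPURS slots and stops at the AR_NO_SPUR sentinel:

	/* Illustrative consumer loop; names mirror the existing eeprom API. */
	for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
		u16 cur_bb_spur = ah->eep_ops->get_spur_channel(ah, i, is2GHz);

		if (cur_bb_spur == AR_NO_SPUR)
			break;
		/* translate the spur frequency and program the notch filter */
	}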
diff --git a/drivers/net/wireless/ath/ath9k/gpio.c b/drivers/net/wireless/ath/ath9k/gpio.c
index c34f21241da9..b1956bf6e01e 100644
--- a/drivers/net/wireless/ath/ath9k/gpio.c
+++ b/drivers/net/wireless/ath/ath9k/gpio.c
@@ -157,36 +157,6 @@ static void ath_detect_bt_priority(struct ath_softc *sc)
 	}
 }
 
-static void ath9k_gen_timer_start(struct ath_hw *ah,
-				  struct ath_gen_timer *timer,
-				  u32 trig_timeout,
-				  u32 timer_period)
-{
-	ath9k_hw_gen_timer_start(ah, timer, trig_timeout, timer_period);
-
-	if ((ah->imask & ATH9K_INT_GENTIMER) == 0) {
-		ath9k_hw_disable_interrupts(ah);
-		ah->imask |= ATH9K_INT_GENTIMER;
-		ath9k_hw_set_interrupts(ah);
-		ath9k_hw_enable_interrupts(ah);
-	}
-}
-
-static void ath9k_gen_timer_stop(struct ath_hw *ah, struct ath_gen_timer *timer)
-{
-	struct ath_gen_timer_table *timer_table = &ah->hw_gen_timers;
-
-	ath9k_hw_gen_timer_stop(ah, timer);
-
-	/* if no timer is enabled, turn off interrupt mask */
-	if (timer_table->timer_mask.val == 0) {
-		ath9k_hw_disable_interrupts(ah);
-		ah->imask &= ~ATH9K_INT_GENTIMER;
-		ath9k_hw_set_interrupts(ah);
-		ath9k_hw_enable_interrupts(ah);
-	}
-}
-
 static void ath_mci_ftp_adjust(struct ath_softc *sc)
 {
 	struct ath_btcoex *btcoex = &sc->btcoex;
@@ -257,19 +227,9 @@ static void ath_btcoex_period_timer(unsigned long data)
 
 	spin_unlock_bh(&btcoex->btcoex_lock);
 
-	/*
-	 * btcoex_period is in msec while (btocex/btscan_)no_stomp are in usec,
-	 * ensure that we properly convert btcoex_period to usec
-	 * for any comparision with (btcoex/btscan_)no_stomp.
-	 */
-	if (btcoex->btcoex_period * 1000 != btcoex->btcoex_no_stomp) {
-		if (btcoex->hw_timer_enabled)
-			ath9k_gen_timer_stop(ah, btcoex->no_stomp_timer);
-
-		ath9k_gen_timer_start(ah, btcoex->no_stomp_timer, timer_period,
-				      timer_period * 10);
-		btcoex->hw_timer_enabled = true;
-	}
+	if (btcoex->btcoex_period != btcoex->btcoex_no_stomp)
+		mod_timer(&btcoex->no_stomp_timer,
+			 jiffies + msecs_to_jiffies(timer_period));
 
 	ath9k_ps_restore(sc);
 
@@ -282,7 +242,7 @@ skip_hw_wakeup:
  * Generic tsf based hw timer which configures weight
  * registers to time slice between wlan and bt traffic
  */
-static void ath_btcoex_no_stomp_timer(void *arg)
+static void ath_btcoex_no_stomp_timer(unsigned long arg)
 {
 	struct ath_softc *sc = (struct ath_softc *)arg;
 	struct ath_hw *ah = sc->sc_ah;
@@ -311,24 +271,18 @@ static int ath_init_btcoex_timer(struct ath_softc *sc)
 	struct ath_btcoex *btcoex = &sc->btcoex;
 
 	btcoex->btcoex_period = ATH_BTCOEX_DEF_BT_PERIOD;
-	btcoex->btcoex_no_stomp = (100 - ATH_BTCOEX_DEF_DUTY_CYCLE) * 1000 *
+	btcoex->btcoex_no_stomp = (100 - ATH_BTCOEX_DEF_DUTY_CYCLE) *
 		btcoex->btcoex_period / 100;
-	btcoex->btscan_no_stomp = (100 - ATH_BTCOEX_BTSCAN_DUTY_CYCLE) * 1000 *
+	btcoex->btscan_no_stomp = (100 - ATH_BTCOEX_BTSCAN_DUTY_CYCLE) *
 				   btcoex->btcoex_period / 100;
 
 	setup_timer(&btcoex->period_timer, ath_btcoex_period_timer,
 			(unsigned long) sc);
+	setup_timer(&btcoex->no_stomp_timer, ath_btcoex_no_stomp_timer,
+			(unsigned long) sc);
 
 	spin_lock_init(&btcoex->btcoex_lock);
 
-	btcoex->no_stomp_timer = ath_gen_timer_alloc(sc->sc_ah,
-			ath_btcoex_no_stomp_timer,
-			ath_btcoex_no_stomp_timer,
-			(void *) sc, AR_FIRST_NDP_TIMER);
-
-	if (!btcoex->no_stomp_timer)
-		return -ENOMEM;
-
 	return 0;
 }
 
@@ -343,10 +297,7 @@ void ath9k_btcoex_timer_resume(struct ath_softc *sc)
 	ath_dbg(ath9k_hw_common(ah), BTCOEX, "Starting btcoex timers\n");
 
 	/* make sure duty cycle timer is also stopped when resuming */
-	if (btcoex->hw_timer_enabled) {
-		ath9k_gen_timer_stop(sc->sc_ah, btcoex->no_stomp_timer);
-		btcoex->hw_timer_enabled = false;
-	}
+	del_timer_sync(&btcoex->no_stomp_timer);
 
 	btcoex->bt_priority_cnt = 0;
 	btcoex->bt_priority_time = jiffies;
@@ -363,24 +314,16 @@ void ath9k_btcoex_timer_resume(struct ath_softc *sc)
 void ath9k_btcoex_timer_pause(struct ath_softc *sc)
 {
 	struct ath_btcoex *btcoex = &sc->btcoex;
-	struct ath_hw *ah = sc->sc_ah;
 
 	del_timer_sync(&btcoex->period_timer);
-
-	if (btcoex->hw_timer_enabled) {
-		ath9k_gen_timer_stop(ah, btcoex->no_stomp_timer);
-		btcoex->hw_timer_enabled = false;
-	}
+	del_timer_sync(&btcoex->no_stomp_timer);
 }
 
 void ath9k_btcoex_stop_gen_timer(struct ath_softc *sc)
 {
 	struct ath_btcoex *btcoex = &sc->btcoex;
 
-	if (btcoex->hw_timer_enabled) {
-		ath9k_gen_timer_stop(sc->sc_ah, btcoex->no_stomp_timer);
-		btcoex->hw_timer_enabled = false;
-	}
+	del_timer_sync(&btcoex->no_stomp_timer);
 }
 
 u16 ath9k_btcoex_aggr_limit(struct ath_softc *sc, u32 max_4ms_framelen)
@@ -400,12 +343,6 @@ u16 ath9k_btcoex_aggr_limit(struct ath_softc *sc, u32 max_4ms_framelen)
 
 void ath9k_btcoex_handle_interrupt(struct ath_softc *sc, u32 status)
 {
-	struct ath_hw *ah = sc->sc_ah;
-
-	if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_3WIRE)
-		if (status & ATH9K_INT_GENTIMER)
-			ath_gen_timer_isr(sc->sc_ah);
-
 	if (status & ATH9K_INT_MCI)
 		ath_mci_intr(sc);
 }
@@ -447,10 +384,6 @@ void ath9k_deinit_btcoex(struct ath_softc *sc)
 {
 	struct ath_hw *ah = sc->sc_ah;
 
-        if ((sc->btcoex.no_stomp_timer) &&
-	    ath9k_hw_get_btcoex_scheme(sc->sc_ah) == ATH_BTCOEX_CFG_3WIRE)
-		ath_gen_timer_free(sc->sc_ah, sc->btcoex.no_stomp_timer);
-
 	if (ath9k_hw_mci_is_enabled(ah))
 		ath_mci_cleanup(sc);
 }
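With the hardware generic timer gone, both btcoex timers are ordinary kernel timers, and btcoex_no_stomp is kept in the same millisecond unit as btcoex_period (hence the dropped * 1000). A minimal, self-contained sketch of that setup_timer()/mod_timer()/del_timer_sync() lifecycle, using hypothetical names and the pre-4.15 timer API this patch targets:

#include <linux/timer.h>
#include <linux/jiffies.h>

struct demo_coex {
	struct timer_list duty_timer;
	u32 period_ms;			/* e.g. ATH_BTCOEX_DEF_BT_PERIOD */
};

static void demo_duty_expire(unsigned long data)
{
	struct demo_coex *dc = (struct demo_coex *)data;

	/* adjust stomp weights here, then re-arm for the next window */
	mod_timer(&dc->duty_timer,
		  jiffies + msecs_to_jiffies(dc->period_ms));
}

static void demo_coex_init(struct demo_coex *dc)
{
	dc->period_ms = 45;
	setup_timer(&dc->duty_timer, demo_duty_expire, (unsigned long)dc);
}

static void demo_coex_stop(struct demo_coex *dc)
{
	del_timer_sync(&dc->duty_timer);	/* safe even if not pending */
}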
diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h
index 055d7c25e090..58da3468d1f0 100644
--- a/drivers/net/wireless/ath/ath9k/htc.h
+++ b/drivers/net/wireless/ath/ath9k/htc.h
@@ -600,10 +600,15 @@ void ath9k_htc_rfkill_poll_state(struct ieee80211_hw *hw);
 struct base_eep_header *ath9k_htc_get_eeprom_base(struct ath9k_htc_priv *priv);
 
 #ifdef CONFIG_MAC80211_LEDS
+void ath9k_configure_leds(struct ath9k_htc_priv *priv);
 void ath9k_init_leds(struct ath9k_htc_priv *priv);
 void ath9k_deinit_leds(struct ath9k_htc_priv *priv);
 void ath9k_led_work(struct work_struct *work);
 #else
+static inline void ath9k_configure_leds(struct ath9k_htc_priv *priv)
+{
+}
+
 static inline void ath9k_init_leds(struct ath9k_htc_priv *priv)
 {
 }
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
index e0c03bd64182..8b5757734596 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
@@ -70,11 +70,11 @@ static void ath9k_htc_beacon_config_sta(struct ath9k_htc_priv *priv,
 	struct ath9k_beacon_state bs;
 	enum ath9k_int imask = 0;
 	int dtimperiod, dtimcount, sleepduration;
-	int cfpperiod, cfpcount, bmiss_timeout;
+	int bmiss_timeout;
 	u32 nexttbtt = 0, intval, tsftu;
 	__be32 htc_imask = 0;
 	u64 tsf;
-	int num_beacons, offset, dtim_dec_count, cfp_dec_count;
+	int num_beacons, offset, dtim_dec_count;
 	int ret __attribute__ ((unused));
 	u8 cmd_rsp;
 
@@ -84,7 +84,7 @@ static void ath9k_htc_beacon_config_sta(struct ath9k_htc_priv *priv,
 	bmiss_timeout = (ATH_DEFAULT_BMISS_LIMIT * bss_conf->beacon_interval);
 
 	/*
-	 * Setup dtim and cfp parameters according to
+	 * Setup dtim parameters according to
 	 * last beacon we received (which may be none).
 	 */
 	dtimperiod = bss_conf->dtim_period;
@@ -93,8 +93,6 @@ static void ath9k_htc_beacon_config_sta(struct ath9k_htc_priv *priv,
 	dtimcount = 1;
 	if (dtimcount >= dtimperiod)	/* NB: sanity check */
 		dtimcount = 0;
-	cfpperiod = 1;			/* NB: no PCF support yet */
-	cfpcount = 0;
 
 	sleepduration = intval;
 	if (sleepduration <= 0)
@@ -102,7 +100,7 @@ static void ath9k_htc_beacon_config_sta(struct ath9k_htc_priv *priv,
 
 	/*
 	 * Pull nexttbtt forward to reflect the current
-	 * TSF and calculate dtim+cfp state for the result.
+	 * TSF and calculate dtim state for the result.
 	 */
 	tsf = ath9k_hw_gettsf64(priv->ah);
 	tsftu = TSF_TO_TU(tsf>>32, tsf) + FUDGE;
@@ -115,26 +113,14 @@ static void ath9k_htc_beacon_config_sta(struct ath9k_htc_priv *priv,
 
 	/* DTIM Beacon every dtimperiod Beacon */
 	dtim_dec_count = num_beacons % dtimperiod;
-	/* CFP every cfpperiod DTIM Beacon */
-	cfp_dec_count = (num_beacons / dtimperiod) % cfpperiod;
-	if (dtim_dec_count)
-		cfp_dec_count++;
-
 	dtimcount -= dtim_dec_count;
 	if (dtimcount < 0)
 		dtimcount += dtimperiod;
 
-	cfpcount -= cfp_dec_count;
-	if (cfpcount < 0)
-		cfpcount += cfpperiod;
-
-	bs.bs_intval = intval;
-	bs.bs_nexttbtt = nexttbtt;
-	bs.bs_dtimperiod = dtimperiod*intval;
-	bs.bs_nextdtim = bs.bs_nexttbtt + dtimcount*intval;
-	bs.bs_cfpperiod = cfpperiod*bs.bs_dtimperiod;
-	bs.bs_cfpnext = bs.bs_nextdtim + cfpcount*bs.bs_dtimperiod;
-	bs.bs_cfpmaxduration = 0;
+	bs.bs_intval = TU_TO_USEC(intval);
+	bs.bs_nexttbtt = TU_TO_USEC(nexttbtt);
+	bs.bs_dtimperiod = dtimperiod * bs.bs_intval;
+	bs.bs_nextdtim = bs.bs_nexttbtt + dtimcount * bs.bs_intval;
 
 	/*
 	 * Calculate the number of consecutive beacons to miss* before taking
@@ -161,7 +147,8 @@ static void ath9k_htc_beacon_config_sta(struct ath9k_htc_priv *priv,
 	 * XXX fixed at 100ms
 	 */
 
-	bs.bs_sleepduration = roundup(IEEE80211_MS_TO_TU(100), sleepduration);
+	bs.bs_sleepduration = TU_TO_USEC(roundup(IEEE80211_MS_TO_TU(100),
+						 sleepduration));
 	if (bs.bs_sleepduration > bs.bs_dtimperiod)
 		bs.bs_sleepduration = bs.bs_dtimperiod;
 
@@ -170,10 +157,8 @@ static void ath9k_htc_beacon_config_sta(struct ath9k_htc_priv *priv,
 
 	ath_dbg(common, CONFIG, "intval: %u tsf: %llu tsftu: %u\n",
 		intval, tsf, tsftu);
-	ath_dbg(common, CONFIG,
-		"bmiss: %u sleep: %u cfp-period: %u maxdur: %u next: %u\n",
-		bs.bs_bmissthreshold, bs.bs_sleepduration,
-		bs.bs_cfpperiod, bs.bs_cfpmaxduration, bs.bs_cfpnext);
+	ath_dbg(common, CONFIG, "bmiss: %u sleep: %u\n",
+		bs.bs_bmissthreshold, bs.bs_sleepduration);
 
 	/* Set the computed STA beacon timers */
 
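The beacon-state fields handed to the hardware are now pre-converted to microseconds, which is why the hw.c hunks further down drop their TU_TO_USEC() wrappers. A worked example, assuming the usual definitions TU_TO_USEC(x) == (x) << 10 and IEEE80211_MS_TO_TU(ms) == ms * 1000 / 1024:

	/* Illustrative values only; mirrors the assignments above. */
	u32 intval = 100;			/* beacon interval, TU */
	int dtimperiod = 2, dtimcount = 1;

	bs.bs_intval     = TU_TO_USEC(intval);		/* 102400 us */
	bs.bs_dtimperiod = dtimperiod * bs.bs_intval;	/* 204800 us */
	bs.bs_nextdtim   = bs.bs_nexttbtt + dtimcount * bs.bs_intval;
	/* roundup(IEEE80211_MS_TO_TU(100), 100) == 100 TU -> 102400 us */
	bs.bs_sleepduration = TU_TO_USEC(roundup(IEEE80211_MS_TO_TU(100),
						 intval));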
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c b/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
index 105582d6b714..50f74a2a4cf8 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
@@ -255,6 +255,15 @@ void ath9k_deinit_leds(struct ath9k_htc_priv *priv)
 	cancel_work_sync(&priv->led_work);
 }
 
+void ath9k_configure_leds(struct ath9k_htc_priv *priv)
+{
+	/* Configure gpio 1 for output */
+	ath9k_hw_cfg_output(priv->ah, priv->ah->led_pin,
+			    AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
+	/* LED off, active low */
+	ath9k_hw_set_gpio(priv->ah, priv->ah->led_pin, 1);
+}
+
 void ath9k_init_leds(struct ath9k_htc_priv *priv)
 {
 	int ret;
@@ -268,11 +279,7 @@ void ath9k_init_leds(struct ath9k_htc_priv *priv)
 	else
 		priv->ah->led_pin = ATH_LED_PIN_DEF;
 
-	/* Configure gpio 1 for output */
-	ath9k_hw_cfg_output(priv->ah, priv->ah->led_pin,
-			    AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
-	/* LED off, active low */
-	ath9k_hw_set_gpio(priv->ah, priv->ah->led_pin, 1);
+	ath9k_configure_leds(priv);
 
 	snprintf(priv->led_name, sizeof(priv->led_name),
 		"ath9k_htc-%s", wiphy_name(priv->hw->wiphy));
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index c3676bf1d6c4..f4e1de20d99c 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -748,7 +748,6 @@ static void ath9k_set_hw_capab(struct ath9k_htc_priv *priv,
 			    WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
 
 	hw->queues = 4;
-	hw->channel_change_time = 5000;
 	hw->max_listen_interval = 1;
 
 	hw->vif_data_size = sizeof(struct ath9k_htc_vif);
@@ -1000,6 +999,8 @@ int ath9k_htc_resume(struct htc_target *htc_handle)
 
 	ret = ath9k_init_htc_services(priv, priv->ah->hw_version.devid,
 				      priv->ah->hw_version.usbdev);
+	ath9k_configure_leds(priv);
+
 	return ret;
 }
 #endif
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index c028df76b564..12e0f32a4905 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -1075,9 +1075,7 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
 
 	last_rssi = priv->rx.last_rssi;
 
-	if (ieee80211_is_beacon(hdr->frame_control) &&
-	    !is_zero_ether_addr(common->curbssid) &&
-	    ether_addr_equal(hdr->addr3, common->curbssid)) {
+	if (ath_is_mybeacon(common, hdr)) {
 		s8 rssi = rxbuf->rxstatus.rs_rssi;
 
 		if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
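ath_is_mybeacon() is a shared ath helper that replaces the open-coded test removed in this hunk; judging from those removed lines, its check is equivalent to the following (inferred from this diff, not quoted from the helper itself):

static bool example_is_mybeacon(struct ath_common *common,
				struct ieee80211_hdr *hdr)
{
	return ieee80211_is_beacon(hdr->frame_control) &&
	       !is_zero_ether_addr(common->curbssid) &&
	       ether_addr_equal(hdr->addr3, common->curbssid);
}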
diff --git a/drivers/net/wireless/ath/ath9k/hw-ops.h b/drivers/net/wireless/ath/ath9k/hw-ops.h
index 4f9378ddf07f..a47ea8423f1e 100644
--- a/drivers/net/wireless/ath/ath9k/hw-ops.h
+++ b/drivers/net/wireless/ath/ath9k/hw-ops.h
@@ -49,9 +49,10 @@ static inline bool ath9k_hw_calibrate(struct ath_hw *ah,
 	return ath9k_hw_ops(ah)->calibrate(ah, chan, rxchainmask, longcal);
 }
 
-static inline bool ath9k_hw_getisr(struct ath_hw *ah, enum ath9k_int *masked)
+static inline bool ath9k_hw_getisr(struct ath_hw *ah, enum ath9k_int *masked,
+				   u32 *sync_cause_p)
 {
-	return ath9k_hw_ops(ah)->get_isr(ah, masked);
+	return ath9k_hw_ops(ah)->get_isr(ah, masked, sync_cause_p);
 }
 
 static inline void ath9k_hw_set_txdesc(struct ath_hw *ah, void *ds,
@@ -106,6 +107,21 @@ static inline void ath9k_hw_set_bt_ant_diversity(struct ath_hw *ah, bool enable)
 
 /* Private hardware call ops */
 
+static inline void ath9k_hw_init_hang_checks(struct ath_hw *ah)
+{
+	ath9k_hw_private_ops(ah)->init_hang_checks(ah);
+}
+
+static inline bool ath9k_hw_detect_mac_hang(struct ath_hw *ah)
+{
+	return ath9k_hw_private_ops(ah)->detect_mac_hang(ah);
+}
+
+static inline bool ath9k_hw_detect_bb_hang(struct ath_hw *ah)
+{
+	return ath9k_hw_private_ops(ah)->detect_bb_hang(ah);
+}
+
 /* PHY ops */
 
 static inline int ath9k_hw_rf_set_freq(struct ath_hw *ah,
@@ -231,4 +247,31 @@ static inline void ath9k_hw_set_radar_params(struct ath_hw *ah)
 	ath9k_hw_private_ops(ah)->set_radar_params(ah, &ah->radar_conf);
 }
 
+static inline void ath9k_hw_init_cal_settings(struct ath_hw *ah)
+{
+	ath9k_hw_private_ops(ah)->init_cal_settings(ah);
+}
+
+static inline u32 ath9k_hw_compute_pll_control(struct ath_hw *ah,
+					       struct ath9k_channel *chan)
+{
+	return ath9k_hw_private_ops(ah)->compute_pll_control(ah, chan);
+}
+
+static inline void ath9k_hw_init_mode_gain_regs(struct ath_hw *ah)
+{
+	if (!ath9k_hw_private_ops(ah)->init_mode_gain_regs)
+		return;
+
+	ath9k_hw_private_ops(ah)->init_mode_gain_regs(ah);
+}
+
+static inline void ath9k_hw_ani_cache_ini_regs(struct ath_hw *ah)
+{
+	if (!ath9k_hw_private_ops(ah)->ani_cache_ini_regs)
+		return;
+
+	ath9k_hw_private_ops(ah)->ani_cache_ini_regs(ah);
+}
+
 #endif /* ATH9K_HW_OPS_H */
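The three new hang-check hooks follow the existing private-ops pattern: each MAC family fills them in from its attach code, and the wrappers above call them unconditionally, so a family that advertises a hang check must also supply the matching callback. A hypothetical hook-up (the callback names are assumptions; the real assignments live in the ar9002/ar9003 attach code, which is not part of this excerpt):

static void example_attach_hang_ops(struct ath_hw *ah)
{
	struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);

	priv_ops->init_hang_checks = example_init_hang_checks;
	priv_ops->detect_mac_hang = example_detect_mac_hang;
	priv_ops->detect_bb_hang = example_detect_bb_hang;
}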
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 8918035da3a3..fbf43c05713f 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -17,6 +17,8 @@
 #include <linux/io.h>
 #include <linux/slab.h>
 #include <linux/module.h>
+#include <linux/time.h>
+#include <linux/bitops.h>
 #include <asm/unaligned.h>
 
 #include "hw.h"
@@ -35,99 +37,6 @@ MODULE_DESCRIPTION("Support for Atheros 802.11n wireless LAN cards.");
 MODULE_SUPPORTED_DEVICE("Atheros 802.11n WLAN cards");
 MODULE_LICENSE("Dual BSD/GPL");
 
-static int __init ath9k_init(void)
-{
-	return 0;
-}
-module_init(ath9k_init);
-
-static void __exit ath9k_exit(void)
-{
-	return;
-}
-module_exit(ath9k_exit);
-
-/* Private hardware callbacks */
-
-static void ath9k_hw_init_cal_settings(struct ath_hw *ah)
-{
-	ath9k_hw_private_ops(ah)->init_cal_settings(ah);
-}
-
-static u32 ath9k_hw_compute_pll_control(struct ath_hw *ah,
-					struct ath9k_channel *chan)
-{
-	return ath9k_hw_private_ops(ah)->compute_pll_control(ah, chan);
-}
-
-static void ath9k_hw_init_mode_gain_regs(struct ath_hw *ah)
-{
-	if (!ath9k_hw_private_ops(ah)->init_mode_gain_regs)
-		return;
-
-	ath9k_hw_private_ops(ah)->init_mode_gain_regs(ah);
-}
-
-static void ath9k_hw_ani_cache_ini_regs(struct ath_hw *ah)
-{
-	/* You will not have this callback if using the old ANI */
-	if (!ath9k_hw_private_ops(ah)->ani_cache_ini_regs)
-		return;
-
-	ath9k_hw_private_ops(ah)->ani_cache_ini_regs(ah);
-}
-
-/********************/
-/* Helper Functions */
-/********************/
-
-#ifdef CONFIG_ATH9K_DEBUGFS
-
-void ath9k_debug_sync_cause(struct ath_common *common, u32 sync_cause)
-{
-	struct ath_softc *sc = common->priv;
-	if (sync_cause)
-		sc->debug.stats.istats.sync_cause_all++;
-	if (sync_cause & AR_INTR_SYNC_RTC_IRQ)
-		sc->debug.stats.istats.sync_rtc_irq++;
-	if (sync_cause & AR_INTR_SYNC_MAC_IRQ)
-		sc->debug.stats.istats.sync_mac_irq++;
-	if (sync_cause & AR_INTR_SYNC_EEPROM_ILLEGAL_ACCESS)
-		sc->debug.stats.istats.eeprom_illegal_access++;
-	if (sync_cause & AR_INTR_SYNC_APB_TIMEOUT)
-		sc->debug.stats.istats.apb_timeout++;
-	if (sync_cause & AR_INTR_SYNC_PCI_MODE_CONFLICT)
-		sc->debug.stats.istats.pci_mode_conflict++;
-	if (sync_cause & AR_INTR_SYNC_HOST1_FATAL)
-		sc->debug.stats.istats.host1_fatal++;
-	if (sync_cause & AR_INTR_SYNC_HOST1_PERR)
-		sc->debug.stats.istats.host1_perr++;
-	if (sync_cause & AR_INTR_SYNC_TRCV_FIFO_PERR)
-		sc->debug.stats.istats.trcv_fifo_perr++;
-	if (sync_cause & AR_INTR_SYNC_RADM_CPL_EP)
-		sc->debug.stats.istats.radm_cpl_ep++;
-	if (sync_cause & AR_INTR_SYNC_RADM_CPL_DLLP_ABORT)
-		sc->debug.stats.istats.radm_cpl_dllp_abort++;
-	if (sync_cause & AR_INTR_SYNC_RADM_CPL_TLP_ABORT)
-		sc->debug.stats.istats.radm_cpl_tlp_abort++;
-	if (sync_cause & AR_INTR_SYNC_RADM_CPL_ECRC_ERR)
-		sc->debug.stats.istats.radm_cpl_ecrc_err++;
-	if (sync_cause & AR_INTR_SYNC_RADM_CPL_TIMEOUT)
-		sc->debug.stats.istats.radm_cpl_timeout++;
-	if (sync_cause & AR_INTR_SYNC_LOCAL_TIMEOUT)
-		sc->debug.stats.istats.local_timeout++;
-	if (sync_cause & AR_INTR_SYNC_PM_ACCESS)
-		sc->debug.stats.istats.pm_access++;
-	if (sync_cause & AR_INTR_SYNC_MAC_AWAKE)
-		sc->debug.stats.istats.mac_awake++;
-	if (sync_cause & AR_INTR_SYNC_MAC_ASLEEP)
-		sc->debug.stats.istats.mac_asleep++;
-	if (sync_cause & AR_INTR_SYNC_MAC_SLEEP_ACCESS)
-		sc->debug.stats.istats.mac_sleep_access++;
-}
-#endif
-
-
 static void ath9k_hw_set_clockrate(struct ath_hw *ah)
 {
 	struct ath_common *common = ath9k_hw_common(ah);
@@ -336,6 +245,9 @@ static void ath9k_hw_read_revisions(struct ath_hw *ah)
 	case AR9300_DEVID_QCA955X:
 		ah->hw_version.macVersion = AR_SREV_VERSION_9550;
 		return;
+	case AR9300_DEVID_AR953X:
+		ah->hw_version.macVersion = AR_SREV_VERSION_9531;
+		return;
 	}
 
 	val = REG_READ(ah, AR_SREV) & AR_SREV_ID;
@@ -437,23 +349,22 @@ static bool ath9k_hw_chip_test(struct ath_hw *ah)
 
 static void ath9k_hw_init_config(struct ath_hw *ah)
 {
-	int i;
+	struct ath_common *common = ath9k_hw_common(ah);
 
 	ah->config.dma_beacon_response_time = 1;
 	ah->config.sw_beacon_response_time = 6;
-	ah->config.additional_swba_backoff = 0;
-	ah->config.ack_6mb = 0x0;
 	ah->config.cwm_ignore_extcca = 0;
-	ah->config.pcie_clock_req = 0;
 	ah->config.analog_shiftreg = 1;
 
-	for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
-		ah->config.spurchans[i][0] = AR_NO_SPUR;
-		ah->config.spurchans[i][1] = AR_NO_SPUR;
-	}
-
 	ah->config.rx_intr_mitigation = true;
-	ah->config.pcieSerDesWrite = true;
+
+	if (AR_SREV_9300_20_OR_LATER(ah)) {
+		ah->config.rimt_last = 500;
+		ah->config.rimt_first = 2000;
+	} else {
+		ah->config.rimt_last = 250;
+		ah->config.rimt_first = 700;
+	}
 
 	/*
 	 * We need this for PCI devices only (Cardbus, PCI, miniPCI)
@@ -473,6 +384,24 @@ static void ath9k_hw_init_config(struct ath_hw *ah)
 	 */
 	if (num_possible_cpus() > 1)
 		ah->config.serialize_regmode = SER_REG_MODE_AUTO;
+
+	if (NR_CPUS > 1 && ah->config.serialize_regmode == SER_REG_MODE_AUTO) {
+		if (ah->hw_version.macVersion == AR_SREV_VERSION_5416_PCI ||
+		    ((AR_SREV_9160(ah) || AR_SREV_9280(ah) || AR_SREV_9287(ah)) &&
+		     !ah->is_pciexpress)) {
+			ah->config.serialize_regmode = SER_REG_MODE_ON;
+		} else {
+			ah->config.serialize_regmode = SER_REG_MODE_OFF;
+		}
+	}
+
+	ath_dbg(common, RESET, "serialize_regmode is %d\n",
+		ah->config.serialize_regmode);
+
+	if (AR_SREV_9285(ah) || AR_SREV_9271(ah))
+		ah->config.max_txtrig_level = MAX_TX_FIFO_THRESHOLD >> 1;
+	else
+		ah->config.max_txtrig_level = MAX_TX_FIFO_THRESHOLD;
 }
 
 static void ath9k_hw_init_defaults(struct ath_hw *ah)
@@ -485,16 +414,24 @@ static void ath9k_hw_init_defaults(struct ath_hw *ah)
 	ah->hw_version.magic = AR5416_MAGIC;
 	ah->hw_version.subvendorid = 0;
 
-	ah->atim_window = 0;
-	ah->sta_id1_defaults =
-		AR_STA_ID1_CRPT_MIC_ENABLE |
-		AR_STA_ID1_MCAST_KSRCH;
+	ah->sta_id1_defaults = AR_STA_ID1_CRPT_MIC_ENABLE |
+			       AR_STA_ID1_MCAST_KSRCH;
 	if (AR_SREV_9100(ah))
 		ah->sta_id1_defaults |= AR_STA_ID1_AR9100_BA_FIX;
+
 	ah->slottime = ATH9K_SLOT_TIME_9;
 	ah->globaltxtimeout = (u32) -1;
 	ah->power_mode = ATH9K_PM_UNDEFINED;
 	ah->htc_reset_init = true;
+
+	ah->ani_function = ATH9K_ANI_ALL;
+	if (!AR_SREV_9300_20_OR_LATER(ah))
+		ah->ani_function &= ~ATH9K_ANI_MRC_CCK;
+
+	if (AR_SREV_9285(ah) || AR_SREV_9271(ah))
+		ah->tx_trig_level = (AR_FTRIG_256B >> AR_FTRIG_S);
+	else
+		ah->tx_trig_level = (AR_FTRIG_512B >> AR_FTRIG_S);
 }
 
 static int ath9k_hw_init_macaddr(struct ath_hw *ah)
@@ -548,11 +485,11 @@ static int ath9k_hw_post_init(struct ath_hw *ah)
 	 * EEPROM needs to be initialized before we do this.
 	 * This is required for regulatory compliance.
 	 */
-	if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
+	if (AR_SREV_9300_20_OR_LATER(ah)) {
 		u16 regdmn = ah->eep_ops->get_eeprom(ah, EEP_REG_0);
 		if ((regdmn & 0xF0) == CTL_FCC) {
-			ah->nf_2g.max = AR_PHY_CCA_MAX_GOOD_VAL_9462_FCC_2GHZ;
-			ah->nf_5g.max = AR_PHY_CCA_MAX_GOOD_VAL_9462_FCC_5GHZ;
+			ah->nf_2g.max = AR_PHY_CCA_MAX_GOOD_VAL_9300_FCC_2GHZ;
+			ah->nf_5g.max = AR_PHY_CCA_MAX_GOOD_VAL_9300_FCC_5GHZ;
 		}
 	}
 
@@ -576,6 +513,31 @@ static int __ath9k_hw_init(struct ath_hw *ah)
 
 	ath9k_hw_read_revisions(ah);
 
+	switch (ah->hw_version.macVersion) {
+	case AR_SREV_VERSION_5416_PCI:
+	case AR_SREV_VERSION_5416_PCIE:
+	case AR_SREV_VERSION_9160:
+	case AR_SREV_VERSION_9100:
+	case AR_SREV_VERSION_9280:
+	case AR_SREV_VERSION_9285:
+	case AR_SREV_VERSION_9287:
+	case AR_SREV_VERSION_9271:
+	case AR_SREV_VERSION_9300:
+	case AR_SREV_VERSION_9330:
+	case AR_SREV_VERSION_9485:
+	case AR_SREV_VERSION_9340:
+	case AR_SREV_VERSION_9462:
+	case AR_SREV_VERSION_9550:
+	case AR_SREV_VERSION_9565:
+	case AR_SREV_VERSION_9531:
+		break;
+	default:
+		ath_err(common,
+			"Mac Chip Rev 0x%02x.%x is not supported by this driver\n",
+			ah->hw_version.macVersion, ah->hw_version.macRev);
+		return -EOPNOTSUPP;
+	}
+
 	/*
 	 * Read back AR_WA into a permanent copy and set bits 14 and 17.
 	 * We need to do this to avoid RMW of this register. We cannot
@@ -609,50 +571,6 @@ static int __ath9k_hw_init(struct ath_hw *ah)
 		return -EIO;
 	}
 
-	if (NR_CPUS > 1 && ah->config.serialize_regmode == SER_REG_MODE_AUTO) {
-		if (ah->hw_version.macVersion == AR_SREV_VERSION_5416_PCI ||
-		    ((AR_SREV_9160(ah) || AR_SREV_9280(ah) || AR_SREV_9287(ah)) &&
-		     !ah->is_pciexpress)) {
-			ah->config.serialize_regmode =
-				SER_REG_MODE_ON;
-		} else {
-			ah->config.serialize_regmode =
-				SER_REG_MODE_OFF;
-		}
-	}
-
-	ath_dbg(common, RESET, "serialize_regmode is %d\n",
-		ah->config.serialize_regmode);
-
-	if (AR_SREV_9285(ah) || AR_SREV_9271(ah))
-		ah->config.max_txtrig_level = MAX_TX_FIFO_THRESHOLD >> 1;
-	else
-		ah->config.max_txtrig_level = MAX_TX_FIFO_THRESHOLD;
-
-	switch (ah->hw_version.macVersion) {
-	case AR_SREV_VERSION_5416_PCI:
-	case AR_SREV_VERSION_5416_PCIE:
-	case AR_SREV_VERSION_9160:
-	case AR_SREV_VERSION_9100:
-	case AR_SREV_VERSION_9280:
-	case AR_SREV_VERSION_9285:
-	case AR_SREV_VERSION_9287:
-	case AR_SREV_VERSION_9271:
-	case AR_SREV_VERSION_9300:
-	case AR_SREV_VERSION_9330:
-	case AR_SREV_VERSION_9485:
-	case AR_SREV_VERSION_9340:
-	case AR_SREV_VERSION_9462:
-	case AR_SREV_VERSION_9550:
-	case AR_SREV_VERSION_9565:
-		break;
-	default:
-		ath_err(common,
-			"Mac Chip Rev 0x%02x.%x is not supported by this driver\n",
-			ah->hw_version.macVersion, ah->hw_version.macRev);
-		return -EOPNOTSUPP;
-	}
-
 	if (AR_SREV_9271(ah) || AR_SREV_9100(ah) || AR_SREV_9340(ah) ||
 	    AR_SREV_9330(ah) || AR_SREV_9550(ah))
 		ah->is_pciexpress = false;
@@ -660,10 +578,6 @@ static int __ath9k_hw_init(struct ath_hw *ah)
 	ah->hw_version.phyRev = REG_READ(ah, AR_PHY_CHIP_ID);
 	ath9k_hw_init_cal_settings(ah);
 
-	ah->ani_function = ATH9K_ANI_ALL;
-	if (!AR_SREV_9300_20_OR_LATER(ah))
-		ah->ani_function &= ~ATH9K_ANI_MRC_CCK;
-
 	if (!ah->is_pciexpress)
 		ath9k_hw_disablepcie(ah);
 
@@ -682,15 +596,7 @@ static int __ath9k_hw_init(struct ath_hw *ah)
 		return r;
 	}
 
-	if (AR_SREV_9285(ah) || AR_SREV_9271(ah))
-		ah->tx_trig_level = (AR_FTRIG_256B >> AR_FTRIG_S);
-	else
-		ah->tx_trig_level = (AR_FTRIG_512B >> AR_FTRIG_S);
-
-	if (AR_SREV_9330(ah))
-		ah->bb_watchdog_timeout_ms = 85;
-	else
-		ah->bb_watchdog_timeout_ms = 25;
+	ath9k_hw_init_hang_checks(ah);
 
 	common->state = ATH_HW_INITIALIZED;
 
@@ -723,6 +629,7 @@ int ath9k_hw_init(struct ath_hw *ah)
 	case AR9300_DEVID_AR9462:
 	case AR9485_DEVID_AR1111:
 	case AR9300_DEVID_AR9565:
+	case AR9300_DEVID_AR953X:
 		break;
 	default:
 		if (common->bus_ops->ath_bus_type == ATH_USB)
@@ -858,7 +765,7 @@ static void ath9k_hw_init_pll(struct ath_hw *ah,
 		/* program BB PLL phase_shift */
 		REG_RMW_FIELD(ah, AR_CH0_BB_DPLL3,
 			      AR_CH0_BB_DPLL3_PHASE_SHIFT, 0x1);
-	} else if (AR_SREV_9340(ah) || AR_SREV_9550(ah)) {
+	} else if (AR_SREV_9340(ah) || AR_SREV_9550(ah) || AR_SREV_9531(ah)) {
 		u32 regval, pll2_divint, pll2_divfrac, refdiv;
 
 		REG_WRITE(ah, AR_RTC_PLL_CONTROL, 0x1142c);
@@ -868,9 +775,15 @@ static void ath9k_hw_init_pll(struct ath_hw *ah,
 		udelay(100);
 
 		if (ah->is_clk_25mhz) {
-			pll2_divint = 0x54;
-			pll2_divfrac = 0x1eb85;
-			refdiv = 3;
+			if (AR_SREV_9531(ah)) {
+				pll2_divint = 0x1c;
+				pll2_divfrac = 0xa3d2;
+				refdiv = 1;
+			} else {
+				pll2_divint = 0x54;
+				pll2_divfrac = 0x1eb85;
+				refdiv = 3;
+			}
 		} else {
 			if (AR_SREV_9340(ah)) {
 				pll2_divint = 88;
@@ -884,7 +797,10 @@ static void ath9k_hw_init_pll(struct ath_hw *ah,
 		}
 
 		regval = REG_READ(ah, AR_PHY_PLL_MODE);
-		regval |= (0x1 << 16);
+		if (AR_SREV_9531(ah))
+			regval |= (0x1 << 22);
+		else
+			regval |= (0x1 << 16);
 		REG_WRITE(ah, AR_PHY_PLL_MODE, regval);
 		udelay(100);
 
@@ -894,14 +810,33 @@ static void ath9k_hw_init_pll(struct ath_hw *ah,
 
 		regval = REG_READ(ah, AR_PHY_PLL_MODE);
 		if (AR_SREV_9340(ah))
-			regval = (regval & 0x80071fff) | (0x1 << 30) |
-				 (0x1 << 13) | (0x4 << 26) | (0x18 << 19);
+			regval = (regval & 0x80071fff) |
+				(0x1 << 30) |
+				(0x1 << 13) |
+				(0x4 << 26) |
+				(0x18 << 19);
+		else if (AR_SREV_9531(ah))
+			regval = (regval & 0x01c00fff) |
+				(0x1 << 31) |
+				(0x2 << 29) |
+				(0xa << 25) |
+				(0x1 << 19) |
+				(0x6 << 12);
 		else
-			regval = (regval & 0x80071fff) | (0x3 << 30) |
-				 (0x1 << 13) | (0x4 << 26) | (0x60 << 19);
+			regval = (regval & 0x80071fff) |
+				(0x3 << 30) |
+				(0x1 << 13) |
+				(0x4 << 26) |
+				(0x60 << 19);
 		REG_WRITE(ah, AR_PHY_PLL_MODE, regval);
-		REG_WRITE(ah, AR_PHY_PLL_MODE,
-			  REG_READ(ah, AR_PHY_PLL_MODE) & 0xfffeffff);
+
+		if (AR_SREV_9531(ah))
+			REG_WRITE(ah, AR_PHY_PLL_MODE,
+				  REG_READ(ah, AR_PHY_PLL_MODE) & 0xffbfffff);
+		else
+			REG_WRITE(ah, AR_PHY_PLL_MODE,
+				  REG_READ(ah, AR_PHY_PLL_MODE) & 0xfffeffff);
+
 		udelay(1000);
 	}
 
@@ -1281,6 +1216,42 @@ void ath9k_hw_get_delta_slope_vals(struct ath_hw *ah, u32 coef_scaled,
 	*coef_exponent = coef_exp - 16;
 }
 
+/* AR9330 WAR:
+ * call external reset function to reset WMAC if:
+ * - doing a cold reset
+ * - we have pending frames in the TX queues.
+ */
+static bool ath9k_hw_ar9330_reset_war(struct ath_hw *ah, int type)
+{
+	int i, npend = 0;
+
+	for (i = 0; i < AR_NUM_QCU; i++) {
+		npend = ath9k_hw_numtxpending(ah, i);
+		if (npend)
+			break;
+	}
+
+	if (ah->external_reset &&
+	    (npend || type == ATH9K_RESET_COLD)) {
+		int reset_err = 0;
+
+		ath_dbg(ath9k_hw_common(ah), RESET,
+			"reset MAC via external reset\n");
+
+		reset_err = ah->external_reset();
+		if (reset_err) {
+			ath_err(ath9k_hw_common(ah),
+				"External reset failed, err=%d\n",
+				reset_err);
+			return false;
+		}
+
+		REG_WRITE(ah, AR_RTC_RESET, 1);
+	}
+
+	return true;
+}
+
 static bool ath9k_hw_set_reset(struct ath_hw *ah, int type)
 {
 	u32 rst_flags;
@@ -1331,38 +1302,8 @@ static bool ath9k_hw_set_reset(struct ath_hw *ah, int type)
 	}
 
 	if (AR_SREV_9330(ah)) {
-		int npend = 0;
-		int i;
-
-		/* AR9330 WAR:
-		 * call external reset function to reset WMAC if:
-		 * - doing a cold reset
-		 * - we have pending frames in the TX queues
-		 */
-
-		for (i = 0; i < AR_NUM_QCU; i++) {
-			npend = ath9k_hw_numtxpending(ah, i);
-			if (npend)
-				break;
-		}
-
-		if (ah->external_reset &&
-		    (npend || type == ATH9K_RESET_COLD)) {
-			int reset_err = 0;
-
-			ath_dbg(ath9k_hw_common(ah), RESET,
-				"reset MAC via external reset\n");
-
-			reset_err = ah->external_reset();
-			if (reset_err) {
-				ath_err(ath9k_hw_common(ah),
-					"External reset failed, err=%d\n",
-					reset_err);
-				return false;
-			}
-
-			REG_WRITE(ah, AR_RTC_RESET, 1);
-		}
+		if (!ath9k_hw_ar9330_reset_war(ah, type))
+			return false;
 	}
 
 	if (ath9k_hw_mci_is_enabled(ah))
@@ -1372,7 +1313,12 @@ static bool ath9k_hw_set_reset(struct ath_hw *ah, int type)
 
 	REGWRITE_BUFFER_FLUSH(ah);
 
-	udelay(50);
+	if (AR_SREV_9300_20_OR_LATER(ah))
+		udelay(50);
+	else if (AR_SREV_9100(ah))
+		udelay(10000);
+	else
+		udelay(100);
 
 	REG_WRITE(ah, AR_RTC_RC, 0);
 	if (!ath9k_hw_wait(ah, AR_RTC_RC, AR_RTC_RC_M, 0, AH_WAIT_TIMEOUT)) {
@@ -1408,8 +1354,7 @@ static bool ath9k_hw_set_reset_power_on(struct ath_hw *ah)
 
 	REGWRITE_BUFFER_FLUSH(ah);
 
-	if (!AR_SREV_9300_20_OR_LATER(ah))
-		udelay(2);
+	udelay(2);
 
 	if (!AR_SREV_9100(ah) && !AR_SREV_9300_20_OR_LATER(ah))
 		REG_WRITE(ah, AR_RC, 0);
@@ -1485,7 +1430,6 @@ static bool ath9k_hw_chip_reset(struct ath_hw *ah,
 	if (AR_SREV_9330(ah))
 		ar9003_hw_internal_regulator_apply(ah);
 	ath9k_hw_init_pll(ah, chan);
-	ath9k_hw_set_rfmode(ah, chan);
 
 	return true;
 }
@@ -1501,8 +1445,9 @@ static bool ath9k_hw_channel_change(struct ath_hw *ah,
 	int r;
 
 	if (pCap->hw_caps & ATH9K_HW_CAP_FCC_BAND_SWITCH) {
-		band_switch = IS_CHAN_5GHZ(ah->curchan) != IS_CHAN_5GHZ(chan);
-		mode_diff = (chan->channelFlags != ah->curchan->channelFlags);
+		u32 flags_diff = chan->channelFlags ^ ah->curchan->channelFlags;
+		band_switch = !!(flags_diff & CHANNEL_5GHZ);
+		mode_diff = !!(flags_diff & ~CHANNEL_HT);
 	}
 
 	for (qnum = 0; qnum < AR_NUM_QCU; qnum++) {
@@ -1573,76 +1518,6 @@ static void ath9k_hw_apply_gpio_override(struct ath_hw *ah)
 	}
 }
 
-static bool ath9k_hw_check_dcs(u32 dma_dbg, u32 num_dcu_states,
-			       int *hang_state, int *hang_pos)
-{
-	static u32 dcu_chain_state[] = {5, 6, 9}; /* DCU chain stuck states */
-	u32 chain_state, dcs_pos, i;
-
-	for (dcs_pos = 0; dcs_pos < num_dcu_states; dcs_pos++) {
-		chain_state = (dma_dbg >> (5 * dcs_pos)) & 0x1f;
-		for (i = 0; i < 3; i++) {
-			if (chain_state == dcu_chain_state[i]) {
-				*hang_state = chain_state;
-				*hang_pos = dcs_pos;
-				return true;
-			}
-		}
-	}
-	return false;
-}
-
-#define DCU_COMPLETE_STATE        1
-#define DCU_COMPLETE_STATE_MASK 0x3
-#define NUM_STATUS_READS         50
-static bool ath9k_hw_detect_mac_hang(struct ath_hw *ah)
-{
-	u32 chain_state, comp_state, dcs_reg = AR_DMADBG_4;
-	u32 i, hang_pos, hang_state, num_state = 6;
-
-	comp_state = REG_READ(ah, AR_DMADBG_6);
-
-	if ((comp_state & DCU_COMPLETE_STATE_MASK) != DCU_COMPLETE_STATE) {
-		ath_dbg(ath9k_hw_common(ah), RESET,
-			"MAC Hang signature not found at DCU complete\n");
-		return false;
-	}
-
-	chain_state = REG_READ(ah, dcs_reg);
-	if (ath9k_hw_check_dcs(chain_state, num_state, &hang_state, &hang_pos))
-		goto hang_check_iter;
-
-	dcs_reg = AR_DMADBG_5;
-	num_state = 4;
-	chain_state = REG_READ(ah, dcs_reg);
-	if (ath9k_hw_check_dcs(chain_state, num_state, &hang_state, &hang_pos))
-		goto hang_check_iter;
-
-	ath_dbg(ath9k_hw_common(ah), RESET,
-		"MAC Hang signature 1 not found\n");
-	return false;
-
-hang_check_iter:
-	ath_dbg(ath9k_hw_common(ah), RESET,
-		"DCU registers: chain %08x complete %08x Hang: state %d pos %d\n",
-		chain_state, comp_state, hang_state, hang_pos);
-
-	for (i = 0; i < NUM_STATUS_READS; i++) {
-		chain_state = REG_READ(ah, dcs_reg);
-		chain_state = (chain_state >> (5 * hang_pos)) & 0x1f;
-		comp_state = REG_READ(ah, AR_DMADBG_6);
-
-		if (((comp_state & DCU_COMPLETE_STATE_MASK) !=
-					DCU_COMPLETE_STATE) ||
-		    (chain_state != hang_state))
-			return false;
-	}
-
-	ath_dbg(ath9k_hw_common(ah), RESET, "MAC Hang signature 1 found\n");
-
-	return true;
-}
-
 void ath9k_hw_check_nav(struct ath_hw *ah)
 {
 	struct ath_common *common = ath9k_hw_common(ah);
@@ -1717,7 +1592,6 @@ static void ath9k_hw_reset_opmode(struct ath_hw *ah,
 
 	REG_RMW(ah, AR_STA_ID1, macStaId1
 		  | AR_STA_ID1_RTS_USE_DEF
-		  | (ah->config.ack_6mb ? AR_STA_ID1_ACKCTS_6MB : 0)
 		  | ah->sta_id1_defaults,
 		  ~AR_STA_ID1_SADH_MASK);
 	ath_hw_setbssidmask(common);
@@ -1776,7 +1650,7 @@ static void ath9k_hw_init_desc(struct ath_hw *ah)
 		}
 #ifdef __BIG_ENDIAN
 		else if (AR_SREV_9330(ah) || AR_SREV_9340(ah) ||
-			 AR_SREV_9550(ah))
+			 AR_SREV_9550(ah) || AR_SREV_9531(ah))
 			REG_RMW(ah, AR_CFG, AR_CFG_SWRB | AR_CFG_SWTB, 0);
 		else
 			REG_WRITE(ah, AR_CFG, AR_CFG_SWTD | AR_CFG_SWRD);
@@ -1814,7 +1688,7 @@ static int ath9k_hw_do_fastcc(struct ath_hw *ah, struct ath9k_channel *chan)
 	 * If cross-band fcc is not supported, bail out if channelFlags differ.
 	 */
 	if (!(pCap->hw_caps & ATH9K_HW_CAP_FCC_BAND_SWITCH) &&
-	    chan->channelFlags != ah->curchan->channelFlags)
+	    ((chan->channelFlags ^ ah->curchan->channelFlags) & ~CHANNEL_HT))
 		goto fail;
 
 	if (!ath9k_hw_check_alive(ah))
@@ -1855,10 +1729,12 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
 		   struct ath9k_hw_cal_data *caldata, bool fastcc)
 {
 	struct ath_common *common = ath9k_hw_common(ah);
+	struct timespec ts;
 	u32 saveLedState;
 	u32 saveDefAntenna;
 	u32 macStaId1;
 	u64 tsf = 0;
+	s64 usec = 0;
 	int r;
 	bool start_mci_reset = false;
 	bool save_fullsleep = ah->chip_fullsleep;
@@ -1901,10 +1777,10 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
 
 	macStaId1 = REG_READ(ah, AR_STA_ID1) & AR_STA_ID1_BASE_RATE_11B;
 
-	/* For chips on which RTC reset is done, save TSF before it gets cleared */
-	if (AR_SREV_9100(ah) ||
-	    (AR_SREV_9280(ah) && ah->eep_ops->get_eeprom(ah, EEP_OL_PWRCTRL)))
-		tsf = ath9k_hw_gettsf64(ah);
+	/* Save TSF before chip reset, a cold reset clears it */
+	tsf = ath9k_hw_gettsf64(ah);
+	getrawmonotonic(&ts);
+	usec = ts.tv_sec * 1000000ULL + ts.tv_nsec / 1000;
 
 	saveLedState = REG_READ(ah, AR_CFG_LED) &
 		(AR_CFG_LED_ASSOC_CTL | AR_CFG_LED_MODE_SEL |
@@ -1937,8 +1813,9 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
 	}
 
 	/* Restore TSF */
-	if (tsf)
-		ath9k_hw_settsf64(ah, tsf);
+	getrawmonotonic(&ts);
+	usec = ts.tv_sec * 1000000ULL + ts.tv_nsec / 1000 - usec;
+	ath9k_hw_settsf64(ah, tsf + usec);
 
 	if (AR_SREV_9280_20_OR_LATER(ah))
 		REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL, AR_GPIO_JTAG_DISABLE);
@@ -1950,6 +1827,8 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
 	if (r)
 		return r;
 
+	ath9k_hw_set_rfmode(ah, chan);
+
 	if (ath9k_hw_mci_is_enabled(ah))
 		ar9003_mci_reset(ah, false, IS_CHAN_2GHZ(chan), save_fullsleep);
 
@@ -2005,8 +1884,8 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
 		REG_WRITE(ah, AR_OBS, 8);
 
 	if (ah->config.rx_intr_mitigation) {
-		REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_LAST, 500);
-		REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_FIRST, 2000);
+		REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_LAST, ah->config.rimt_last);
+		REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_FIRST, ah->config.rimt_first);
 	}
 
 	if (ah->config.tx_intr_mitigation) {
@@ -2044,10 +1923,11 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
 	ath9k_hw_loadnf(ah, chan);
 	ath9k_hw_start_nfcal(ah, true);
 
-	if (AR_SREV_9300_20_OR_LATER(ah)) {
+	if (AR_SREV_9300_20_OR_LATER(ah))
 		ar9003_hw_bb_watchdog_config(ah);
+
+	if (ah->config.hw_hang_checks & HW_PHYRESTART_CLC_WAR)
 		ar9003_hw_disable_phy_restart(ah);
-	}
 
 	ath9k_hw_apply_gpio_override(ah);
 
@@ -2171,7 +2051,11 @@ static bool ath9k_hw_set_power_awake(struct ath_hw *ah)
 
 	REG_SET_BIT(ah, AR_RTC_FORCE_WAKE,
 		    AR_RTC_FORCE_WAKE_EN);
-	udelay(50);
+
+	if (AR_SREV_9100(ah))
+		udelay(10000);
+	else
+		udelay(50);
 
 	for (i = POWER_UP_TIME / 50; i > 0; i--) {
 		val = REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M;
@@ -2260,9 +2144,6 @@ void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period)
 	case NL80211_IFTYPE_ADHOC:
 		REG_SET_BIT(ah, AR_TXCFG,
 			    AR_TXCFG_ADHOC_BEACON_ATIM_TX_POLICY);
-		REG_WRITE(ah, AR_NEXT_NDP_TIMER, next_beacon +
-			  TU_TO_USEC(ah->atim_window ? ah->atim_window : 1));
-		flags |= AR_NDP_TIMER_EN;
 	case NL80211_IFTYPE_MESH_POINT:
 	case NL80211_IFTYPE_AP:
 		REG_WRITE(ah, AR_NEXT_TBTT_TIMER, next_beacon);
@@ -2283,7 +2164,6 @@ void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period)
 	REG_WRITE(ah, AR_BEACON_PERIOD, beacon_period);
 	REG_WRITE(ah, AR_DMA_BEACON_PERIOD, beacon_period);
 	REG_WRITE(ah, AR_SWBA_PERIOD, beacon_period);
-	REG_WRITE(ah, AR_NDP_PERIOD, beacon_period);
 
 	REGWRITE_BUFFER_FLUSH(ah);
 
@@ -2300,12 +2180,9 @@ void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah,
 
 	ENABLE_REGWRITE_BUFFER(ah);
 
-	REG_WRITE(ah, AR_NEXT_TBTT_TIMER, TU_TO_USEC(bs->bs_nexttbtt));
-
-	REG_WRITE(ah, AR_BEACON_PERIOD,
-		  TU_TO_USEC(bs->bs_intval));
-	REG_WRITE(ah, AR_DMA_BEACON_PERIOD,
-		  TU_TO_USEC(bs->bs_intval));
+	REG_WRITE(ah, AR_NEXT_TBTT_TIMER, bs->bs_nexttbtt);
+	REG_WRITE(ah, AR_BEACON_PERIOD, bs->bs_intval);
+	REG_WRITE(ah, AR_DMA_BEACON_PERIOD, bs->bs_intval);
 
 	REGWRITE_BUFFER_FLUSH(ah);
 
@@ -2333,9 +2210,8 @@ void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah,
 
 	ENABLE_REGWRITE_BUFFER(ah);
 
-	REG_WRITE(ah, AR_NEXT_DTIM,
-		  TU_TO_USEC(bs->bs_nextdtim - SLEEP_SLOP));
-	REG_WRITE(ah, AR_NEXT_TIM, TU_TO_USEC(nextTbtt - SLEEP_SLOP));
+	REG_WRITE(ah, AR_NEXT_DTIM, bs->bs_nextdtim - SLEEP_SLOP);
+	REG_WRITE(ah, AR_NEXT_TIM, nextTbtt - SLEEP_SLOP);
 
 	REG_WRITE(ah, AR_SLEEP1,
 		  SM((CAB_TIMEOUT_VAL << 3), AR_SLEEP1_CAB_TIMEOUT)
@@ -2349,8 +2225,8 @@ void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah,
 	REG_WRITE(ah, AR_SLEEP2,
 		  SM(beacontimeout, AR_SLEEP2_BEACON_TIMEOUT));
 
-	REG_WRITE(ah, AR_TIM_PERIOD, TU_TO_USEC(beaconintval));
-	REG_WRITE(ah, AR_DTIM_PERIOD, TU_TO_USEC(dtimperiod));
+	REG_WRITE(ah, AR_TIM_PERIOD, beaconintval);
+	REG_WRITE(ah, AR_DTIM_PERIOD, dtimperiod);
 
 	REGWRITE_BUFFER_FLUSH(ah);
 
@@ -2608,13 +2484,6 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
 	    ah->eep_ops->get_eeprom(ah, EEP_PAPRD))
 			pCap->hw_caps |= ATH9K_HW_CAP_PAPRD;
 
-	/*
-	 * Fast channel change across bands is available
-	 * only for AR9462 and AR9565.
-	 */
-	if (AR_SREV_9462(ah) || AR_SREV_9565(ah))
-		pCap->hw_caps |= ATH9K_HW_CAP_FCC_BAND_SWITCH;
-
 	return 0;
 }
 
@@ -2986,20 +2855,6 @@ static const struct ath_gen_timer_configuration gen_tmr_configuration[] =
 
 /* HW generic timer primitives */
 
-/* compute and clear index of rightmost 1 */
-static u32 rightmost_index(struct ath_gen_timer_table *timer_table, u32 *mask)
-{
-	u32 b;
-
-	b = *mask;
-	b &= (0-b);
-	*mask &= ~b;
-	b *= debruijn32;
-	b >>= 27;
-
-	return timer_table->gen_timer_index[b];
-}
-
 u32 ath9k_hw_gettsf32(struct ath_hw *ah)
 {
 	return REG_READ(ah, AR_TSF_L32);
@@ -3015,6 +2870,10 @@ struct ath_gen_timer *ath_gen_timer_alloc(struct ath_hw *ah,
 	struct ath_gen_timer_table *timer_table = &ah->hw_gen_timers;
 	struct ath_gen_timer *timer;
 
+	if ((timer_index < AR_FIRST_NDP_TIMER) ||
+	    (timer_index >= ATH_MAX_GEN_TIMER))
+		return NULL;
+
 	timer = kzalloc(sizeof(struct ath_gen_timer), GFP_KERNEL);
 	if (timer == NULL)
 		return NULL;
@@ -3032,23 +2891,13 @@ EXPORT_SYMBOL(ath_gen_timer_alloc);
 
 void ath9k_hw_gen_timer_start(struct ath_hw *ah,
 			      struct ath_gen_timer *timer,
-			      u32 trig_timeout,
+			      u32 timer_next,
 			      u32 timer_period)
 {
 	struct ath_gen_timer_table *timer_table = &ah->hw_gen_timers;
-	u32 tsf, timer_next;
-
-	BUG_ON(!timer_period);
-
-	set_bit(timer->index, &timer_table->timer_mask.timer_bits);
+	u32 mask = 0;
 
-	tsf = ath9k_hw_gettsf32(ah);
-
-	timer_next = tsf + trig_timeout;
-
-	ath_dbg(ath9k_hw_common(ah), BTCOEX,
-		"current tsf %x period %x timer_next %x\n",
-		tsf, timer_period, timer_next);
+	timer_table->timer_mask |= BIT(timer->index);
 
 	/*
 	 * Program generic timer registers
@@ -3074,10 +2923,19 @@ void ath9k_hw_gen_timer_start(struct ath_hw *ah,
 				       (1 << timer->index));
 	}
 
-	/* Enable both trigger and thresh interrupt masks */
-	REG_SET_BIT(ah, AR_IMR_S5,
-		(SM(AR_GENTMR_BIT(timer->index), AR_IMR_S5_GENTIMER_THRESH) |
-		SM(AR_GENTMR_BIT(timer->index), AR_IMR_S5_GENTIMER_TRIG)));
+	if (timer->trigger)
+		mask |= SM(AR_GENTMR_BIT(timer->index),
+			   AR_IMR_S5_GENTIMER_TRIG);
+	if (timer->overflow)
+		mask |= SM(AR_GENTMR_BIT(timer->index),
+			   AR_IMR_S5_GENTIMER_THRESH);
+
+	REG_SET_BIT(ah, AR_IMR_S5, mask);
+
+	if ((ah->imask & ATH9K_INT_GENTIMER) == 0) {
+		ah->imask |= ATH9K_INT_GENTIMER;
+		ath9k_hw_set_interrupts(ah);
+	}
 }
 EXPORT_SYMBOL(ath9k_hw_gen_timer_start);
 
@@ -3085,11 +2943,6 @@ void ath9k_hw_gen_timer_stop(struct ath_hw *ah, struct ath_gen_timer *timer)
 {
 	struct ath_gen_timer_table *timer_table = &ah->hw_gen_timers;
 
-	if ((timer->index < AR_FIRST_NDP_TIMER) ||
-		(timer->index >= ATH_MAX_GEN_TIMER)) {
-		return;
-	}
-
 	/* Clear generic timer enable bits. */
 	REG_CLR_BIT(ah, gen_tmr_configuration[timer->index].mode_addr,
 			gen_tmr_configuration[timer->index].mode_mask);
@@ -3109,7 +2962,12 @@ void ath9k_hw_gen_timer_stop(struct ath_hw *ah, struct ath_gen_timer *timer)
 		(SM(AR_GENTMR_BIT(timer->index), AR_IMR_S5_GENTIMER_THRESH) |
 		SM(AR_GENTMR_BIT(timer->index), AR_IMR_S5_GENTIMER_TRIG)));
 
-	clear_bit(timer->index, &timer_table->timer_mask.timer_bits);
+	timer_table->timer_mask &= ~BIT(timer->index);
+
+	if (timer_table->timer_mask == 0) {
+		ah->imask &= ~ATH9K_INT_GENTIMER;
+		ath9k_hw_set_interrupts(ah);
+	}
 }
 EXPORT_SYMBOL(ath9k_hw_gen_timer_stop);
 
@@ -3130,32 +2988,32 @@ void ath_gen_timer_isr(struct ath_hw *ah)
 {
 	struct ath_gen_timer_table *timer_table = &ah->hw_gen_timers;
 	struct ath_gen_timer *timer;
-	struct ath_common *common = ath9k_hw_common(ah);
-	u32 trigger_mask, thresh_mask, index;
+	unsigned long trigger_mask, thresh_mask;
+	unsigned int index;
 
 	/* get hardware generic timer interrupt status */
 	trigger_mask = ah->intr_gen_timer_trigger;
 	thresh_mask = ah->intr_gen_timer_thresh;
-	trigger_mask &= timer_table->timer_mask.val;
-	thresh_mask &= timer_table->timer_mask.val;
-
-	trigger_mask &= ~thresh_mask;
+	trigger_mask &= timer_table->timer_mask;
+	thresh_mask &= timer_table->timer_mask;
 
-	while (thresh_mask) {
-		index = rightmost_index(timer_table, &thresh_mask);
+	for_each_set_bit(index, &thresh_mask, ARRAY_SIZE(timer_table->timers)) {
 		timer = timer_table->timers[index];
-		BUG_ON(!timer);
-		ath_dbg(common, BTCOEX, "TSF overflow for Gen timer %d\n",
-			index);
+		if (!timer)
+			continue;
+		if (!timer->overflow)
+			continue;
+
+		trigger_mask &= ~BIT(index);
 		timer->overflow(timer->arg);
 	}
 
-	while (trigger_mask) {
-		index = rightmost_index(timer_table, &trigger_mask);
+	for_each_set_bit(index, &trigger_mask, ARRAY_SIZE(timer_table->timers)) {
 		timer = timer_table->timers[index];
-		BUG_ON(!timer);
-		ath_dbg(common, BTCOEX,
-			"Gen timer[%d] trigger\n", index);
+		if (!timer)
+			continue;
+		if (!timer->trigger)
+			continue;
 		timer->trigger(timer->arg);
 	}
 }
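The ISR now iterates the pending masks with for_each_set_bit() instead of the hand-rolled de Bruijn lookup, and a timer missing one of its two callbacks is simply skipped. A standalone illustration of the iteration primitive (generic example, not driver code):

#include <linux/bitops.h>
#include <linux/printk.h>

static void example_walk_mask(unsigned long pending)
{
	unsigned int idx;

	/* visits set bits in ascending order: for 0b10110 -> 1, 2, 4 */
	for_each_set_bit(idx, &pending, BITS_PER_LONG)
		pr_debug("timer slot %u fired\n", idx);
}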
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index a2c9a5dbac6b..0acd4b5a4892 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -52,6 +52,7 @@
 #define AR9300_DEVID_QCA955X	0x0038
 #define AR9485_DEVID_AR1111	0x0037
 #define AR9300_DEVID_AR9565     0x0036
+#define AR9300_DEVID_AR953X     0x003d
 
 #define AR5416_AR9100_DEVID	0x000b
 
@@ -168,7 +169,7 @@
 #define CAB_TIMEOUT_VAL             10
 #define BEACON_TIMEOUT_VAL          10
 #define MIN_BEACON_TIMEOUT_VAL      1
-#define SLEEP_SLOP                  3
+#define SLEEP_SLOP                  TU_TO_USEC(3)
 
 #define INIT_CONFIG_STATUS          0x00000000
 #define INIT_RSSI_THR               0x00000700
@@ -277,14 +278,25 @@ struct ath9k_hw_capabilities {
 	u8 txs_len;
 };
 
+#define AR_NO_SPUR      	0x8000
+#define AR_BASE_FREQ_2GHZ   	2300
+#define AR_BASE_FREQ_5GHZ   	4900
+#define AR_SPUR_FEEQ_BOUND_HT40 19
+#define AR_SPUR_FEEQ_BOUND_HT20 10
+
+enum ath9k_hw_hang_checks {
+	HW_BB_WATCHDOG            = BIT(0),
+	HW_PHYRESTART_CLC_WAR     = BIT(1),
+	HW_BB_RIFS_HANG           = BIT(2),
+	HW_BB_DFS_HANG            = BIT(3),
+	HW_BB_RX_CLEAR_STUCK_HANG = BIT(4),
+	HW_MAC_HANG               = BIT(5),
+};
+
 struct ath9k_ops_config {
 	int dma_beacon_response_time;
 	int sw_beacon_response_time;
-	int additional_swba_backoff;
-	int ack_6mb;
 	u32 cwm_ignore_extcca;
-	bool pcieSerDesWrite;
-	u8 pcie_clock_req;
 	u32 pcie_waen;
 	u8 analog_shiftreg;
 	u32 ofdm_trig_low;
@@ -295,20 +307,11 @@ struct ath9k_ops_config {
 	int serialize_regmode;
 	bool rx_intr_mitigation;
 	bool tx_intr_mitigation;
-#define SPUR_DISABLE        	0
-#define SPUR_ENABLE_IOCTL   	1
-#define SPUR_ENABLE_EEPROM  	2
-#define AR_SPUR_5413_1      	1640
-#define AR_SPUR_5413_2      	1200
-#define AR_NO_SPUR      	0x8000
-#define AR_BASE_FREQ_2GHZ   	2300
-#define AR_BASE_FREQ_5GHZ   	4900
-#define AR_SPUR_FEEQ_BOUND_HT40 19
-#define AR_SPUR_FEEQ_BOUND_HT20 10
-	int spurmode;
-	u16 spurchans[AR_EEPROM_MODAL_SPURS][2];
 	u8 max_txtrig_level;
 	u16 ani_poll_interval; /* ANI poll interval in ms */
+	u16 hw_hang_checks;
+	u16 rimt_first;
+	u16 rimt_last;
 
 	/* Platform specific config */
 	u32 aspm_l1_fix;
@@ -317,6 +320,7 @@ struct ath9k_ops_config {
 	bool xatten_margin_cfg;
 	bool alt_mingainidx;
 	bool no_pll_pwrsave;
+	bool tx_gain_buffalo;
 };
 
 enum ath9k_int {
@@ -460,10 +464,6 @@ struct ath9k_beacon_state {
 	u32 bs_intval;
 #define ATH9K_TSFOOR_THRESHOLD    0x00004240 /* 16k us */
 	u32 bs_dtimperiod;
-	u16 bs_cfpperiod;
-	u16 bs_cfpmaxduration;
-	u32 bs_cfpnext;
-	u16 bs_timoffset;
 	u16 bs_bmissthreshold;
 	u32 bs_sleepduration;
 	u32 bs_tsfoor_threshold;
@@ -499,12 +499,6 @@ struct ath9k_hw_version {
 
 #define AR_GENTMR_BIT(_index)	(1 << (_index))
 
-/*
- * Using de Bruijin sequence to look up 1's index in a 32 bit number
- * debruijn32 = 0000 0111 0111 1100 1011 0101 0011 0001
- */
-#define debruijn32 0x077CB531U
-
 struct ath_gen_timer_configuration {
 	u32 next_addr;
 	u32 period_addr;
@@ -520,12 +514,8 @@ struct ath_gen_timer {
 };
 
 struct ath_gen_timer_table {
-	u32 gen_timer_index[32];
 	struct ath_gen_timer *timers[ATH_MAX_GEN_TIMER];
-	union {
-		unsigned long timer_bits;
-		u16 val;
-	} timer_mask;
+	u16 timer_mask;
 };
 
 struct ath_hw_antcomb_conf {
@@ -596,6 +586,10 @@ struct ath_hw_radar_conf {
  *	register settings through the register initialization.
  */
 struct ath_hw_private_ops {
+	void (*init_hang_checks)(struct ath_hw *ah);
+	bool (*detect_mac_hang)(struct ath_hw *ah);
+	bool (*detect_bb_hang)(struct ath_hw *ah);
+
 	/* Calibration ops */
 	void (*init_cal_settings)(struct ath_hw *ah);
 	bool (*init_cal)(struct ath_hw *ah, struct ath9k_channel *chan);
@@ -690,7 +684,8 @@ struct ath_hw_ops {
 			  struct ath9k_channel *chan,
 			  u8 rxchainmask,
 			  bool longcal);
-	bool (*get_isr)(struct ath_hw *ah, enum ath9k_int *masked);
+	bool (*get_isr)(struct ath_hw *ah, enum ath9k_int *masked,
+			u32 *sync_cause_p);
 	void (*set_txdesc)(struct ath_hw *ah, void *ds,
 			   struct ath_tx_info *i);
 	int (*proc_txdesc)(struct ath_hw *ah, void *ds,
@@ -786,7 +781,6 @@ struct ath_hw {
 	u32 txurn_interrupt_mask;
 	atomic_t intr_ref_cnt;
 	bool chip_fullsleep;
-	u32 atim_window;
 	u32 modes_index;
 
 	/* Calibration */
@@ -865,6 +859,7 @@ struct ath_hw {
 	u32 gpio_mask;
 	u32 gpio_val;
 
+	struct ar5416IniArray ini_dfs;
 	struct ar5416IniArray iniModes;
 	struct ar5416IniArray iniCommon;
 	struct ar5416IniArray iniBB_RfGain;
@@ -921,7 +916,7 @@ struct ath_hw {
 	/* Enterprise mode cap */
 	u32 ent_mode;
 
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_ATH9K_WOW
 	u32 wow_event_mask;
 #endif
 	bool is_clk_25mhz;
@@ -1017,13 +1012,6 @@ bool ath9k_hw_check_alive(struct ath_hw *ah);
 
 bool ath9k_hw_setpower(struct ath_hw *ah, enum ath9k_power_mode mode);
 
-#ifdef CONFIG_ATH9K_DEBUGFS
-void ath9k_debug_sync_cause(struct ath_common *common, u32 sync_cause);
-#else
-static inline void ath9k_debug_sync_cause(struct ath_common *common,
-					  u32 sync_cause) {}
-#endif
-
 /* Generic hw timer primitives */
 struct ath_gen_timer *ath_gen_timer_alloc(struct ath_hw *ah,
 					  void (*trigger)(void *),
@@ -1058,6 +1046,7 @@ void ar9002_hw_enable_async_fifo(struct ath_hw *ah);
  * Code specific to AR9003, we stuff these here to avoid callbacks
  * for older families
  */
+bool ar9003_hw_bb_watchdog_check(struct ath_hw *ah);
 void ar9003_hw_bb_watchdog_config(struct ath_hw *ah);
 void ar9003_hw_bb_watchdog_read(struct ath_hw *ah);
 void ar9003_hw_bb_watchdog_dbg_info(struct ath_hw *ah);
@@ -1127,7 +1116,7 @@ ath9k_hw_get_btcoex_scheme(struct ath_hw *ah)
 #endif /* CONFIG_ATH9K_BTCOEX_SUPPORT */
 
 
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_ATH9K_WOW
 const char *ath9k_hw_wow_event_to_string(u32 wow_event);
 void ath9k_hw_wow_apply_pattern(struct ath_hw *ah, u8 *user_pattern,
 				u8 *user_mask, int pattern_count,
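hw_hang_checks is a bitmask of the new ath9k_hw_hang_checks flags: a family's init_hang_checks callback ORs in the checks its hardware supports, and callers gate the detector calls on those bits (HW_PHYRESTART_CLC_WAR is already tested this way in the hw.c hunk above). A hedged sketch of both sides, with an illustrative flag combination not taken from any specific chip:

	/* family setup: advertise the checks this MAC implements */
	ah->config.hw_hang_checks |= HW_BB_WATCHDOG |
				     HW_PHYRESTART_CLC_WAR |
				     HW_MAC_HANG;

	/* a caller in the driver core can then gate the detector */
	if ((ah->config.hw_hang_checks & HW_MAC_HANG) &&
	    ath9k_hw_detect_mac_hang(ah))
		ath9k_queue_reset(sc, RESET_TYPE_MAC_HANG);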
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index 710192ed27ed..c36de303c8f3 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -470,7 +470,6 @@ static int ath9k_init_queues(struct ath_softc *sc)
 
 	sc->beacon.beaconq = ath9k_hw_beaconq_setup(sc->sc_ah);
 	sc->beacon.cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);
-
 	ath_cabq_update(sc);
 
 	sc->tx.uapsdq = ath_txq_setup(sc, ATH9K_TX_QUEUE_UAPSD, 0);
@@ -554,7 +553,7 @@ static void ath9k_init_misc(struct ath_softc *sc)
 	sc->spec_config.fft_period = 0xF;
 }
 
-static void ath9k_init_platform(struct ath_softc *sc)
+static void ath9k_init_pcoem_platform(struct ath_softc *sc)
 {
 	struct ath_hw *ah = sc->sc_ah;
 	struct ath9k_hw_capabilities *pCap = &ah->caps;
@@ -589,6 +588,9 @@ static void ath9k_init_platform(struct ath_softc *sc)
 	if (sc->driver_data & ATH9K_PCI_AR9565_2ANT)
 		ath_info(common, "WB335 2-ANT card detected\n");
 
+	if (sc->driver_data & ATH9K_PCI_KILLER)
+		ath_info(common, "Killer Wireless card detected\n");
+
 	/*
 	 * Some WB335 cards do not support antenna diversity. Since
 	 * we use a hardcoded value for AR9565 instead of using the
@@ -661,6 +663,27 @@ static void ath9k_eeprom_release(struct ath_softc *sc)
 	release_firmware(sc->sc_ah->eeprom_blob);
 }
 
+static int ath9k_init_soc_platform(struct ath_softc *sc)
+{
+	struct ath9k_platform_data *pdata = sc->dev->platform_data;
+	struct ath_hw *ah = sc->sc_ah;
+	int ret = 0;
+
+	if (!pdata)
+		return 0;
+
+	if (pdata->eeprom_name) {
+		ret = ath9k_eeprom_request(sc, pdata->eeprom_name);
+		if (ret)
+			return ret;
+	}
+
+	if (pdata->tx_gain_buffalo)
+		ah->config.tx_gain_buffalo = true;
+
+	return ret;
+}
+
 static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
 			    const struct ath_bus_ops *bus_ops)
 {
@@ -681,13 +704,13 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
 	ah->reg_ops.read = ath9k_ioread32;
 	ah->reg_ops.write = ath9k_iowrite32;
 	ah->reg_ops.rmw = ath9k_reg_rmw;
-	atomic_set(&ah->intr_ref_cnt, -1);
 	sc->sc_ah = ah;
 	pCap = &ah->caps;
 
 	common = ath9k_hw_common(ah);
 	sc->dfs_detector = dfs_pattern_detector_init(common, NL80211_DFS_UNSET);
 	sc->tx99_power = MAX_RATE_POWER + 1;
+	init_waitqueue_head(&sc->tx_wait);
 
 	if (!pdata) {
 		ah->ah_flags |= AH_USE_EEPROM;
@@ -713,7 +736,11 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
 	/*
 	 * Platform quirks.
 	 */
-	ath9k_init_platform(sc);
+	ath9k_init_pcoem_platform(sc);
+
+	ret = ath9k_init_soc_platform(sc);
+	if (ret)
+		return ret;
 
 	/*
 	 * Enable WLAN/BT RX Antenna diversity only when:
@@ -727,7 +754,6 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
 		common->bt_ant_diversity = 1;
 
 	spin_lock_init(&common->cc_lock);
-
 	spin_lock_init(&sc->sc_serial_rw);
 	spin_lock_init(&sc->sc_pm_lock);
 	mutex_init(&sc->mutex);
@@ -735,11 +761,10 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
 	tasklet_init(&sc->bcon_tasklet, ath9k_beacon_tasklet,
 		     (unsigned long)sc);
 
+	setup_timer(&sc->sleep_timer, ath_ps_full_sleep, (unsigned long)sc);
 	INIT_WORK(&sc->hw_reset_work, ath_reset_work);
-	INIT_WORK(&sc->hw_check_work, ath_hw_check);
 	INIT_WORK(&sc->paprd_work, ath_paprd_calibrate);
 	INIT_DELAYED_WORK(&sc->hw_pll_work, ath_hw_pll_work);
-	setup_timer(&sc->rx_poll_timer, ath_rx_poll, (unsigned long)sc);
 
 	/*
 	 * Cache line size is used to size and align various
@@ -748,12 +773,6 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
 	ath_read_cachesize(common, &csz);
 	common->cachelsz = csz << 2; /* convert to bytes */
 
-	if (pdata && pdata->eeprom_name) {
-		ret = ath9k_eeprom_request(sc, pdata->eeprom_name);
-		if (ret)
-			return ret;
-	}
-
 	/* Initializes the hardware for all supported chipsets */
 	ret = ath9k_hw_init(ah);
 	if (ret)
@@ -851,6 +870,9 @@ static const struct ieee80211_iface_limit if_limits[] = {
 
 static const struct ieee80211_iface_limit if_dfs_limits[] = {
 	{ .max = 1,	.types = BIT(NL80211_IFTYPE_AP) |
+#ifdef CONFIG_MAC80211_MESH
+				 BIT(NL80211_IFTYPE_MESH_POINT) |
+#endif
 				 BIT(NL80211_IFTYPE_ADHOC) },
 };
 
@@ -873,16 +895,7 @@ static const struct ieee80211_iface_combination if_comb[] = {
 	}
 };
 
-#ifdef CONFIG_PM
-static const struct wiphy_wowlan_support ath9k_wowlan_support = {
-	.flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT,
-	.n_patterns = MAX_NUM_USER_PATTERN,
-	.pattern_min_len = 1,
-	.pattern_max_len = MAX_PATTERN_SIZE,
-};
-#endif
-
-void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
+static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
 {
 	struct ath_hw *ah = sc->sc_ah;
 	struct ath_common *common = ath9k_hw_common(ah);
@@ -931,19 +944,8 @@ void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
 	hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_5_10_MHZ;
 	hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
 
-#ifdef CONFIG_PM_SLEEP
-	if ((ah->caps.hw_caps & ATH9K_HW_WOW_DEVICE_CAPABLE) &&
-	    (sc->driver_data & ATH9K_PCI_WOW) &&
-	    device_can_wakeup(sc->dev))
-		hw->wiphy->wowlan = &ath9k_wowlan_support;
-
-	atomic_set(&sc->wow_sleep_proc_intr, -1);
-	atomic_set(&sc->wow_got_bmiss_intr, -1);
-#endif
-
 	hw->queues = 4;
 	hw->max_rates = 4;
-	hw->channel_change_time = 5000;
 	hw->max_listen_interval = 1;
 	hw->max_rate_tries = 10;
 	hw->sta_data_size = sizeof(struct ath_node);
@@ -966,6 +968,7 @@ void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
 		hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
 			&sc->sbands[IEEE80211_BAND_5GHZ];
 
+	ath9k_init_wow(hw);
 	ath9k_reload_chainmask_settings(sc);
 
 	SET_IEEE80211_PERM_ADDR(hw, common->macaddr);
@@ -1064,6 +1067,7 @@ static void ath9k_deinit_softc(struct ath_softc *sc)
 		if (ATH_TXQ_SETUP(sc, i))
 			ath_tx_cleanupq(sc, &sc->tx.txq[i]);
 
+	del_timer_sync(&sc->sleep_timer);
 	ath9k_hw_deinit(sc->sc_ah);
 	if (sc->dfs_detector != NULL)
 		sc->dfs_detector->exit(sc->dfs_detector);
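ath9k_init_soc_platform() pulls its quirks from struct ath9k_platform_data, so an AHB/SoC board file can request a firmware-supplied EEPROM blob and the Buffalo TX-gain override. A hedged example of such platform data (eeprom_name already exists in ath9k_platform.h; the tx_gain_buffalo member is assumed to come from the companion platform-header change, and the file name is made up):

#include <linux/ath9k_platform.h>

static struct ath9k_platform_data example_ath9k_pdata = {
	.eeprom_name	 = "ath9k-eeprom-example.bin",	/* fetched by the driver at init */
	.tx_gain_buffalo = true,
};

The structure reaches the driver through the platform device's dev.platform_data, which is what sc->dev->platform_data dereferences above.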
diff --git a/drivers/net/wireless/ath/ath9k/link.c b/drivers/net/wireless/ath/ath9k/link.c
index aed7e29dc50f..30dcef5aba10 100644
--- a/drivers/net/wireless/ath/ath9k/link.c
+++ b/drivers/net/wireless/ath/ath9k/link.c
@@ -65,50 +65,26 @@ void ath_tx_complete_poll_work(struct work_struct *work)
 /*
  * Checks if the BB/MAC is hung.
  */
-void ath_hw_check(struct work_struct *work)
+bool ath_hw_check(struct ath_softc *sc)
 {
-	struct ath_softc *sc = container_of(work, struct ath_softc, hw_check_work);
 	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
-	unsigned long flags;
-	int busy;
-	u8 is_alive, nbeacon = 1;
 	enum ath_reset_type type;
+	bool is_alive;
 
 	ath9k_ps_wakeup(sc);
+
 	is_alive = ath9k_hw_check_alive(sc->sc_ah);
 
-	if ((is_alive && !AR_SREV_9300(sc->sc_ah)) || sc->tx99_state)
-		goto out;
-	else if (!is_alive && AR_SREV_9300(sc->sc_ah)) {
+	if (!is_alive) {
 		ath_dbg(common, RESET,
-			"DCU stuck is detected. Schedule chip reset\n");
+			"HW hang detected, schedule chip reset\n");
 		type = RESET_TYPE_MAC_HANG;
-		goto sched_reset;
-	}
-
-	spin_lock_irqsave(&common->cc_lock, flags);
-	busy = ath_update_survey_stats(sc);
-	spin_unlock_irqrestore(&common->cc_lock, flags);
-
-	ath_dbg(common, RESET, "Possible baseband hang, busy=%d (try %d)\n",
-		busy, sc->hw_busy_count + 1);
-	if (busy >= 99) {
-		if (++sc->hw_busy_count >= 3) {
-			type = RESET_TYPE_BB_HANG;
-			goto sched_reset;
-		}
-	} else if (busy >= 0) {
-		sc->hw_busy_count = 0;
-		nbeacon = 3;
+		ath9k_queue_reset(sc, type);
 	}
 
-	ath_start_rx_poll(sc, nbeacon);
-	goto out;
-
-sched_reset:
-	ath9k_queue_reset(sc, type);
-out:
 	ath9k_ps_restore(sc);
+
+	return is_alive;
 }
 
 /*
@@ -162,29 +138,6 @@ void ath_hw_pll_work(struct work_struct *work)
 }
 
 /*
- * RX Polling - monitors baseband hangs.
- */
-void ath_start_rx_poll(struct ath_softc *sc, u8 nbeacon)
-{
-	if (!AR_SREV_9300(sc->sc_ah))
-		return;
-
-	if (!test_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags))
-		return;
-
-	mod_timer(&sc->rx_poll_timer, jiffies + msecs_to_jiffies
-		  (nbeacon * sc->cur_beacon_conf.beacon_interval));
-}
-
-void ath_rx_poll(unsigned long data)
-{
-	struct ath_softc *sc = (struct ath_softc *)data;
-
-	if (!test_bit(SC_OP_INVALID, &sc->sc_flags))
-		ieee80211_queue_work(sc->hw, &sc->hw_check_work);
-}
-
-/*
  * PA Pre-distortion.
  */
 static void ath_paprd_activate(struct ath_softc *sc)
@@ -409,10 +362,10 @@ void ath_ani_calibrate(unsigned long data)
 
 	/* Call ANI routine if necessary */
 	if (aniflag) {
-		spin_lock_irqsave(&common->cc_lock, flags);
+		spin_lock(&common->cc_lock);
 		ath9k_hw_ani_monitor(ah, ah->curchan);
 		ath_update_survey_stats(sc);
-		spin_unlock_irqrestore(&common->cc_lock, flags);
+		spin_unlock(&common->cc_lock);
 	}
 
 	/* Perform calibration if necessary */
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
index 6a18f9d3e9cc..5f727588ca27 100644
--- a/drivers/net/wireless/ath/ath9k/mac.c
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -481,8 +481,7 @@ bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
 			    | AR_Q_MISC_CBR_INCR_DIS0);
 		value = (qi->tqi_readyTime -
 			 (ah->config.sw_beacon_response_time -
-			  ah->config.dma_beacon_response_time) -
-			 ah->config.additional_swba_backoff) * 1024;
+			  ah->config.dma_beacon_response_time)) * 1024;
 		REG_WRITE(ah, AR_QRDYTIMECFG(q),
 			  value | AR_Q_RDYTIMECFG_EN);
 		REG_SET_BIT(ah, AR_DMISC(q),
@@ -550,25 +549,25 @@ int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
 
 	if (ads.ds_rxstatus8 & AR_PostDelimCRCErr) {
 		rs->rs_rssi = ATH9K_RSSI_BAD;
-		rs->rs_rssi_ctl0 = ATH9K_RSSI_BAD;
-		rs->rs_rssi_ctl1 = ATH9K_RSSI_BAD;
-		rs->rs_rssi_ctl2 = ATH9K_RSSI_BAD;
-		rs->rs_rssi_ext0 = ATH9K_RSSI_BAD;
-		rs->rs_rssi_ext1 = ATH9K_RSSI_BAD;
-		rs->rs_rssi_ext2 = ATH9K_RSSI_BAD;
+		rs->rs_rssi_ctl[0] = ATH9K_RSSI_BAD;
+		rs->rs_rssi_ctl[1] = ATH9K_RSSI_BAD;
+		rs->rs_rssi_ctl[2] = ATH9K_RSSI_BAD;
+		rs->rs_rssi_ext[0] = ATH9K_RSSI_BAD;
+		rs->rs_rssi_ext[1] = ATH9K_RSSI_BAD;
+		rs->rs_rssi_ext[2] = ATH9K_RSSI_BAD;
 	} else {
 		rs->rs_rssi = MS(ads.ds_rxstatus4, AR_RxRSSICombined);
-		rs->rs_rssi_ctl0 = MS(ads.ds_rxstatus0,
+		rs->rs_rssi_ctl[0] = MS(ads.ds_rxstatus0,
 						AR_RxRSSIAnt00);
-		rs->rs_rssi_ctl1 = MS(ads.ds_rxstatus0,
+		rs->rs_rssi_ctl[1] = MS(ads.ds_rxstatus0,
 						AR_RxRSSIAnt01);
-		rs->rs_rssi_ctl2 = MS(ads.ds_rxstatus0,
+		rs->rs_rssi_ctl[2] = MS(ads.ds_rxstatus0,
 						AR_RxRSSIAnt02);
-		rs->rs_rssi_ext0 = MS(ads.ds_rxstatus4,
+		rs->rs_rssi_ext[0] = MS(ads.ds_rxstatus4,
 						AR_RxRSSIAnt10);
-		rs->rs_rssi_ext1 = MS(ads.ds_rxstatus4,
+		rs->rs_rssi_ext[1] = MS(ads.ds_rxstatus4,
 						AR_RxRSSIAnt11);
-		rs->rs_rssi_ext2 = MS(ads.ds_rxstatus4,
+		rs->rs_rssi_ext[2] = MS(ads.ds_rxstatus4,
 						AR_RxRSSIAnt12);
 	}
 	if (ads.ds_rxstatus8 & AR_RxKeyIdxValid)
@@ -923,11 +922,29 @@ void ath9k_hw_set_interrupts(struct ath_hw *ah)
 			mask2 |= AR_IMR_S2_CST;
 	}
 
+	if (ah->config.hw_hang_checks & HW_BB_WATCHDOG) {
+		if (ints & ATH9K_INT_BB_WATCHDOG) {
+			mask |= AR_IMR_BCNMISC;
+			mask2 |= AR_IMR_S2_BB_WATCHDOG;
+		}
+	}
+
 	ath_dbg(common, INTERRUPT, "new IMR 0x%x\n", mask);
 	REG_WRITE(ah, AR_IMR, mask);
-	ah->imrs2_reg &= ~(AR_IMR_S2_TIM | AR_IMR_S2_DTIM | AR_IMR_S2_DTIMSYNC |
-			   AR_IMR_S2_CABEND | AR_IMR_S2_CABTO |
-			   AR_IMR_S2_TSFOOR | AR_IMR_S2_GTT | AR_IMR_S2_CST);
+	ah->imrs2_reg &= ~(AR_IMR_S2_TIM |
+			   AR_IMR_S2_DTIM |
+			   AR_IMR_S2_DTIMSYNC |
+			   AR_IMR_S2_CABEND |
+			   AR_IMR_S2_CABTO |
+			   AR_IMR_S2_TSFOOR |
+			   AR_IMR_S2_GTT |
+			   AR_IMR_S2_CST);
+
+	if (ah->config.hw_hang_checks & HW_BB_WATCHDOG) {
+		if (ints & ATH9K_INT_BB_WATCHDOG)
+			ah->imrs2_reg &= ~AR_IMR_S2_BB_WATCHDOG;
+	}
+
 	ah->imrs2_reg |= mask2;
 	REG_WRITE(ah, AR_IMR_S2, ah->imrs2_reg);
 
diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h
index e3eed81f2439..10271373a0cd 100644
--- a/drivers/net/wireless/ath/ath9k/mac.h
+++ b/drivers/net/wireless/ath/ath9k/mac.h
@@ -133,12 +133,8 @@ struct ath_rx_status {
 	u8 rs_rate;
 	u8 rs_antenna;
 	u8 rs_more;
-	int8_t rs_rssi_ctl0;
-	int8_t rs_rssi_ctl1;
-	int8_t rs_rssi_ctl2;
-	int8_t rs_rssi_ext0;
-	int8_t rs_rssi_ext1;
-	int8_t rs_rssi_ext2;
+	int8_t rs_rssi_ctl[3];
+	int8_t rs_rssi_ext[3];
 	u8 rs_isaggr;
 	u8 rs_firstaggr;
 	u8 rs_moreaggr;
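
With rs_rssi_ctl[] and rs_rssi_ext[] turned into arrays here, per-chain handling can be expressed as loops or a memset instead of six separate assignments (the mac.c hunk above still spells them out individually). A hedged, standalone sketch of the "mark every chain bad" path; the -128 sentinel is an assumption standing in for ATH9K_RSSI_BAD:

#include <string.h>
#include <stdint.h>

#define DEMO_RSSI_BAD	(-128)	/* assumed ATH9K_RSSI_BAD value */

struct demo_rx_status {
	int8_t rs_rssi_ctl[3];
	int8_t rs_rssi_ext[3];
};

/* Equivalent of setting all six per-chain RSSI fields to "bad". */
static void mark_rssi_bad(struct demo_rx_status *rs)
{
	memset(rs->rs_rssi_ctl, DEMO_RSSI_BAD, sizeof(rs->rs_rssi_ctl));
	memset(rs->rs_rssi_ext, DEMO_RSSI_BAD, sizeof(rs->rs_rssi_ext));
}
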
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 21aa09e0e825..5924f72dd493 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -82,6 +82,22 @@ static bool ath9k_setpower(struct ath_softc *sc, enum ath9k_power_mode mode)
 	return ret;
 }
 
+void ath_ps_full_sleep(unsigned long data)
+{
+	struct ath_softc *sc = (struct ath_softc *) data;
+	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+	bool reset;
+
+	spin_lock(&common->cc_lock);
+	ath_hw_cycle_counters_update(common);
+	spin_unlock(&common->cc_lock);
+
+	ath9k_hw_setrxabort(sc->sc_ah, 1);
+	ath9k_hw_stopdmarecv(sc->sc_ah, &reset);
+
+	ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_FULL_SLEEP);
+}
+
 void ath9k_ps_wakeup(struct ath_softc *sc)
 {
 	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
@@ -92,6 +108,7 @@ void ath9k_ps_wakeup(struct ath_softc *sc)
 	if (++sc->ps_usecount != 1)
 		goto unlock;
 
+	del_timer_sync(&sc->sleep_timer);
 	power_mode = sc->sc_ah->power_mode;
 	ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_AWAKE);
 
@@ -117,17 +134,17 @@ void ath9k_ps_restore(struct ath_softc *sc)
 	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
 	enum ath9k_power_mode mode;
 	unsigned long flags;
-	bool reset;
 
 	spin_lock_irqsave(&sc->sc_pm_lock, flags);
 	if (--sc->ps_usecount != 0)
 		goto unlock;
 
 	if (sc->ps_idle) {
-		ath9k_hw_setrxabort(sc->sc_ah, 1);
-		ath9k_hw_stopdmarecv(sc->sc_ah, &reset);
-		mode = ATH9K_PM_FULL_SLEEP;
-	} else if (sc->ps_enabled &&
+		mod_timer(&sc->sleep_timer, jiffies + HZ / 10);
+		goto unlock;
+	}
+
+	if (sc->ps_enabled &&
 		   !(sc->ps_flags & (PS_WAIT_FOR_BEACON |
 				     PS_WAIT_FOR_CAB |
 				     PS_WAIT_FOR_PSPOLL_DATA |
@@ -153,7 +170,6 @@ void ath9k_ps_restore(struct ath_softc *sc)
 static void __ath_cancel_work(struct ath_softc *sc)
 {
 	cancel_work_sync(&sc->paprd_work);
-	cancel_work_sync(&sc->hw_check_work);
 	cancel_delayed_work_sync(&sc->tx_complete_work);
 	cancel_delayed_work_sync(&sc->hw_pll_work);
 
@@ -163,13 +179,13 @@ static void __ath_cancel_work(struct ath_softc *sc)
 #endif
 }
 
-static void ath_cancel_work(struct ath_softc *sc)
+void ath_cancel_work(struct ath_softc *sc)
 {
 	__ath_cancel_work(sc);
 	cancel_work_sync(&sc->hw_reset_work);
 }
 
-static void ath_restart_work(struct ath_softc *sc)
+void ath_restart_work(struct ath_softc *sc)
 {
 	ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 0);
 
@@ -177,7 +193,6 @@ static void ath_restart_work(struct ath_softc *sc)
 		ieee80211_queue_delayed_work(sc->hw, &sc->hw_pll_work,
 				     msecs_to_jiffies(ATH_PLL_WORK_INTERVAL));
 
-	ath_start_rx_poll(sc, 3);
 	ath_start_ani(sc);
 }
 
@@ -187,11 +202,7 @@ static bool ath_prepare_reset(struct ath_softc *sc)
 	bool ret = true;
 
 	ieee80211_stop_queues(sc->hw);
-
-	sc->hw_busy_count = 0;
 	ath_stop_ani(sc);
-	del_timer_sync(&sc->rx_poll_timer);
-
 	ath9k_hw_disable_interrupts(ah);
 
 	if (!ath_drain_all_txq(sc))
@@ -247,6 +258,7 @@ static bool ath_complete_reset(struct ath_softc *sc, bool start)
 		}
 	}
 
+	sc->gtt_cnt = 0;
 	ieee80211_wake_queues(sc->hw);
 
 	return true;
@@ -319,7 +331,6 @@ static int ath_set_channel(struct ath_softc *sc, struct cfg80211_chan_def *chand
 	struct ieee80211_hw *hw = sc->hw;
 	struct ath9k_channel *hchan;
 	struct ieee80211_channel *chan = chandef->chan;
-	unsigned long flags;
 	bool offchannel;
 	int pos = chan->hw_value;
 	int old_pos = -1;
@@ -337,9 +348,9 @@ static int ath_set_channel(struct ath_softc *sc, struct cfg80211_chan_def *chand
 		chan->center_freq, chandef->width);
 
 	/* update survey stats for the old channel before switching */
-	spin_lock_irqsave(&common->cc_lock, flags);
+	spin_lock_bh(&common->cc_lock);
 	ath_update_survey_stats(sc);
-	spin_unlock_irqrestore(&common->cc_lock, flags);
+	spin_unlock_bh(&common->cc_lock);
 
 	ath9k_cmn_get_channel(hw, ah, chandef);
 
@@ -410,12 +421,6 @@ static void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta,
 	an->vif = vif;
 
 	ath_tx_node_init(sc, an);
-
-	if (sta->ht_cap.ht_supported) {
-		an->maxampdu = 1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
-				     sta->ht_cap.ampdu_factor);
-		an->mpdudensity = ath9k_parse_mpdudensity(sta->ht_cap.ampdu_density);
-	}
 }
 
 static void ath_node_detach(struct ath_softc *sc, struct ieee80211_sta *sta)
@@ -437,14 +442,8 @@ void ath9k_tasklet(unsigned long data)
 	ath9k_ps_wakeup(sc);
 	spin_lock(&sc->sc_pcu_lock);
 
-	if ((status & ATH9K_INT_FATAL) ||
-	    (status & ATH9K_INT_BB_WATCHDOG)) {
-
-		if (status & ATH9K_INT_FATAL)
-			type = RESET_TYPE_FATAL_INT;
-		else
-			type = RESET_TYPE_BB_WATCHDOG;
-
+	if (status & ATH9K_INT_FATAL) {
+		type = RESET_TYPE_FATAL_INT;
 		ath9k_queue_reset(sc, type);
 
 		/*
@@ -456,6 +455,41 @@ void ath9k_tasklet(unsigned long data)
 		goto out;
 	}
 
+	if ((ah->config.hw_hang_checks & HW_BB_WATCHDOG) &&
+	    (status & ATH9K_INT_BB_WATCHDOG)) {
+		spin_lock(&common->cc_lock);
+		ath_hw_cycle_counters_update(common);
+		ar9003_hw_bb_watchdog_dbg_info(ah);
+		spin_unlock(&common->cc_lock);
+
+		if (ar9003_hw_bb_watchdog_check(ah)) {
+			type = RESET_TYPE_BB_WATCHDOG;
+			ath9k_queue_reset(sc, type);
+
+			/*
+			 * Increment the ref. counter here so that
+			 * interrupts are enabled in the reset routine.
+			 */
+			atomic_inc(&ah->intr_ref_cnt);
+			ath_dbg(common, ANY,
+				"BB_WATCHDOG: Skipping interrupts\n");
+			goto out;
+		}
+	}
+
+	if (status & ATH9K_INT_GTT) {
+		sc->gtt_cnt++;
+
+		if ((sc->gtt_cnt >= MAX_GTT_CNT) && !ath9k_hw_check_alive(ah)) {
+			type = RESET_TYPE_TX_GTT;
+			ath9k_queue_reset(sc, type);
+			atomic_inc(&ah->intr_ref_cnt);
+			ath_dbg(common, ANY,
+				"GTT: Skipping interrupts\n");
+			goto out;
+		}
+	}
+
 	spin_lock_irqsave(&sc->sc_pm_lock, flags);
 	if ((status & ATH9K_INT_TSFOOR) && sc->ps_enabled) {
 		/*
@@ -483,12 +517,26 @@ void ath9k_tasklet(unsigned long data)
 	}
 
 	if (status & ATH9K_INT_TX) {
-		if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
+		if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
+			/*
+			 * For EDMA chips, TX completion is enabled for the
+			 * beacon queue, so if a beacon has been transmitted
+			 * successfully after a GTT interrupt, the GTT counter
+			 * gets reset to zero here.
+			 */
+			sc->gtt_cnt = 0;
+
 			ath_tx_edma_tasklet(sc);
-		else
+		} else {
 			ath_tx_tasklet(sc);
+		}
+
+		wake_up(&sc->tx_wait);
 	}
 
+	if (status & ATH9K_INT_GENTIMER)
+		ath_gen_timer_isr(sc->sc_ah);
+
 	ath9k_btcoex_handle_interrupt(sc, status);
 
 	/* re-enable hardware interrupt */
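
The tasklet changes above add a GTT (global TX timeout) counter: it is incremented on each ATH9K_INT_GTT, cleared whenever TX completions show progress (and after a completed reset, see sc->gtt_cnt = 0 earlier), and a chip reset is queued only after several consecutive timeouts with a MAC that also fails the aliveness check. A standalone sketch of that state machine; MAX_GTT_CNT = 5 and hw_alive() are assumptions standing in for the driver's definitions:

#include <stdbool.h>
#include <stdio.h>

#define MAX_GTT_CNT	5

struct gtt_state {
	unsigned int gtt_cnt;	/* consecutive timeouts without TX progress */
};

static bool hw_alive(void)
{
	return false;	/* pretend the MAC looks dead for the demo */
}

/* GTT interrupt: returns true when a chip reset should be scheduled. */
static bool on_gtt_interrupt(struct gtt_state *st)
{
	return ++st->gtt_cnt >= MAX_GTT_CNT && !hw_alive();
}

/* TX completion (or a finished reset) proves progress: clear the count. */
static void on_tx_progress(struct gtt_state *st)
{
	st->gtt_cnt = 0;
}

int main(void)
{
	struct gtt_state st = { 0 };
	int i;

	for (i = 0; i < 6; i++)
		if (on_gtt_interrupt(&st))
			printf("timeout %d: queue RESET_TYPE_TX_GTT\n", i + 1);

	on_tx_progress(&st);
	return 0;
}
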
@@ -511,14 +559,15 @@ irqreturn_t ath_isr(int irq, void *dev)
 		ATH9K_INT_TX |			\
 		ATH9K_INT_BMISS |		\
 		ATH9K_INT_CST |			\
+		ATH9K_INT_GTT |			\
 		ATH9K_INT_TSFOOR |		\
 		ATH9K_INT_GENTIMER |		\
 		ATH9K_INT_MCI)
 
 	struct ath_softc *sc = dev;
 	struct ath_hw *ah = sc->sc_ah;
-	struct ath_common *common = ath9k_hw_common(ah);
 	enum ath9k_int status;
+	u32 sync_cause = 0;
 	bool sched = false;
 
 	/*
@@ -545,7 +594,8 @@ irqreturn_t ath_isr(int irq, void *dev)
 	 * bits we haven't explicitly enabled so we mask the
 	 * value to insure we only process bits we requested.
 	 */
-	ath9k_hw_getisr(ah, &status);	/* NB: clears ISR too */
+	ath9k_hw_getisr(ah, &status, &sync_cause); /* NB: clears ISR too */
+	ath9k_debug_sync_cause(sc, sync_cause);
 	status &= ah->imask;	/* discard unasked-for bits */
 
 	/*
@@ -569,25 +619,19 @@ irqreturn_t ath_isr(int irq, void *dev)
 	    !(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)))
 		goto chip_reset;
 
-	if ((ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
-	    (status & ATH9K_INT_BB_WATCHDOG)) {
-
-		spin_lock(&common->cc_lock);
-		ath_hw_cycle_counters_update(common);
-		ar9003_hw_bb_watchdog_dbg_info(ah);
-		spin_unlock(&common->cc_lock);
-
+	if ((ah->config.hw_hang_checks & HW_BB_WATCHDOG) &&
+	    (status & ATH9K_INT_BB_WATCHDOG))
 		goto chip_reset;
-	}
-#ifdef CONFIG_PM_SLEEP
+
+#ifdef CONFIG_ATH9K_WOW
 	if (status & ATH9K_INT_BMISS) {
 		if (atomic_read(&sc->wow_sleep_proc_intr) == 0) {
-			ath_dbg(common, ANY, "during WoW we got a BMISS\n");
 			atomic_inc(&sc->wow_got_bmiss_intr);
 			atomic_dec(&sc->wow_sleep_proc_intr);
 		}
 	}
 #endif
+
 	if (status & ATH9K_INT_SWBA)
 		tasklet_schedule(&sc->bcon_tasklet);
 
@@ -627,7 +671,7 @@ chip_reset:
 #undef SCHED_INTR
 }
 
-static int ath_reset(struct ath_softc *sc)
+int ath_reset(struct ath_softc *sc)
 {
 	int r;
 
@@ -705,12 +749,19 @@ static int ath9k_start(struct ieee80211_hw *hw)
 
 	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
 		ah->imask |= ATH9K_INT_RXHP |
-			     ATH9K_INT_RXLP |
-			     ATH9K_INT_BB_WATCHDOG;
+			     ATH9K_INT_RXLP;
 	else
 		ah->imask |= ATH9K_INT_RX;
 
-	ah->imask |= ATH9K_INT_GTT;
+	if (ah->config.hw_hang_checks & HW_BB_WATCHDOG)
+		ah->imask |= ATH9K_INT_BB_WATCHDOG;
+
+	/*
+	 * Enable GTT interrupts only for AR9003/AR9004 chips
+	 * for now.
+	 */
+	if (AR_SREV_9300_20_OR_LATER(ah))
+		ah->imask |= ATH9K_INT_GTT;
 
 	if (ah->caps.hw_caps & ATH9K_HW_CAP_HT)
 		ah->imask |= ATH9K_INT_CST;
@@ -735,6 +786,8 @@ static int ath9k_start(struct ieee80211_hw *hw)
 	 */
 	ath9k_cmn_init_crypto(sc->sc_ah);
 
+	ath9k_hw_reset_tsf(ah);
+
 	spin_unlock_bh(&sc->sc_pcu_lock);
 
 	mutex_unlock(&sc->mutex);
@@ -831,7 +884,6 @@ static void ath9k_stop(struct ieee80211_hw *hw)
 	mutex_lock(&sc->mutex);
 
 	ath_cancel_work(sc);
-	del_timer_sync(&sc->rx_poll_timer);
 
 	if (test_bit(SC_OP_INVALID, &sc->sc_flags)) {
 		ath_dbg(common, ANY, "Device not present\n");
@@ -1636,13 +1688,8 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
 	}
 
 	if ((changed & BSS_CHANGED_BEACON_ENABLED) ||
-	    (changed & BSS_CHANGED_BEACON_INT)) {
-		if (ah->opmode == NL80211_IFTYPE_AP &&
-		    bss_conf->enable_beacon)
-			ath9k_set_tsfadjust(sc, vif);
-		if (ath9k_allow_beacon_config(sc, vif))
-			ath9k_beacon_config(sc, vif, changed);
-	}
+	    (changed & BSS_CHANGED_BEACON_INT))
+		ath9k_beacon_config(sc, vif, changed);
 
 	if (changed & BSS_CHANGED_ERP_SLOT) {
 		if (bss_conf->use_short_slot)
@@ -1767,13 +1814,12 @@ static int ath9k_get_survey(struct ieee80211_hw *hw, int idx,
 	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
 	struct ieee80211_supported_band *sband;
 	struct ieee80211_channel *chan;
-	unsigned long flags;
 	int pos;
 
 	if (config_enabled(CONFIG_ATH9K_TX99))
 		return -EOPNOTSUPP;
 
-	spin_lock_irqsave(&common->cc_lock, flags);
+	spin_lock_bh(&common->cc_lock);
 	if (idx == 0)
 		ath_update_survey_stats(sc);
 
@@ -1787,7 +1833,7 @@ static int ath9k_get_survey(struct ieee80211_hw *hw, int idx,
 		sband = hw->wiphy->bands[IEEE80211_BAND_5GHZ];
 
 	if (!sband || idx >= sband->n_channels) {
-		spin_unlock_irqrestore(&common->cc_lock, flags);
+		spin_unlock_bh(&common->cc_lock);
 		return -ENOENT;
 	}
 
@@ -1795,7 +1841,7 @@ static int ath9k_get_survey(struct ieee80211_hw *hw, int idx,
 	pos = chan->hw_value;
 	memcpy(survey, &sc->survey[pos], sizeof(*survey));
 	survey->channel = chan;
-	spin_unlock_irqrestore(&common->cc_lock, flags);
+	spin_unlock_bh(&common->cc_lock);
 
 	return 0;
 }
@@ -1818,13 +1864,31 @@ static void ath9k_set_coverage_class(struct ieee80211_hw *hw, u8 coverage_class)
 	mutex_unlock(&sc->mutex);
 }
 
+static bool ath9k_has_tx_pending(struct ath_softc *sc)
+{
+	int i, npend = 0;
+
+	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
+		if (!ATH_TXQ_SETUP(sc, i))
+			continue;
+
+		if (!sc->tx.txq[i].axq_depth)
+			continue;
+
+		npend = ath9k_has_pending_frames(sc, &sc->tx.txq[i]);
+		if (npend)
+			break;
+	}
+
+	return !!npend;
+}
+
 static void ath9k_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
 {
 	struct ath_softc *sc = hw->priv;
 	struct ath_hw *ah = sc->sc_ah;
 	struct ath_common *common = ath9k_hw_common(ah);
-	int timeout = 200; /* ms */
-	int i, j;
+	int timeout = HZ / 5; /* 200 ms */
 	bool drain_txq;
 
 	mutex_lock(&sc->mutex);
@@ -1842,25 +1906,9 @@ static void ath9k_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
 		return;
 	}
 
-	for (j = 0; j < timeout; j++) {
-		bool npend = false;
-
-		if (j)
-			usleep_range(1000, 2000);
-
-		for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
-			if (!ATH_TXQ_SETUP(sc, i))
-				continue;
-
-			npend = ath9k_has_pending_frames(sc, &sc->tx.txq[i]);
-
-			if (npend)
-				break;
-		}
-
-		if (!npend)
-		    break;
-	}
+	if (wait_event_timeout(sc->tx_wait, !ath9k_has_tx_pending(sc),
+			       timeout) > 0)
+		drop = false;
 
 	if (drop) {
 		ath9k_ps_wakeup(sc);
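
The flush rework above replaces a 200-iteration usleep_range() poll with a waitqueue: the TX completion path calls wake_up(&sc->tx_wait) and ath9k_flush() sleeps in wait_event_timeout() for up to 200 ms, dropping frames only if the queues did not drain in time. A hedged kernel-style fragment of the same pattern (not the driver's code; kernel context and illustrative names assumed):

#include <linux/wait.h>
#include <linux/jiffies.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct flush_demo {
	wait_queue_head_t tx_wait;
	spinlock_t lock;
	unsigned int pending;	/* frames still queued for TX */
};

static void flush_demo_init(struct flush_demo *fd)
{
	init_waitqueue_head(&fd->tx_wait);
	spin_lock_init(&fd->lock);
	fd->pending = 0;
}

/* TX completion path: account for the frame, then wake any flusher. */
static void flush_demo_tx_done(struct flush_demo *fd)
{
	spin_lock_bh(&fd->lock);
	if (fd->pending)
		fd->pending--;
	spin_unlock_bh(&fd->lock);

	wake_up(&fd->tx_wait);
}

/* Flush: returns true if the queues drained within 200 ms, in which case
 * the caller does not need to drop anything. */
static bool flush_demo_wait(struct flush_demo *fd)
{
	return wait_event_timeout(fd->tx_wait, fd->pending == 0,
				  HZ / 5) > 0;
}
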
@@ -2022,333 +2070,6 @@ static int ath9k_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
 	return 0;
 }
 
-#ifdef CONFIG_PM_SLEEP
-
-static void ath9k_wow_map_triggers(struct ath_softc *sc,
-				   struct cfg80211_wowlan *wowlan,
-				   u32 *wow_triggers)
-{
-	if (wowlan->disconnect)
-		*wow_triggers |= AH_WOW_LINK_CHANGE |
-				 AH_WOW_BEACON_MISS;
-	if (wowlan->magic_pkt)
-		*wow_triggers |= AH_WOW_MAGIC_PATTERN_EN;
-
-	if (wowlan->n_patterns)
-		*wow_triggers |= AH_WOW_USER_PATTERN_EN;
-
-	sc->wow_enabled = *wow_triggers;
-
-}
-
-static void ath9k_wow_add_disassoc_deauth_pattern(struct ath_softc *sc)
-{
-	struct ath_hw *ah = sc->sc_ah;
-	struct ath_common *common = ath9k_hw_common(ah);
-	int pattern_count = 0;
-	int i, byte_cnt;
-	u8 dis_deauth_pattern[MAX_PATTERN_SIZE];
-	u8 dis_deauth_mask[MAX_PATTERN_SIZE];
-
-	memset(dis_deauth_pattern, 0, MAX_PATTERN_SIZE);
-	memset(dis_deauth_mask, 0, MAX_PATTERN_SIZE);
-
-	/*
-	 * Create Dissassociate / Deauthenticate packet filter
-	 *
-	 *     2 bytes        2 byte    6 bytes   6 bytes  6 bytes
-	 *  +--------------+----------+---------+--------+--------+----
-	 *  + Frame Control+ Duration +   DA    +  SA    +  BSSID +
-	 *  +--------------+----------+---------+--------+--------+----
-	 *
-	 * The above is the management frame format for disassociate/
-	 * deauthenticate pattern, from this we need to match the first byte
-	 * of 'Frame Control' and DA, SA, and BSSID fields
-	 * (skipping 2nd byte of FC and Duration feild.
-	 *
-	 * Disassociate pattern
-	 * --------------------
-	 * Frame control = 00 00 1010
-	 * DA, SA, BSSID = x:x:x:x:x:x
-	 * Pattern will be A0000000 | x:x:x:x:x:x | x:x:x:x:x:x
-	 *			    | x:x:x:x:x:x  -- 22 bytes
-	 *
-	 * Deauthenticate pattern
-	 * ----------------------
-	 * Frame control = 00 00 1100
-	 * DA, SA, BSSID = x:x:x:x:x:x
-	 * Pattern will be C0000000 | x:x:x:x:x:x | x:x:x:x:x:x
-	 *			    | x:x:x:x:x:x  -- 22 bytes
-	 */
-
-	/* Create Disassociate Pattern first */
-
-	byte_cnt = 0;
-
-	/* Fill out the mask with all FF's */
-
-	for (i = 0; i < MAX_PATTERN_MASK_SIZE; i++)
-		dis_deauth_mask[i] = 0xff;
-
-	/* copy the first byte of frame control field */
-	dis_deauth_pattern[byte_cnt] = 0xa0;
-	byte_cnt++;
-
-	/* skip 2nd byte of frame control and Duration field */
-	byte_cnt += 3;
-
-	/*
-	 * need not match the destination mac address, it can be a broadcast
-	 * mac address or an unicast to this station
-	 */
-	byte_cnt += 6;
-
-	/* copy the source mac address */
-	memcpy((dis_deauth_pattern + byte_cnt), common->curbssid, ETH_ALEN);
-
-	byte_cnt += 6;
-
-	/* copy the bssid, its same as the source mac address */
-
-	memcpy((dis_deauth_pattern + byte_cnt), common->curbssid, ETH_ALEN);
-
-	/* Create Disassociate pattern mask */
-
-	dis_deauth_mask[0] = 0xfe;
-	dis_deauth_mask[1] = 0x03;
-	dis_deauth_mask[2] = 0xc0;
-
-	ath_dbg(common, WOW, "Adding disassoc/deauth patterns for WoW\n");
-
-	ath9k_hw_wow_apply_pattern(ah, dis_deauth_pattern, dis_deauth_mask,
-				   pattern_count, byte_cnt);
-
-	pattern_count++;
-	/*
-	 * for de-authenticate pattern, only the first byte of the frame
-	 * control field gets changed from 0xA0 to 0xC0
-	 */
-	dis_deauth_pattern[0] = 0xC0;
-
-	ath9k_hw_wow_apply_pattern(ah, dis_deauth_pattern, dis_deauth_mask,
-				   pattern_count, byte_cnt);
-
-}
-
-static void ath9k_wow_add_pattern(struct ath_softc *sc,
-				  struct cfg80211_wowlan *wowlan)
-{
-	struct ath_hw *ah = sc->sc_ah;
-	struct ath9k_wow_pattern *wow_pattern = NULL;
-	struct cfg80211_pkt_pattern *patterns = wowlan->patterns;
-	int mask_len;
-	s8 i = 0;
-
-	if (!wowlan->n_patterns)
-		return;
-
-	/*
-	 * Add the new user configured patterns
-	 */
-	for (i = 0; i < wowlan->n_patterns; i++) {
-
-		wow_pattern = kzalloc(sizeof(*wow_pattern), GFP_KERNEL);
-
-		if (!wow_pattern)
-			return;
-
-		/*
-		 * TODO: convert the generic user space pattern to
-		 * appropriate chip specific/802.11 pattern.
-		 */
-
-		mask_len = DIV_ROUND_UP(wowlan->patterns[i].pattern_len, 8);
-		memset(wow_pattern->pattern_bytes, 0, MAX_PATTERN_SIZE);
-		memset(wow_pattern->mask_bytes, 0, MAX_PATTERN_SIZE);
-		memcpy(wow_pattern->pattern_bytes, patterns[i].pattern,
-		       patterns[i].pattern_len);
-		memcpy(wow_pattern->mask_bytes, patterns[i].mask, mask_len);
-		wow_pattern->pattern_len = patterns[i].pattern_len;
-
-		/*
-		 * just need to take care of deauth and disssoc pattern,
-		 * make sure we don't overwrite them.
-		 */
-
-		ath9k_hw_wow_apply_pattern(ah, wow_pattern->pattern_bytes,
-					   wow_pattern->mask_bytes,
-					   i + 2,
-					   wow_pattern->pattern_len);
-		kfree(wow_pattern);
-
-	}
-
-}
-
-static int ath9k_suspend(struct ieee80211_hw *hw,
-			 struct cfg80211_wowlan *wowlan)
-{
-	struct ath_softc *sc = hw->priv;
-	struct ath_hw *ah = sc->sc_ah;
-	struct ath_common *common = ath9k_hw_common(ah);
-	u32 wow_triggers_enabled = 0;
-	int ret = 0;
-
-	mutex_lock(&sc->mutex);
-
-	ath_cancel_work(sc);
-	ath_stop_ani(sc);
-	del_timer_sync(&sc->rx_poll_timer);
-
-	if (test_bit(SC_OP_INVALID, &sc->sc_flags)) {
-		ath_dbg(common, ANY, "Device not present\n");
-		ret = -EINVAL;
-		goto fail_wow;
-	}
-
-	if (WARN_ON(!wowlan)) {
-		ath_dbg(common, WOW, "None of the WoW triggers enabled\n");
-		ret = -EINVAL;
-		goto fail_wow;
-	}
-
-	if (!device_can_wakeup(sc->dev)) {
-		ath_dbg(common, WOW, "device_can_wakeup failed, WoW is not enabled\n");
-		ret = 1;
-		goto fail_wow;
-	}
-
-	/*
-	 * none of the sta vifs are associated
-	 * and we are not currently handling multivif
-	 * cases, for instance we have to seperately
-	 * configure 'keep alive frame' for each
-	 * STA.
-	 */
-
-	if (!test_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags)) {
-		ath_dbg(common, WOW, "None of the STA vifs are associated\n");
-		ret = 1;
-		goto fail_wow;
-	}
-
-	if (sc->nvifs > 1) {
-		ath_dbg(common, WOW, "WoW for multivif is not yet supported\n");
-		ret = 1;
-		goto fail_wow;
-	}
-
-	ath9k_wow_map_triggers(sc, wowlan, &wow_triggers_enabled);
-
-	ath_dbg(common, WOW, "WoW triggers enabled 0x%x\n",
-		wow_triggers_enabled);
-
-	ath9k_ps_wakeup(sc);
-
-	ath9k_stop_btcoex(sc);
-
-	/*
-	 * Enable wake up on recieving disassoc/deauth
-	 * frame by default.
-	 */
-	ath9k_wow_add_disassoc_deauth_pattern(sc);
-
-	if (wow_triggers_enabled & AH_WOW_USER_PATTERN_EN)
-		ath9k_wow_add_pattern(sc, wowlan);
-
-	spin_lock_bh(&sc->sc_pcu_lock);
-	/*
-	 * To avoid false wake, we enable beacon miss interrupt only
-	 * when we go to sleep. We save the current interrupt mask
-	 * so we can restore it after the system wakes up
-	 */
-	sc->wow_intr_before_sleep = ah->imask;
-	ah->imask &= ~ATH9K_INT_GLOBAL;
-	ath9k_hw_disable_interrupts(ah);
-	ah->imask = ATH9K_INT_BMISS | ATH9K_INT_GLOBAL;
-	ath9k_hw_set_interrupts(ah);
-	ath9k_hw_enable_interrupts(ah);
-
-	spin_unlock_bh(&sc->sc_pcu_lock);
-
-	/*
-	 * we can now sync irq and kill any running tasklets, since we already
-	 * disabled interrupts and not holding a spin lock
-	 */
-	synchronize_irq(sc->irq);
-	tasklet_kill(&sc->intr_tq);
-
-	ath9k_hw_wow_enable(ah, wow_triggers_enabled);
-
-	ath9k_ps_restore(sc);
-	ath_dbg(common, ANY, "WoW enabled in ath9k\n");
-	atomic_inc(&sc->wow_sleep_proc_intr);
-
-fail_wow:
-	mutex_unlock(&sc->mutex);
-	return ret;
-}
-
-static int ath9k_resume(struct ieee80211_hw *hw)
-{
-	struct ath_softc *sc = hw->priv;
-	struct ath_hw *ah = sc->sc_ah;
-	struct ath_common *common = ath9k_hw_common(ah);
-	u32 wow_status;
-
-	mutex_lock(&sc->mutex);
-
-	ath9k_ps_wakeup(sc);
-
-	spin_lock_bh(&sc->sc_pcu_lock);
-
-	ath9k_hw_disable_interrupts(ah);
-	ah->imask = sc->wow_intr_before_sleep;
-	ath9k_hw_set_interrupts(ah);
-	ath9k_hw_enable_interrupts(ah);
-
-	spin_unlock_bh(&sc->sc_pcu_lock);
-
-	wow_status = ath9k_hw_wow_wakeup(ah);
-
-	if (atomic_read(&sc->wow_got_bmiss_intr) == 0) {
-		/*
-		 * some devices may not pick beacon miss
-		 * as the reason they woke up so we add
-		 * that here for that shortcoming.
-		 */
-		wow_status |= AH_WOW_BEACON_MISS;
-		atomic_dec(&sc->wow_got_bmiss_intr);
-		ath_dbg(common, ANY, "Beacon miss interrupt picked up during WoW sleep\n");
-	}
-
-	atomic_dec(&sc->wow_sleep_proc_intr);
-
-	if (wow_status) {
-		ath_dbg(common, ANY, "Waking up due to WoW triggers %s with WoW status = %x\n",
-			ath9k_hw_wow_event_to_string(wow_status), wow_status);
-	}
-
-	ath_restart_work(sc);
-	ath9k_start_btcoex(sc);
-
-	ath9k_ps_restore(sc);
-	mutex_unlock(&sc->mutex);
-
-	return 0;
-}
-
-static void ath9k_set_wakeup(struct ieee80211_hw *hw, bool enabled)
-{
-	struct ath_softc *sc = hw->priv;
-
-	mutex_lock(&sc->mutex);
-	device_init_wakeup(sc->dev, 1);
-	device_set_wakeup_enable(sc->dev, enabled);
-	mutex_unlock(&sc->mutex);
-}
-
-#endif
 static void ath9k_sw_scan_start(struct ieee80211_hw *hw)
 {
 	struct ath_softc *sc = hw->priv;
@@ -2374,134 +2095,6 @@ static void ath9k_channel_switch_beacon(struct ieee80211_hw *hw,
 	sc->csa_vif = vif;
 }
 
-static void ath9k_tx99_stop(struct ath_softc *sc)
-{
-	struct ath_hw *ah = sc->sc_ah;
-	struct ath_common *common = ath9k_hw_common(ah);
-
-	ath_drain_all_txq(sc);
-	ath_startrecv(sc);
-
-	ath9k_hw_set_interrupts(ah);
-	ath9k_hw_enable_interrupts(ah);
-
-	ieee80211_wake_queues(sc->hw);
-
-	kfree_skb(sc->tx99_skb);
-	sc->tx99_skb = NULL;
-	sc->tx99_state = false;
-
-	ath9k_hw_tx99_stop(sc->sc_ah);
-	ath_dbg(common, XMIT, "TX99 stopped\n");
-}
-
-static struct sk_buff *ath9k_build_tx99_skb(struct ath_softc *sc)
-{
-	static u8 PN9Data[] = {0xff, 0x87, 0xb8, 0x59, 0xb7, 0xa1, 0xcc, 0x24,
-			       0x57, 0x5e, 0x4b, 0x9c, 0x0e, 0xe9, 0xea, 0x50,
-			       0x2a, 0xbe, 0xb4, 0x1b, 0xb6, 0xb0, 0x5d, 0xf1,
-			       0xe6, 0x9a, 0xe3, 0x45, 0xfd, 0x2c, 0x53, 0x18,
-			       0x0c, 0xca, 0xc9, 0xfb, 0x49, 0x37, 0xe5, 0xa8,
-			       0x51, 0x3b, 0x2f, 0x61, 0xaa, 0x72, 0x18, 0x84,
-			       0x02, 0x23, 0x23, 0xab, 0x63, 0x89, 0x51, 0xb3,
-			       0xe7, 0x8b, 0x72, 0x90, 0x4c, 0xe8, 0xfb, 0xc0};
-	u32 len = 1200;
-	struct ieee80211_hw *hw = sc->hw;
-	struct ieee80211_hdr *hdr;
-	struct ieee80211_tx_info *tx_info;
-	struct sk_buff *skb;
-
-	skb = alloc_skb(len, GFP_KERNEL);
-	if (!skb)
-		return NULL;
-
-	skb_put(skb, len);
-
-	memset(skb->data, 0, len);
-
-	hdr = (struct ieee80211_hdr *)skb->data;
-	hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA);
-	hdr->duration_id = 0;
-
-	memcpy(hdr->addr1, hw->wiphy->perm_addr, ETH_ALEN);
-	memcpy(hdr->addr2, hw->wiphy->perm_addr, ETH_ALEN);
-	memcpy(hdr->addr3, hw->wiphy->perm_addr, ETH_ALEN);
-
-	hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
-
-	tx_info = IEEE80211_SKB_CB(skb);
-	memset(tx_info, 0, sizeof(*tx_info));
-	tx_info->band = hw->conf.chandef.chan->band;
-	tx_info->flags = IEEE80211_TX_CTL_NO_ACK;
-	tx_info->control.vif = sc->tx99_vif;
-
-	memcpy(skb->data + sizeof(*hdr), PN9Data, sizeof(PN9Data));
-
-	return skb;
-}
-
-void ath9k_tx99_deinit(struct ath_softc *sc)
-{
-	ath_reset(sc);
-
-	ath9k_ps_wakeup(sc);
-	ath9k_tx99_stop(sc);
-	ath9k_ps_restore(sc);
-}
-
-int ath9k_tx99_init(struct ath_softc *sc)
-{
-	struct ieee80211_hw *hw = sc->hw;
-	struct ath_hw *ah = sc->sc_ah;
-	struct ath_common *common = ath9k_hw_common(ah);
-	struct ath_tx_control txctl;
-	int r;
-
-	if (sc->sc_flags & SC_OP_INVALID) {
-		ath_err(common,
-			"driver is in invalid state unable to use TX99");
-		return -EINVAL;
-	}
-
-	sc->tx99_skb = ath9k_build_tx99_skb(sc);
-	if (!sc->tx99_skb)
-		return -ENOMEM;
-
-	memset(&txctl, 0, sizeof(txctl));
-	txctl.txq = sc->tx.txq_map[IEEE80211_AC_VO];
-
-	ath_reset(sc);
-
-	ath9k_ps_wakeup(sc);
-
-	ath9k_hw_disable_interrupts(ah);
-	atomic_set(&ah->intr_ref_cnt, -1);
-	ath_drain_all_txq(sc);
-	ath_stoprecv(sc);
-
-	sc->tx99_state = true;
-
-	ieee80211_stop_queues(hw);
-
-	if (sc->tx99_power == MAX_RATE_POWER + 1)
-		sc->tx99_power = MAX_RATE_POWER;
-
-	ath9k_hw_tx99_set_txpower(ah, sc->tx99_power);
-	r = ath9k_tx99_send(sc, sc->tx99_skb, &txctl);
-	if (r) {
-		ath_dbg(common, XMIT, "Failed to xmit TX99 skb\n");
-		return r;
-	}
-
-	ath_dbg(common, XMIT, "TX99 xmit started using %d ( %ddBm)\n",
-		sc->tx99_power,
-		sc->tx99_power / 2);
-
-	/* We leave the harware awake as it will be chugging on */
-
-	return 0;
-}
-
 struct ieee80211_ops ath9k_ops = {
 	.tx 		    = ath9k_tx,
 	.start 		    = ath9k_start,
@@ -2532,7 +2125,7 @@ struct ieee80211_ops ath9k_ops = {
 	.set_antenna	    = ath9k_set_antenna,
 	.get_antenna	    = ath9k_get_antenna,
 
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_ATH9K_WOW
 	.suspend	    = ath9k_suspend,
 	.resume		    = ath9k_resume,
 	.set_wakeup	    = ath9k_set_wakeup,
@@ -2544,7 +2137,7 @@ struct ieee80211_ops ath9k_ops = {
 	.get_et_strings     = ath9k_get_et_strings,
 #endif
 
-#if defined(CONFIG_MAC80211_DEBUGFS) && defined(CONFIG_ATH9K_DEBUGFS)
+#if defined(CONFIG_MAC80211_DEBUGFS) && defined(CONFIG_ATH9K_STATION_STATISTICS)
 	.sta_add_debugfs    = ath9k_sta_add_debugfs,
 #endif
 	.sw_scan_start	    = ath9k_sw_scan_start,
diff --git a/drivers/net/wireless/ath/ath9k/mci.c b/drivers/net/wireless/ath/ath9k/mci.c
index 0ac1b5f04256..71799fcade54 100644
--- a/drivers/net/wireless/ath/ath9k/mci.c
+++ b/drivers/net/wireless/ath/ath9k/mci.c
@@ -200,7 +200,7 @@ skip_tuning:
 	if (btcoex->duty_cycle > ATH_MCI_MAX_DUTY_CYCLE)
 		btcoex->duty_cycle = ATH_MCI_MAX_DUTY_CYCLE;
 
-	btcoex->btcoex_no_stomp =  btcoex->btcoex_period * 1000 *
+	btcoex->btcoex_no_stomp =  btcoex->btcoex_period *
 		(100 - btcoex->duty_cycle) / 100;
 
 	ath9k_hw_btcoex_enable(sc->sc_ah);
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index b5656fce4ff5..55724b02316b 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -87,6 +87,19 @@ static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = {
 	{ PCI_VDEVICE(ATHEROS, 0x002C) }, /* PCI-E 802.11n bonded out */
 	{ PCI_VDEVICE(ATHEROS, 0x002D) }, /* PCI   */
 	{ PCI_VDEVICE(ATHEROS, 0x002E) }, /* PCI-E */
+
+	/* Killer Wireless (3x3) */
+	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+			 0x0030,
+			 0x1A56,
+			 0x2000),
+	  .driver_data = ATH9K_PCI_KILLER },
+	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+			 0x0030,
+			 0x1A56,
+			 0x2001),
+	  .driver_data = ATH9K_PCI_KILLER },
+
 	{ PCI_VDEVICE(ATHEROS, 0x0030) }, /* PCI-E  AR9300 */
 
 	/* PCI-E CUS198 */
@@ -354,6 +367,13 @@ static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = {
 			 0x1783),
 	  .driver_data = ATH9K_PCI_WOW },
 
+	/* Killer Wireless (2x2) */
+	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+			 0x0030,
+			 0x1A56,
+			 0x2003),
+	  .driver_data = ATH9K_PCI_KILLER },
+
 	{ PCI_VDEVICE(ATHEROS, 0x0034) }, /* PCI-E  AR9462 */
 	{ PCI_VDEVICE(ATHEROS, 0x0037) }, /* PCI-E  AR1111/AR9485 */
 
@@ -392,6 +412,16 @@ static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = {
 	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
 			 0x0036,
 			 0x11AD, /* LITEON */
+			 0x06B2),
+	  .driver_data = ATH9K_PCI_AR9565_1ANT },
+	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+			 0x0036,
+			 0x11AD, /* LITEON */
+			 0x0842),
+	  .driver_data = ATH9K_PCI_AR9565_1ANT },
+	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+			 0x0036,
+			 0x11AD, /* LITEON */
 			 0x6671),
 	  .driver_data = ATH9K_PCI_AR9565_1ANT },
 	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
@@ -404,6 +434,16 @@ static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = {
 			 0x1B9A, /* XAVI */
 			 0x2812),
 	  .driver_data = ATH9K_PCI_AR9565_1ANT },
+	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+			 0x0036,
+			 0x1B9A, /* XAVI */
+			 0x28A1),
+	  .driver_data = ATH9K_PCI_AR9565_1ANT },
+	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+			 0x0036,
+			 PCI_VENDOR_ID_AZWAVE,
+			 0x218A),
+	  .driver_data = ATH9K_PCI_AR9565_1ANT },
 
 	/* WB335 1-ANT / Antenna Diversity */
 	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
@@ -448,13 +488,18 @@ static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = {
 	  .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
 	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
 			 0x0036,
-			 PCI_VENDOR_ID_AZWAVE,
-			 0x213A),
+			 0x11AD, /* LITEON */
+			 0x06A2),
 	  .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
 	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
 			 0x0036,
-			 PCI_VENDOR_ID_LENOVO,
-			 0x3026),
+			 0x11AD, /* LITEON */
+			 0x0682),
+	  .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
+	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+			 0x0036,
+			 PCI_VENDOR_ID_AZWAVE,
+			 0x213A),
 	  .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
 	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
 			 0x0036,
@@ -468,38 +513,41 @@ static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = {
 	  .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
 	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
 			 0x0036,
+			 PCI_VENDOR_ID_HP,
+			 0x2005),
+	  .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
+	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+			 0x0036,
 			 PCI_VENDOR_ID_DELL,
-			 0x020E),
+			 0x020C),
 	  .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
 
-	/* WB335 2-ANT */
+	/* WB335 2-ANT / Antenna-Diversity */
 	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
 			 0x0036,
 			 PCI_VENDOR_ID_SAMSUNG,
 			 0x411A),
-	  .driver_data = ATH9K_PCI_AR9565_2ANT },
+	  .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
 	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
 			 0x0036,
 			 PCI_VENDOR_ID_SAMSUNG,
 			 0x411B),
-	  .driver_data = ATH9K_PCI_AR9565_2ANT },
+	  .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
 	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
 			 0x0036,
 			 PCI_VENDOR_ID_SAMSUNG,
 			 0x411C),
-	  .driver_data = ATH9K_PCI_AR9565_2ANT },
+	  .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
 	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
 			 0x0036,
 			 PCI_VENDOR_ID_SAMSUNG,
 			 0x411D),
-	  .driver_data = ATH9K_PCI_AR9565_2ANT },
+	  .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
 	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
 			 0x0036,
 			 PCI_VENDOR_ID_SAMSUNG,
 			 0x411E),
-	  .driver_data = ATH9K_PCI_AR9565_2ANT },
-
-	/* WB335 2-ANT / Antenna-Diversity */
+	  .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
 	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
 			 0x0036,
 			 PCI_VENDOR_ID_ATHEROS,
@@ -527,11 +575,31 @@ static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = {
 	  .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
 	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
 			 0x0036,
+			 0x11AD, /* LITEON */
+			 0x0832),
+	  .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+			 0x0036,
+			 0x11AD, /* LITEON */
+			 0x0692),
+	  .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+			 0x0036,
 			 PCI_VENDOR_ID_AZWAVE,
 			 0x2130),
 	  .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
 	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
 			 0x0036,
+			 PCI_VENDOR_ID_AZWAVE,
+			 0x213B),
+	  .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+			 0x0036,
+			 PCI_VENDOR_ID_AZWAVE,
+			 0x2182),
+	  .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+			 0x0036,
 			 0x144F, /* ASKEY */
 			 0x7202),
 	  .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
@@ -542,9 +610,49 @@ static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = {
 	  .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
 	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
 			 0x0036,
+			 0x1B9A, /* XAVI */
+			 0x28A2),
+	  .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+			 0x0036,
 			 0x185F, /* WNC */
 			 0x3027),
 	  .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+			 0x0036,
+			 0x185F, /* WNC */
+			 0xA120),
+	  .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+			 0x0036,
+			 PCI_VENDOR_ID_FOXCONN,
+			 0xE07F),
+	  .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+			 0x0036,
+			 PCI_VENDOR_ID_FOXCONN,
+			 0xE081),
+	  .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+			 0x0036,
+			 PCI_VENDOR_ID_LENOVO,
+			 0x3026),
+	  .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+			 0x0036,
+			 PCI_VENDOR_ID_LENOVO,
+			 0x4026),
+	  .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+			 0x0036,
+			 PCI_VENDOR_ID_ASUSTEK,
+			 0x85F2),
+	  .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+			 0x0036,
+			 PCI_VENDOR_ID_DELL,
+			 0x020E),
+	  .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
 
 	/* PCI-E AR9565 (WB335) */
 	{ PCI_VDEVICE(ATHEROS, 0x0036),
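
For reference, subsystem-ID entries like the ones added above are normally consumed in the PCI probe callback through id->driver_data. A hedged sketch (this is not ath9k's probe; MY_PCI_KILLER is an illustrative stand-in for the ATH9K_PCI_KILLER flag used in the table):

#include <linux/pci.h>

#define MY_PCI_KILLER	0x01

static int my_wifi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	unsigned long flags = id->driver_data;

	if (flags & MY_PCI_KILLER)
		dev_info(&pdev->dev,
			 "Killer Wireless variant (subsystem %04x:%04x)\n",
			 pdev->subsystem_vendor, pdev->subsystem_device);

	return 0;	/* a real probe would enable and map the device here */
}
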
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index 95ddca5495d4..a0ebdd000fc2 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -15,7 +15,6 @@
  */
 
 #include <linux/dma-mapping.h>
-#include <linux/relay.h>
 #include "ath9k.h"
 #include "ar9003_mac.h"
 
@@ -420,7 +419,7 @@ u32 ath_calcrxfilter(struct ath_softc *sc)
 		rfilt |= ATH9K_RX_FILTER_MCAST_BCAST_ALL;
 	}
 
-	if (AR_SREV_9550(sc->sc_ah))
+	if (AR_SREV_9550(sc->sc_ah) || AR_SREV_9531(sc->sc_ah))
 		rfilt |= ATH9K_RX_FILTER_4ADDRESS;
 
 	return rfilt;
@@ -851,20 +850,15 @@ static int ath9k_process_rate(struct ath_common *common,
 	enum ieee80211_band band;
 	unsigned int i = 0;
 	struct ath_softc __maybe_unused *sc = common->priv;
+	struct ath_hw *ah = sc->sc_ah;
 
-	band = hw->conf.chandef.chan->band;
+	band = ah->curchan->chan->band;
 	sband = hw->wiphy->bands[band];
 
-	switch (hw->conf.chandef.width) {
-	case NL80211_CHAN_WIDTH_5:
+	if (IS_CHAN_QUARTER_RATE(ah->curchan))
 		rxs->flag |= RX_FLAG_5MHZ;
-		break;
-	case NL80211_CHAN_WIDTH_10:
+	else if (IS_CHAN_HALF_RATE(ah->curchan))
 		rxs->flag |= RX_FLAG_10MHZ;
-		break;
-	default:
-		break;
-	}
 
 	if (rx_stats->rs_rate & 0x80) {
 		/* HT rate */
@@ -906,6 +900,7 @@ static void ath9k_process_rssi(struct ath_common *common,
 	struct ath_hw *ah = common->ah;
 	int last_rssi;
 	int rssi = rx_stats->rs_rssi;
+	int i, j;
 
 	/*
 	 * RSSI is not available for subframes in an A-MPDU.
@@ -924,6 +919,20 @@ static void ath9k_process_rssi(struct ath_common *common,
 		return;
 	}
 
+	for (i = 0, j = 0; i < ARRAY_SIZE(rx_stats->rs_rssi_ctl); i++) {
+		s8 rssi;
+
+		if (!(ah->rxchainmask & BIT(i)))
+			continue;
+
+		rssi = rx_stats->rs_rssi_ctl[i];
+		if (rssi != ATH9K_RSSI_BAD) {
+			rxs->chains |= BIT(j);
+			rxs->chain_signal[j] = ah->noise + rssi;
+		}
+		j++;
+	}
+
 	/*
 	 * Update Beacon RSSI, this is used by ANI.
 	 */
@@ -960,201 +969,6 @@ static void ath9k_process_tsf(struct ath_rx_status *rs,
 		rxs->mactime += 0x100000000ULL;
 }
 
-#ifdef CONFIG_ATH9K_DEBUGFS
-static s8 fix_rssi_inv_only(u8 rssi_val)
-{
-	if (rssi_val == 128)
-		rssi_val = 0;
-	return (s8) rssi_val;
-}
-#endif
-
-/* returns 1 if this was a spectral frame, even if not handled. */
-static int ath_process_fft(struct ath_softc *sc, struct ieee80211_hdr *hdr,
-			   struct ath_rx_status *rs, u64 tsf)
-{
-#ifdef CONFIG_ATH9K_DEBUGFS
-	struct ath_hw *ah = sc->sc_ah;
-	u8 num_bins, *bins, *vdata = (u8 *)hdr;
-	struct fft_sample_ht20 fft_sample_20;
-	struct fft_sample_ht20_40 fft_sample_40;
-	struct fft_sample_tlv *tlv;
-	struct ath_radar_info *radar_info;
-	int len = rs->rs_datalen;
-	int dc_pos;
-	u16 fft_len, length, freq = ah->curchan->chan->center_freq;
-	enum nl80211_channel_type chan_type;
-
-	/* AR9280 and before report via ATH9K_PHYERR_RADAR, AR93xx and newer
-	 * via ATH9K_PHYERR_SPECTRAL. Haven't seen ATH9K_PHYERR_FALSE_RADAR_EXT
-	 * yet, but this is supposed to be possible as well.
-	 */
-	if (rs->rs_phyerr != ATH9K_PHYERR_RADAR &&
-	    rs->rs_phyerr != ATH9K_PHYERR_FALSE_RADAR_EXT &&
-	    rs->rs_phyerr != ATH9K_PHYERR_SPECTRAL)
-		return 0;
-
-	/* check if spectral scan bit is set. This does not have to be checked
-	 * if received through a SPECTRAL phy error, but shouldn't hurt.
-	 */
-	radar_info = ((struct ath_radar_info *)&vdata[len]) - 1;
-	if (!(radar_info->pulse_bw_info & SPECTRAL_SCAN_BITMASK))
-		return 0;
-
-	chan_type = cfg80211_get_chandef_type(&sc->hw->conf.chandef);
-	if ((chan_type == NL80211_CHAN_HT40MINUS) ||
-	    (chan_type == NL80211_CHAN_HT40PLUS)) {
-		fft_len = SPECTRAL_HT20_40_TOTAL_DATA_LEN;
-		num_bins = SPECTRAL_HT20_40_NUM_BINS;
-		bins = (u8 *)fft_sample_40.data;
-	} else {
-		fft_len = SPECTRAL_HT20_TOTAL_DATA_LEN;
-		num_bins = SPECTRAL_HT20_NUM_BINS;
-		bins = (u8 *)fft_sample_20.data;
-	}
-
-	/* Variation in the data length is possible and will be fixed later */
-	if ((len > fft_len + 2) || (len < fft_len - 1))
-		return 1;
-
-	switch (len - fft_len) {
-	case 0:
-		/* length correct, nothing to do. */
-		memcpy(bins, vdata, num_bins);
-		break;
-	case -1:
-		/* first byte missing, duplicate it. */
-		memcpy(&bins[1], vdata, num_bins - 1);
-		bins[0] = vdata[0];
-		break;
-	case 2:
-		/* MAC added 2 extra bytes at bin 30 and 32, remove them. */
-		memcpy(bins, vdata, 30);
-		bins[30] = vdata[31];
-		memcpy(&bins[31], &vdata[33], num_bins - 31);
-		break;
-	case 1:
-		/* MAC added 2 extra bytes AND first byte is missing. */
-		bins[0] = vdata[0];
-		memcpy(&bins[1], vdata, 30);
-		bins[31] = vdata[31];
-		memcpy(&bins[32], &vdata[33], num_bins - 32);
-		break;
-	default:
-		return 1;
-	}
-
-	/* DC value (value in the middle) is the blind spot of the spectral
-	 * sample and invalid, interpolate it.
-	 */
-	dc_pos = num_bins / 2;
-	bins[dc_pos] = (bins[dc_pos + 1] + bins[dc_pos - 1]) / 2;
-
-	if ((chan_type == NL80211_CHAN_HT40MINUS) ||
-	    (chan_type == NL80211_CHAN_HT40PLUS)) {
-		s8 lower_rssi, upper_rssi;
-		s16 ext_nf;
-		u8 lower_max_index, upper_max_index;
-		u8 lower_bitmap_w, upper_bitmap_w;
-		u16 lower_mag, upper_mag;
-		struct ath9k_hw_cal_data *caldata = ah->caldata;
-		struct ath_ht20_40_mag_info *mag_info;
-
-		if (caldata)
-			ext_nf = ath9k_hw_getchan_noise(ah, ah->curchan,
-					caldata->nfCalHist[3].privNF);
-		else
-			ext_nf = ATH_DEFAULT_NOISE_FLOOR;
-
-		length = sizeof(fft_sample_40) - sizeof(struct fft_sample_tlv);
-		fft_sample_40.tlv.type = ATH_FFT_SAMPLE_HT20_40;
-		fft_sample_40.tlv.length = __cpu_to_be16(length);
-		fft_sample_40.freq = __cpu_to_be16(freq);
-		fft_sample_40.channel_type = chan_type;
-
-		if (chan_type == NL80211_CHAN_HT40PLUS) {
-			lower_rssi = fix_rssi_inv_only(rs->rs_rssi_ctl0);
-			upper_rssi = fix_rssi_inv_only(rs->rs_rssi_ext0);
-
-			fft_sample_40.lower_noise = ah->noise;
-			fft_sample_40.upper_noise = ext_nf;
-		} else {
-			lower_rssi = fix_rssi_inv_only(rs->rs_rssi_ext0);
-			upper_rssi = fix_rssi_inv_only(rs->rs_rssi_ctl0);
-
-			fft_sample_40.lower_noise = ext_nf;
-			fft_sample_40.upper_noise = ah->noise;
-		}
-		fft_sample_40.lower_rssi = lower_rssi;
-		fft_sample_40.upper_rssi = upper_rssi;
-
-		mag_info = ((struct ath_ht20_40_mag_info *)radar_info) - 1;
-		lower_mag = spectral_max_magnitude(mag_info->lower_bins);
-		upper_mag = spectral_max_magnitude(mag_info->upper_bins);
-		fft_sample_40.lower_max_magnitude = __cpu_to_be16(lower_mag);
-		fft_sample_40.upper_max_magnitude = __cpu_to_be16(upper_mag);
-		lower_max_index = spectral_max_index(mag_info->lower_bins);
-		upper_max_index = spectral_max_index(mag_info->upper_bins);
-		fft_sample_40.lower_max_index = lower_max_index;
-		fft_sample_40.upper_max_index = upper_max_index;
-		lower_bitmap_w = spectral_bitmap_weight(mag_info->lower_bins);
-		upper_bitmap_w = spectral_bitmap_weight(mag_info->upper_bins);
-		fft_sample_40.lower_bitmap_weight = lower_bitmap_w;
-		fft_sample_40.upper_bitmap_weight = upper_bitmap_w;
-		fft_sample_40.max_exp = mag_info->max_exp & 0xf;
-
-		fft_sample_40.tsf = __cpu_to_be64(tsf);
-
-		tlv = (struct fft_sample_tlv *)&fft_sample_40;
-	} else {
-		u8 max_index, bitmap_w;
-		u16 magnitude;
-		struct ath_ht20_mag_info *mag_info;
-
-		length = sizeof(fft_sample_20) - sizeof(struct fft_sample_tlv);
-		fft_sample_20.tlv.type = ATH_FFT_SAMPLE_HT20;
-		fft_sample_20.tlv.length = __cpu_to_be16(length);
-		fft_sample_20.freq = __cpu_to_be16(freq);
-
-		fft_sample_20.rssi = fix_rssi_inv_only(rs->rs_rssi_ctl0);
-		fft_sample_20.noise = ah->noise;
-
-		mag_info = ((struct ath_ht20_mag_info *)radar_info) - 1;
-		magnitude = spectral_max_magnitude(mag_info->all_bins);
-		fft_sample_20.max_magnitude = __cpu_to_be16(magnitude);
-		max_index = spectral_max_index(mag_info->all_bins);
-		fft_sample_20.max_index = max_index;
-		bitmap_w = spectral_bitmap_weight(mag_info->all_bins);
-		fft_sample_20.bitmap_weight = bitmap_w;
-		fft_sample_20.max_exp = mag_info->max_exp & 0xf;
-
-		fft_sample_20.tsf = __cpu_to_be64(tsf);
-
-		tlv = (struct fft_sample_tlv *)&fft_sample_20;
-	}
-
-	ath_debug_send_fft_sample(sc, tlv);
-	return 1;
-#else
-	return 0;
-#endif
-}
-
-static bool ath9k_is_mybeacon(struct ath_softc *sc, struct ieee80211_hdr *hdr)
-{
-	struct ath_hw *ah = sc->sc_ah;
-	struct ath_common *common = ath9k_hw_common(ah);
-
-	if (ieee80211_is_beacon(hdr->frame_control)) {
-		RX_STAT_INC(rx_beacons);
-		if (!is_zero_ether_addr(common->curbssid) &&
-		    ether_addr_equal(hdr->addr3, common->curbssid))
-			return true;
-	}
-
-	return false;
-}
-
 /*
  * For Decrypt or Demic errors, we only mark packet status here and always push
  * up the frame up to let mac80211 handle the actual error case, be it no
@@ -1242,10 +1056,17 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
 		goto exit;
 	}
 
-	rx_stats->is_mybeacon = ath9k_is_mybeacon(sc, hdr);
-	if (rx_stats->is_mybeacon) {
-		sc->hw_busy_count = 0;
-		ath_start_rx_poll(sc, 3);
+	if (ath_is_mybeacon(common, hdr)) {
+		RX_STAT_INC(rx_beacons);
+		rx_stats->is_mybeacon = true;
+	}
+
+	/*
+	 * This shouldn't happen, but have a safety check anyway.
+	 */
+	if (WARN_ON(!ah->curchan)) {
+		ret = -EINVAL;
+		goto exit;
 	}
 
 	if (ath9k_process_rate(common, hw, rx_stats, rx_status)) {
@@ -1255,8 +1076,8 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
 
 	ath9k_process_rssi(common, hw, rx_stats, rx_status);
 
-	rx_status->band = hw->conf.chandef.chan->band;
-	rx_status->freq = hw->conf.chandef.chan->center_freq;
+	rx_status->band = ah->curchan->chan->band;
+	rx_status->freq = ah->curchan->chan->center_freq;
 	rx_status->antenna = rx_stats->rs_antenna;
 	rx_status->flag |= RX_FLAG_MACTIME_END;
 
@@ -1521,8 +1342,8 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
 		spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
 
 		ath9k_antenna_check(sc, &rs);
-
 		ath9k_apply_ampdu_details(sc, &rs, rxs);
+		ath_debug_rate_stats(sc, &rs, skb);
 
 		ieee80211_rx(hw, skb);
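
The per-chain loop added to ath9k_process_rssi() above derives the reported signal as the calibrated noise floor plus the chain's RSSI, skipping chains outside the RX chainmask or flagged bad. A standalone sketch with a worked value (a -95 dBm noise floor and a 40 dB RSSI report as -55 dBm); the -128 sentinel and the noise figure are assumptions, not driver constants quoted here:

#include <stdint.h>
#include <stdio.h>

#define RSSI_BAD	(-128)		/* assumed ATH9K_RSSI_BAD value */
#define NUM_CHAINS	3

static void report_chain_signals(uint8_t rxchainmask,
				 const int8_t rssi_ctl[NUM_CHAINS],
				 int noise_dbm)
{
	int i, j = 0;

	for (i = 0; i < NUM_CHAINS; i++) {
		if (!(rxchainmask & (1 << i)))
			continue;

		if (rssi_ctl[i] != RSSI_BAD)
			printf("chain %d: %d dBm\n", j,
			       noise_dbm + rssi_ctl[i]);
		j++;
	}
}

int main(void)
{
	/* noise floor -95 dBm, per-chain RSSI 40/35 dB, third chain bad */
	const int8_t rssi[NUM_CHAINS] = { 40, 35, RSSI_BAD };

	report_chain_signals(0x7, rssi, -95);
	return 0;
}
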
 
diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h
index a13b2d143d9e..b1fd3fa84983 100644
--- a/drivers/net/wireless/ath/ath9k/reg.h
+++ b/drivers/net/wireless/ath/ath9k/reg.h
@@ -304,6 +304,7 @@
 #define AR_IMR_S2              0x00ac
 #define AR_IMR_S2_QCU_TXURN    0x000003FF
 #define AR_IMR_S2_QCU_TXURN_S  0
+#define AR_IMR_S2_BB_WATCHDOG  0x00010000
 #define AR_IMR_S2_CST          0x00400000
 #define AR_IMR_S2_GTT          0x00800000
 #define AR_IMR_S2_TIM          0x01000000
@@ -809,7 +810,12 @@
 #define AR_SREV_REVISION_9462_21	3
 #define AR_SREV_VERSION_9565            0x2C0
 #define AR_SREV_REVISION_9565_10        0
+#define AR_SREV_REVISION_9565_101       1
+#define AR_SREV_REVISION_9565_11        2
 #define AR_SREV_VERSION_9550		0x400
+#define AR_SREV_VERSION_9531            0x500
+#define AR_SREV_REVISION_9531_10        0
+#define AR_SREV_REVISION_9531_11        1
 
 #define AR_SREV_5416(_ah) \
 	(((_ah)->hw_version.macVersion == AR_SREV_VERSION_5416_PCI) || \
@@ -881,9 +887,6 @@
 
 #define AR_SREV_9330(_ah) \
 	(((_ah)->hw_version.macVersion == AR_SREV_VERSION_9330))
-#define AR_SREV_9330_10(_ah) \
-	(AR_SREV_9330((_ah)) && \
-	 ((_ah)->hw_version.macRev == AR_SREV_REVISION_9330_10))
 #define AR_SREV_9330_11(_ah) \
 	(AR_SREV_9330((_ah)) && \
 	 ((_ah)->hw_version.macRev == AR_SREV_REVISION_9330_11))
@@ -927,10 +930,18 @@
 
 #define AR_SREV_9565(_ah) \
 	(((_ah)->hw_version.macVersion == AR_SREV_VERSION_9565))
-
 #define AR_SREV_9565_10(_ah) \
 	(((_ah)->hw_version.macVersion == AR_SREV_VERSION_9565) && \
 	 ((_ah)->hw_version.macRev == AR_SREV_REVISION_9565_10))
+#define AR_SREV_9565_101(_ah) \
+	(((_ah)->hw_version.macVersion == AR_SREV_VERSION_9565) && \
+	 ((_ah)->hw_version.macRev == AR_SREV_REVISION_9565_101))
+#define AR_SREV_9565_11(_ah) \
+	(((_ah)->hw_version.macVersion == AR_SREV_VERSION_9565) && \
+	 ((_ah)->hw_version.macRev == AR_SREV_REVISION_9565_11))
+#define AR_SREV_9565_11_OR_LATER(_ah) \
+	(((_ah)->hw_version.macVersion == AR_SREV_VERSION_9565) && \
+	 ((_ah)->hw_version.macRev >= AR_SREV_REVISION_9565_11))
 
 #define AR_SREV_9550(_ah) \
 	(((_ah)->hw_version.macVersion == AR_SREV_VERSION_9550))
@@ -938,11 +949,19 @@
 #define AR_SREV_9580(_ah) \
 	(((_ah)->hw_version.macVersion == AR_SREV_VERSION_9580) && \
 	((_ah)->hw_version.macRev >= AR_SREV_REVISION_9580_10))
-
 #define AR_SREV_9580_10(_ah) \
 	(((_ah)->hw_version.macVersion == AR_SREV_VERSION_9580) && \
 	((_ah)->hw_version.macRev == AR_SREV_REVISION_9580_10))
 
+#define AR_SREV_9531(_ah) \
+	(((_ah)->hw_version.macVersion == AR_SREV_VERSION_9531))
+#define AR_SREV_9531_10(_ah) \
+	(((_ah)->hw_version.macVersion == AR_SREV_VERSION_9531) && \
+	 ((_ah)->hw_version.macRev == AR_SREV_REVISION_9531_10))
+#define AR_SREV_9531_11(_ah) \
+	(((_ah)->hw_version.macVersion == AR_SREV_VERSION_9531) && \
+	 ((_ah)->hw_version.macRev == AR_SREV_REVISION_9531_11))
+
 /* NOTE: When adding chips newer than Peacock, add chip check here */
 #define AR_SREV_9580_10_OR_LATER(_ah) \
 	(AR_SREV_9580(_ah))
diff --git a/drivers/net/wireless/ath/ath9k/spectral.c b/drivers/net/wireless/ath/ath9k/spectral.c
new file mode 100644
index 000000000000..99f4de95c264
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/spectral.c
@@ -0,0 +1,543 @@
+/*
+ * Copyright (c) 2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/relay.h>
+#include "ath9k.h"
+
+static s8 fix_rssi_inv_only(u8 rssi_val)
+{
+	if (rssi_val == 128)
+		rssi_val = 0;
+	return (s8) rssi_val;
+}
+
+static void ath_debug_send_fft_sample(struct ath_softc *sc,
+				      struct fft_sample_tlv *fft_sample_tlv)
+{
+	int length;
+	if (!sc->rfs_chan_spec_scan)
+		return;
+
+	length = __be16_to_cpu(fft_sample_tlv->length) +
+		 sizeof(*fft_sample_tlv);
+	relay_write(sc->rfs_chan_spec_scan, fft_sample_tlv, length);
+}
+
+/* returns 1 if this was a spectral frame, even if not handled. */
+int ath_process_fft(struct ath_softc *sc, struct ieee80211_hdr *hdr,
+		    struct ath_rx_status *rs, u64 tsf)
+{
+	struct ath_hw *ah = sc->sc_ah;
+	u8 num_bins, *bins, *vdata = (u8 *)hdr;
+	struct fft_sample_ht20 fft_sample_20;
+	struct fft_sample_ht20_40 fft_sample_40;
+	struct fft_sample_tlv *tlv;
+	struct ath_radar_info *radar_info;
+	int len = rs->rs_datalen;
+	int dc_pos;
+	u16 fft_len, length, freq = ah->curchan->chan->center_freq;
+	enum nl80211_channel_type chan_type;
+
+	/* AR9280 and before report via ATH9K_PHYERR_RADAR, AR93xx and newer
+	 * via ATH9K_PHYERR_SPECTRAL. Haven't seen ATH9K_PHYERR_FALSE_RADAR_EXT
+	 * yet, but this is supposed to be possible as well.
+	 */
+	if (rs->rs_phyerr != ATH9K_PHYERR_RADAR &&
+	    rs->rs_phyerr != ATH9K_PHYERR_FALSE_RADAR_EXT &&
+	    rs->rs_phyerr != ATH9K_PHYERR_SPECTRAL)
+		return 0;
+
+	/* check if spectral scan bit is set. This does not have to be checked
+	 * if received through a SPECTRAL phy error, but shouldn't hurt.
+	 */
+	radar_info = ((struct ath_radar_info *)&vdata[len]) - 1;
+	if (!(radar_info->pulse_bw_info & SPECTRAL_SCAN_BITMASK))
+		return 0;
+
+	chan_type = cfg80211_get_chandef_type(&sc->hw->conf.chandef);
+	if ((chan_type == NL80211_CHAN_HT40MINUS) ||
+	    (chan_type == NL80211_CHAN_HT40PLUS)) {
+		fft_len = SPECTRAL_HT20_40_TOTAL_DATA_LEN;
+		num_bins = SPECTRAL_HT20_40_NUM_BINS;
+		bins = (u8 *)fft_sample_40.data;
+	} else {
+		fft_len = SPECTRAL_HT20_TOTAL_DATA_LEN;
+		num_bins = SPECTRAL_HT20_NUM_BINS;
+		bins = (u8 *)fft_sample_20.data;
+	}
+
+	/* Variation in the data length is possible and will be fixed later */
+	if ((len > fft_len + 2) || (len < fft_len - 1))
+		return 1;
+
+	switch (len - fft_len) {
+	case 0:
+		/* length correct, nothing to do. */
+		memcpy(bins, vdata, num_bins);
+		break;
+	case -1:
+		/* first byte missing, duplicate it. */
+		memcpy(&bins[1], vdata, num_bins - 1);
+		bins[0] = vdata[0];
+		break;
+	case 2:
+		/* MAC added 2 extra bytes at bin 30 and 32, remove them. */
+		memcpy(bins, vdata, 30);
+		bins[30] = vdata[31];
+		memcpy(&bins[31], &vdata[33], num_bins - 31);
+		break;
+	case 1:
+		/* MAC added 2 extra bytes AND first byte is missing. */
+		bins[0] = vdata[0];
+		memcpy(&bins[1], vdata, 30);
+		bins[31] = vdata[31];
+		memcpy(&bins[32], &vdata[33], num_bins - 32);
+		break;
+	default:
+		return 1;
+	}
+
+	/* DC value (value in the middle) is the blind spot of the spectral
+	 * sample and invalid, interpolate it.
+	 */
+	dc_pos = num_bins / 2;
+	bins[dc_pos] = (bins[dc_pos + 1] + bins[dc_pos - 1]) / 2;
+
+	if ((chan_type == NL80211_CHAN_HT40MINUS) ||
+	    (chan_type == NL80211_CHAN_HT40PLUS)) {
+		s8 lower_rssi, upper_rssi;
+		s16 ext_nf;
+		u8 lower_max_index, upper_max_index;
+		u8 lower_bitmap_w, upper_bitmap_w;
+		u16 lower_mag, upper_mag;
+		struct ath9k_hw_cal_data *caldata = ah->caldata;
+		struct ath_ht20_40_mag_info *mag_info;
+
+		if (caldata)
+			ext_nf = ath9k_hw_getchan_noise(ah, ah->curchan,
+					caldata->nfCalHist[3].privNF);
+		else
+			ext_nf = ATH_DEFAULT_NOISE_FLOOR;
+
+		length = sizeof(fft_sample_40) - sizeof(struct fft_sample_tlv);
+		fft_sample_40.tlv.type = ATH_FFT_SAMPLE_HT20_40;
+		fft_sample_40.tlv.length = __cpu_to_be16(length);
+		fft_sample_40.freq = __cpu_to_be16(freq);
+		fft_sample_40.channel_type = chan_type;
+
+		if (chan_type == NL80211_CHAN_HT40PLUS) {
+			lower_rssi = fix_rssi_inv_only(rs->rs_rssi_ctl[0]);
+			upper_rssi = fix_rssi_inv_only(rs->rs_rssi_ext[0]);
+
+			fft_sample_40.lower_noise = ah->noise;
+			fft_sample_40.upper_noise = ext_nf;
+		} else {
+			lower_rssi = fix_rssi_inv_only(rs->rs_rssi_ext[0]);
+			upper_rssi = fix_rssi_inv_only(rs->rs_rssi_ctl[0]);
+
+			fft_sample_40.lower_noise = ext_nf;
+			fft_sample_40.upper_noise = ah->noise;
+		}
+		fft_sample_40.lower_rssi = lower_rssi;
+		fft_sample_40.upper_rssi = upper_rssi;
+
+		mag_info = ((struct ath_ht20_40_mag_info *)radar_info) - 1;
+		lower_mag = spectral_max_magnitude(mag_info->lower_bins);
+		upper_mag = spectral_max_magnitude(mag_info->upper_bins);
+		fft_sample_40.lower_max_magnitude = __cpu_to_be16(lower_mag);
+		fft_sample_40.upper_max_magnitude = __cpu_to_be16(upper_mag);
+		lower_max_index = spectral_max_index(mag_info->lower_bins);
+		upper_max_index = spectral_max_index(mag_info->upper_bins);
+		fft_sample_40.lower_max_index = lower_max_index;
+		fft_sample_40.upper_max_index = upper_max_index;
+		lower_bitmap_w = spectral_bitmap_weight(mag_info->lower_bins);
+		upper_bitmap_w = spectral_bitmap_weight(mag_info->upper_bins);
+		fft_sample_40.lower_bitmap_weight = lower_bitmap_w;
+		fft_sample_40.upper_bitmap_weight = upper_bitmap_w;
+		fft_sample_40.max_exp = mag_info->max_exp & 0xf;
+
+		fft_sample_40.tsf = __cpu_to_be64(tsf);
+
+		tlv = (struct fft_sample_tlv *)&fft_sample_40;
+	} else {
+		u8 max_index, bitmap_w;
+		u16 magnitude;
+		struct ath_ht20_mag_info *mag_info;
+
+		length = sizeof(fft_sample_20) - sizeof(struct fft_sample_tlv);
+		fft_sample_20.tlv.type = ATH_FFT_SAMPLE_HT20;
+		fft_sample_20.tlv.length = __cpu_to_be16(length);
+		fft_sample_20.freq = __cpu_to_be16(freq);
+
+		fft_sample_20.rssi = fix_rssi_inv_only(rs->rs_rssi_ctl[0]);
+		fft_sample_20.noise = ah->noise;
+
+		mag_info = ((struct ath_ht20_mag_info *)radar_info) - 1;
+		magnitude = spectral_max_magnitude(mag_info->all_bins);
+		fft_sample_20.max_magnitude = __cpu_to_be16(magnitude);
+		max_index = spectral_max_index(mag_info->all_bins);
+		fft_sample_20.max_index = max_index;
+		bitmap_w = spectral_bitmap_weight(mag_info->all_bins);
+		fft_sample_20.bitmap_weight = bitmap_w;
+		fft_sample_20.max_exp = mag_info->max_exp & 0xf;
+
+		fft_sample_20.tsf = __cpu_to_be64(tsf);
+
+		tlv = (struct fft_sample_tlv *)&fft_sample_20;
+	}
+
+	ath_debug_send_fft_sample(sc, tlv);
+
+	return 1;
+}
+
+/*********************/
+/* spectral_scan_ctl */
+/*********************/
+
+static ssize_t read_file_spec_scan_ctl(struct file *file, char __user *user_buf,
+				       size_t count, loff_t *ppos)
+{
+	struct ath_softc *sc = file->private_data;
+	char *mode = "";
+	unsigned int len;
+
+	switch (sc->spectral_mode) {
+	case SPECTRAL_DISABLED:
+		mode = "disable";
+		break;
+	case SPECTRAL_BACKGROUND:
+		mode = "background";
+		break;
+	case SPECTRAL_CHANSCAN:
+		mode = "chanscan";
+		break;
+	case SPECTRAL_MANUAL:
+		mode = "manual";
+		break;
+	}
+	len = strlen(mode);
+	return simple_read_from_buffer(user_buf, count, ppos, mode, len);
+}
+
+static ssize_t write_file_spec_scan_ctl(struct file *file,
+					const char __user *user_buf,
+					size_t count, loff_t *ppos)
+{
+	struct ath_softc *sc = file->private_data;
+	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+	char buf[32];
+	ssize_t len;
+
+	if (config_enabled(CONFIG_ATH9K_TX99))
+		return -EOPNOTSUPP;
+
+	len = min(count, sizeof(buf) - 1);
+	if (copy_from_user(buf, user_buf, len))
+		return -EFAULT;
+
+	buf[len] = '\0';
+
+	if (strncmp("trigger", buf, 7) == 0) {
+		ath9k_spectral_scan_trigger(sc->hw);
+	} else if (strncmp("background", buf, 10) == 0) {
+		ath9k_spectral_scan_config(sc->hw, SPECTRAL_BACKGROUND);
+		ath_dbg(common, CONFIG, "spectral scan: background mode enabled\n");
+	} else if (strncmp("chanscan", buf, 8) == 0) {
+		ath9k_spectral_scan_config(sc->hw, SPECTRAL_CHANSCAN);
+		ath_dbg(common, CONFIG, "spectral scan: channel scan mode enabled\n");
+	} else if (strncmp("manual", buf, 6) == 0) {
+		ath9k_spectral_scan_config(sc->hw, SPECTRAL_MANUAL);
+		ath_dbg(common, CONFIG, "spectral scan: manual mode enabled\n");
+	} else if (strncmp("disable", buf, 7) == 0) {
+		ath9k_spectral_scan_config(sc->hw, SPECTRAL_DISABLED);
+		ath_dbg(common, CONFIG, "spectral scan: disabled\n");
+	} else {
+		return -EINVAL;
+	}
+
+	return count;
+}
+
+static const struct file_operations fops_spec_scan_ctl = {
+	.read = read_file_spec_scan_ctl,
+	.write = write_file_spec_scan_ctl,
+	.open = simple_open,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
+/*************************/
+/* spectral_short_repeat */
+/*************************/
+
+static ssize_t read_file_spectral_short_repeat(struct file *file,
+					       char __user *user_buf,
+					       size_t count, loff_t *ppos)
+{
+	struct ath_softc *sc = file->private_data;
+	char buf[32];
+	unsigned int len;
+
+	len = sprintf(buf, "%d\n", sc->spec_config.short_repeat);
+	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t write_file_spectral_short_repeat(struct file *file,
+						const char __user *user_buf,
+						size_t count, loff_t *ppos)
+{
+	struct ath_softc *sc = file->private_data;
+	unsigned long val;
+	char buf[32];
+	ssize_t len;
+
+	len = min(count, sizeof(buf) - 1);
+	if (copy_from_user(buf, user_buf, len))
+		return -EFAULT;
+
+	buf[len] = '\0';
+	if (kstrtoul(buf, 0, &val))
+		return -EINVAL;
+
+	if (val > 1)
+		return -EINVAL;
+
+	sc->spec_config.short_repeat = val;
+	return count;
+}
+
+static const struct file_operations fops_spectral_short_repeat = {
+	.read = read_file_spectral_short_repeat,
+	.write = write_file_spectral_short_repeat,
+	.open = simple_open,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
+/******************/
+/* spectral_count */
+/******************/
+
+static ssize_t read_file_spectral_count(struct file *file,
+					char __user *user_buf,
+					size_t count, loff_t *ppos)
+{
+	struct ath_softc *sc = file->private_data;
+	char buf[32];
+	unsigned int len;
+
+	len = sprintf(buf, "%d\n", sc->spec_config.count);
+	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t write_file_spectral_count(struct file *file,
+					 const char __user *user_buf,
+					 size_t count, loff_t *ppos)
+{
+	struct ath_softc *sc = file->private_data;
+	unsigned long val;
+	char buf[32];
+	ssize_t len;
+
+	len = min(count, sizeof(buf) - 1);
+	if (copy_from_user(buf, user_buf, len))
+		return -EFAULT;
+
+	buf[len] = '\0';
+	if (kstrtoul(buf, 0, &val))
+		return -EINVAL;
+
+	if (val > 255)
+		return -EINVAL;
+
+	sc->spec_config.count = val;
+	return count;
+}
+
+static const struct file_operations fops_spectral_count = {
+	.read = read_file_spectral_count,
+	.write = write_file_spectral_count,
+	.open = simple_open,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
+/*******************/
+/* spectral_period */
+/*******************/
+
+static ssize_t read_file_spectral_period(struct file *file,
+					 char __user *user_buf,
+					 size_t count, loff_t *ppos)
+{
+	struct ath_softc *sc = file->private_data;
+	char buf[32];
+	unsigned int len;
+
+	len = sprintf(buf, "%d\n", sc->spec_config.period);
+	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t write_file_spectral_period(struct file *file,
+					  const char __user *user_buf,
+					  size_t count, loff_t *ppos)
+{
+	struct ath_softc *sc = file->private_data;
+	unsigned long val;
+	char buf[32];
+	ssize_t len;
+
+	len = min(count, sizeof(buf) - 1);
+	if (copy_from_user(buf, user_buf, len))
+		return -EFAULT;
+
+	buf[len] = '\0';
+	if (kstrtoul(buf, 0, &val))
+		return -EINVAL;
+
+	if (val > 255)
+		return -EINVAL;
+
+	sc->spec_config.period = val;
+	return count;
+}
+
+static const struct file_operations fops_spectral_period = {
+	.read = read_file_spectral_period,
+	.write = write_file_spectral_period,
+	.open = simple_open,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
+/***********************/
+/* spectral_fft_period */
+/***********************/
+
+static ssize_t read_file_spectral_fft_period(struct file *file,
+					     char __user *user_buf,
+					     size_t count, loff_t *ppos)
+{
+	struct ath_softc *sc = file->private_data;
+	char buf[32];
+	unsigned int len;
+
+	len = sprintf(buf, "%d\n", sc->spec_config.fft_period);
+	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t write_file_spectral_fft_period(struct file *file,
+					      const char __user *user_buf,
+					      size_t count, loff_t *ppos)
+{
+	struct ath_softc *sc = file->private_data;
+	unsigned long val;
+	char buf[32];
+	ssize_t len;
+
+	len = min(count, sizeof(buf) - 1);
+	if (copy_from_user(buf, user_buf, len))
+		return -EFAULT;
+
+	buf[len] = '\0';
+	if (kstrtoul(buf, 0, &val))
+		return -EINVAL;
+
+	if (val > 15)
+		return -EINVAL;
+
+	sc->spec_config.fft_period = val;
+	return count;
+}
+
+static const struct file_operations fops_spectral_fft_period = {
+	.read = read_file_spectral_fft_period,
+	.write = write_file_spectral_fft_period,
+	.open = simple_open,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
+/*******************/
+/* Relay interface */
+/*******************/
+
+static struct dentry *create_buf_file_handler(const char *filename,
+					      struct dentry *parent,
+					      umode_t mode,
+					      struct rchan_buf *buf,
+					      int *is_global)
+{
+	struct dentry *buf_file;
+
+	buf_file = debugfs_create_file(filename, mode, parent, buf,
+				       &relay_file_operations);
+	*is_global = 1;
+	return buf_file;
+}
+
+static int remove_buf_file_handler(struct dentry *dentry)
+{
+	debugfs_remove(dentry);
+
+	return 0;
+}
+
+static struct rchan_callbacks rfs_spec_scan_cb = {
+	.create_buf_file = create_buf_file_handler,
+	.remove_buf_file = remove_buf_file_handler,
+};
+
+/*********************/
+/* Debug Init/Deinit */
+/*********************/
+
+void ath9k_spectral_deinit_debug(struct ath_softc *sc)
+{
+	if (config_enabled(CONFIG_ATH9K_DEBUGFS) && sc->rfs_chan_spec_scan) {
+		relay_close(sc->rfs_chan_spec_scan);
+		sc->rfs_chan_spec_scan = NULL;
+	}
+}
+
+void ath9k_spectral_init_debug(struct ath_softc *sc)
+{
+	sc->rfs_chan_spec_scan = relay_open("spectral_scan",
+					    sc->debug.debugfs_phy,
+					    1024, 256, &rfs_spec_scan_cb,
+					    NULL);
+	debugfs_create_file("spectral_scan_ctl",
+			    S_IRUSR | S_IWUSR,
+			    sc->debug.debugfs_phy, sc,
+			    &fops_spec_scan_ctl);
+	debugfs_create_file("spectral_short_repeat",
+			    S_IRUSR | S_IWUSR,
+			    sc->debug.debugfs_phy, sc,
+			    &fops_spectral_short_repeat);
+	debugfs_create_file("spectral_count",
+			    S_IRUSR | S_IWUSR,
+			    sc->debug.debugfs_phy, sc,
+			    &fops_spectral_count);
+	debugfs_create_file("spectral_period",
+			    S_IRUSR | S_IWUSR,
+			    sc->debug.debugfs_phy, sc,
+			    &fops_spectral_period);
+	debugfs_create_file("spectral_fft_period",
+			    S_IRUSR | S_IWUSR,
+			    sc->debug.debugfs_phy, sc,
+			    &fops_spectral_fft_period);
+}
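
The relay channel opened above is the transport userspace uses to fetch the FFT samples; each record is a packed struct fft_sample_tlv (u8 type, __be16 payload length, payload) as laid out in spectral.h. Below is a minimal reader sketch, not part of this patch; the debugfs path and the phy0 name are assumptions for a typical single-phy setup, where the relay buffer file usually appears as spectral_scan0 next to the control files created above.

/*
 * Minimal userspace sketch (not part of this patch): read one chunk from
 * the spectral relay file and walk the fft_sample_tlv records in it.
 * The debugfs path and phy name below are assumptions.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>	/* ntohs() for the big-endian length field */

int main(void)
{
	const char *path =
		"/sys/kernel/debug/ieee80211/phy0/ath9k/spectral_scan0";
	uint8_t buf[4096];
	size_t n, off = 0;
	FILE *f;

	f = fopen(path, "rb");
	if (!f) {
		perror("fopen");
		return 1;
	}
	n = fread(buf, 1, sizeof(buf), f);
	fclose(f);

	while (off + 3 <= n) {
		uint8_t type = buf[off];
		uint16_t len;

		memcpy(&len, &buf[off + 1], sizeof(len));
		len = ntohs(len);	/* length covers the payload only */
		if (off + 3 + len > n)
			break;		/* truncated record at end of read */
		printf("sample type %u, payload %u bytes\n", type, len);
		off += 3 + len;		/* 3-byte TLV header + payload */
	}
	return 0;
}

Samples only show up after a scan has been armed through the spectral_scan_ctl file, i.e. one of the modes handled by write_file_spec_scan_ctl() above.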
diff --git a/drivers/net/wireless/ath/ath9k/spectral.h b/drivers/net/wireless/ath/ath9k/spectral.h
new file mode 100644
index 000000000000..ead63412ee1a
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/spectral.h
@@ -0,0 +1,212 @@
+/*
+ * Copyright (c) 2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef SPECTRAL_H
+#define SPECTRAL_H
+
+/* enum spectral_mode:
+ *
+ * @SPECTRAL_DISABLED: spectral mode is disabled
+ * @SPECTRAL_BACKGROUND: hardware sends samples when it is not busy with
+ *	something else.
+ * @SPECTRAL_MANUAL: spectral scan is enabled, triggering for samples
+ *	is performed manually.
+ * @SPECTRAL_CHANSCAN: Like manual, but also triggered when changing channels
+ *	during a channel scan.
+ */
+enum spectral_mode {
+	SPECTRAL_DISABLED = 0,
+	SPECTRAL_BACKGROUND,
+	SPECTRAL_MANUAL,
+	SPECTRAL_CHANSCAN,
+};
+
+#define SPECTRAL_SCAN_BITMASK		0x10
+/* Radar info packet format, used for DFS and spectral formats. */
+struct ath_radar_info {
+	u8 pulse_length_pri;
+	u8 pulse_length_ext;
+	u8 pulse_bw_info;
+} __packed;
+
+/* The HT20 spectral data has 4 bytes of additional information at its end.
+ *
+ * [7:0]: all bins {max_magnitude[1:0], bitmap_weight[5:0]}
+ * [7:0]: all bins  max_magnitude[9:2]
+ * [7:0]: all bins {max_index[5:0], max_magnitude[11:10]}
+ * [3:0]: max_exp (shift amount to size max bin to 8-bit unsigned)
+ */
+struct ath_ht20_mag_info {
+	u8 all_bins[3];
+	u8 max_exp;
+} __packed;
+
+#define SPECTRAL_HT20_NUM_BINS		56
+
+/* WARNING: don't actually use this struct! MAC may vary the amount of
+ * data by -1/+2. This struct is for reference only.
+ */
+struct ath_ht20_fft_packet {
+	u8 data[SPECTRAL_HT20_NUM_BINS];
+	struct ath_ht20_mag_info mag_info;
+	struct ath_radar_info radar_info;
+} __packed;
+
+#define SPECTRAL_HT20_TOTAL_DATA_LEN	(sizeof(struct ath_ht20_fft_packet))
+
+/* Dynamic 20/40 mode:
+ *
+ * [7:0]: lower bins {max_magnitude[1:0], bitmap_weight[5:0]}
+ * [7:0]: lower bins  max_magnitude[9:2]
+ * [7:0]: lower bins {max_index[5:0], max_magnitude[11:10]}
+ * [7:0]: upper bins {max_magnitude[1:0], bitmap_weight[5:0]}
+ * [7:0]: upper bins  max_magnitude[9:2]
+ * [7:0]: upper bins {max_index[5:0], max_magnitude[11:10]}
+ * [3:0]: max_exp (shift amount to size max bin to 8-bit unsigned)
+ */
+struct ath_ht20_40_mag_info {
+	u8 lower_bins[3];
+	u8 upper_bins[3];
+	u8 max_exp;
+} __packed;
+
+#define SPECTRAL_HT20_40_NUM_BINS		128
+
+/* WARNING: don't actually use this struct! MAC may vary the amount of
+ * data. This struct is for reference only.
+ */
+struct ath_ht20_40_fft_packet {
+	u8 data[SPECTRAL_HT20_40_NUM_BINS];
+	struct ath_ht20_40_mag_info mag_info;
+	struct ath_radar_info radar_info;
+} __packed;
+
+#define SPECTRAL_HT20_40_TOTAL_DATA_LEN	(sizeof(struct ath_ht20_40_fft_packet))
+
+/* grabs the max magnitude from the all/upper/lower bins */
+static inline u16 spectral_max_magnitude(u8 *bins)
+{
+	return (bins[0] & 0xc0) >> 6 |
+	       (bins[1] & 0xff) << 2 |
+	       (bins[2] & 0x03) << 10;
+}
+
+/* return the max index from the all/upper/lower bins */
+static inline u8 spectral_max_index(u8 *bins)
+{
+	s8 m = (bins[2] & 0xfc) >> 2;
+
+	/* TODO: this still doesn't always report the right values ... */
+	if (m > 32)
+		m |= 0xe0;
+	else
+		m &= ~0xe0;
+
+	return m + 29;
+}
+
+/* return the bitmap weight from the all/upper/lower bins */
+static inline u8 spectral_bitmap_weight(u8 *bins)
+{
+	return bins[0] & 0x3f;
+}
+
+/* FFT sample format given to userspace via debugfs.
+ *
+ * Please keep the type/length fields at the front position and change
+ * the other fields only when adding another sample type.
+ *
+ * TODO: this might need rework when switching to nl80211-based
+ * interface.
+ */
+enum ath_fft_sample_type {
+	ATH_FFT_SAMPLE_HT20 = 1,
+	ATH_FFT_SAMPLE_HT20_40,
+};
+
+struct fft_sample_tlv {
+	u8 type;	/* see enum ath_fft_sample_type */
+	__be16 length;
+	/* type dependent data follows */
+} __packed;
+
+struct fft_sample_ht20 {
+	struct fft_sample_tlv tlv;
+
+	u8 max_exp;
+
+	__be16 freq;
+	s8 rssi;
+	s8 noise;
+
+	__be16 max_magnitude;
+	u8 max_index;
+	u8 bitmap_weight;
+
+	__be64 tsf;
+
+	u8 data[SPECTRAL_HT20_NUM_BINS];
+} __packed;
+
+struct fft_sample_ht20_40 {
+	struct fft_sample_tlv tlv;
+
+	u8 channel_type;
+	__be16 freq;
+
+	s8 lower_rssi;
+	s8 upper_rssi;
+
+	__be64 tsf;
+
+	s8 lower_noise;
+	s8 upper_noise;
+
+	__be16 lower_max_magnitude;
+	__be16 upper_max_magnitude;
+
+	u8 lower_max_index;
+	u8 upper_max_index;
+
+	u8 lower_bitmap_weight;
+	u8 upper_bitmap_weight;
+
+	u8 max_exp;
+
+	u8 data[SPECTRAL_HT20_40_NUM_BINS];
+} __packed;
+
+void ath9k_spectral_init_debug(struct ath_softc *sc);
+void ath9k_spectral_deinit_debug(struct ath_softc *sc);
+
+void ath9k_spectral_scan_trigger(struct ieee80211_hw *hw);
+int ath9k_spectral_scan_config(struct ieee80211_hw *hw,
+			       enum spectral_mode spectral_mode);
+
+#ifdef CONFIG_ATH9K_DEBUGFS
+int ath_process_fft(struct ath_softc *sc, struct ieee80211_hdr *hdr,
+		    struct ath_rx_status *rs, u64 tsf);
+#else
+static inline int ath_process_fft(struct ath_softc *sc,
+				  struct ieee80211_hdr *hdr,
+				  struct ath_rx_status *rs, u64 tsf)
+{
+	return 0;
+}
+#endif /* CONFIG_ATH9K_DEBUGFS */
+
+#endif /* SPECTRAL_H */
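
As a quick sanity check of the 3-byte bit layout documented above (illustrative only, not part of this header): pack a known max magnitude and bitmap weight per that layout and confirm that the same expressions used by spectral_max_magnitude() and spectral_bitmap_weight() recover them. The max index field is left out of the round trip because spectral_max_index() additionally applies the sign/offset correction noted in its TODO.

/* Standalone packing check for the mag_info byte layout (illustrative). */
#include <assert.h>
#include <stdint.h>

static uint16_t max_magnitude(const uint8_t *bins)
{
	/* same expression as spectral_max_magnitude() */
	return (bins[0] & 0xc0) >> 6 |
	       (bins[1] & 0xff) << 2 |
	       (bins[2] & 0x03) << 10;
}

static uint8_t bitmap_weight(const uint8_t *bins)
{
	/* same expression as spectral_bitmap_weight() */
	return bins[0] & 0x3f;
}

int main(void)
{
	uint16_t mag = 0x5a5;	/* arbitrary 12-bit magnitude */
	uint8_t weight = 0x2a;	/* arbitrary 6-bit bitmap weight */
	uint8_t bins[3];

	bins[0] = ((mag & 0x3) << 6) | weight;	/* {mag[1:0], weight[5:0]} */
	bins[1] = (mag >> 2) & 0xff;		/* mag[9:2] */
	bins[2] = (mag >> 10) & 0x3;		/* mag[11:10], index left 0 */

	assert(max_magnitude(bins) == mag);
	assert(bitmap_weight(bins) == weight);
	return 0;
}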
diff --git a/drivers/net/wireless/ath/ath9k/tx99.c b/drivers/net/wireless/ath/ath9k/tx99.c
new file mode 100644
index 000000000000..b686a7498450
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/tx99.c
@@ -0,0 +1,272 @@
+/*
+ * Copyright (c) 2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "ath9k.h"
+
+static void ath9k_tx99_stop(struct ath_softc *sc)
+{
+	struct ath_hw *ah = sc->sc_ah;
+	struct ath_common *common = ath9k_hw_common(ah);
+
+	ath_drain_all_txq(sc);
+	ath_startrecv(sc);
+
+	ath9k_hw_set_interrupts(ah);
+	ath9k_hw_enable_interrupts(ah);
+
+	ieee80211_wake_queues(sc->hw);
+
+	kfree_skb(sc->tx99_skb);
+	sc->tx99_skb = NULL;
+	sc->tx99_state = false;
+
+	ath9k_hw_tx99_stop(sc->sc_ah);
+	ath_dbg(common, XMIT, "TX99 stopped\n");
+}
+
+static struct sk_buff *ath9k_build_tx99_skb(struct ath_softc *sc)
+{
+	static u8 PN9Data[] = {0xff, 0x87, 0xb8, 0x59, 0xb7, 0xa1, 0xcc, 0x24,
+			       0x57, 0x5e, 0x4b, 0x9c, 0x0e, 0xe9, 0xea, 0x50,
+			       0x2a, 0xbe, 0xb4, 0x1b, 0xb6, 0xb0, 0x5d, 0xf1,
+			       0xe6, 0x9a, 0xe3, 0x45, 0xfd, 0x2c, 0x53, 0x18,
+			       0x0c, 0xca, 0xc9, 0xfb, 0x49, 0x37, 0xe5, 0xa8,
+			       0x51, 0x3b, 0x2f, 0x61, 0xaa, 0x72, 0x18, 0x84,
+			       0x02, 0x23, 0x23, 0xab, 0x63, 0x89, 0x51, 0xb3,
+			       0xe7, 0x8b, 0x72, 0x90, 0x4c, 0xe8, 0xfb, 0xc0};
+	u32 len = 1200;
+	struct ieee80211_tx_rate *rate;
+	struct ieee80211_hw *hw = sc->hw;
+	struct ath_hw *ah = sc->sc_ah;
+	struct ieee80211_hdr *hdr;
+	struct ieee80211_tx_info *tx_info;
+	struct sk_buff *skb;
+
+	skb = alloc_skb(len, GFP_KERNEL);
+	if (!skb)
+		return NULL;
+
+	skb_put(skb, len);
+
+	memset(skb->data, 0, len);
+
+	hdr = (struct ieee80211_hdr *)skb->data;
+	hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA);
+	hdr->duration_id = 0;
+
+	memcpy(hdr->addr1, hw->wiphy->perm_addr, ETH_ALEN);
+	memcpy(hdr->addr2, hw->wiphy->perm_addr, ETH_ALEN);
+	memcpy(hdr->addr3, hw->wiphy->perm_addr, ETH_ALEN);
+
+	hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
+
+	tx_info = IEEE80211_SKB_CB(skb);
+	memset(tx_info, 0, sizeof(*tx_info));
+	rate = &tx_info->control.rates[0];
+	tx_info->band = hw->conf.chandef.chan->band;
+	tx_info->flags = IEEE80211_TX_CTL_NO_ACK;
+	tx_info->control.vif = sc->tx99_vif;
+	rate->count = 1;
+	if (ah->curchan && IS_CHAN_HT(ah->curchan)) {
+		rate->flags |= IEEE80211_TX_RC_MCS;
+		if (IS_CHAN_HT40(ah->curchan))
+			rate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
+	}
+
+	memcpy(skb->data + sizeof(*hdr), PN9Data, sizeof(PN9Data));
+
+	return skb;
+}
+
+static void ath9k_tx99_deinit(struct ath_softc *sc)
+{
+	ath_reset(sc);
+
+	ath9k_ps_wakeup(sc);
+	ath9k_tx99_stop(sc);
+	ath9k_ps_restore(sc);
+}
+
+static int ath9k_tx99_init(struct ath_softc *sc)
+{
+	struct ieee80211_hw *hw = sc->hw;
+	struct ath_hw *ah = sc->sc_ah;
+	struct ath_common *common = ath9k_hw_common(ah);
+	struct ath_tx_control txctl;
+	int r;
+
+	if (test_bit(SC_OP_INVALID, &sc->sc_flags)) {
+		ath_err(common,
+			"driver is in an invalid state, unable to use TX99\n");
+		return -EINVAL;
+	}
+
+	sc->tx99_skb = ath9k_build_tx99_skb(sc);
+	if (!sc->tx99_skb)
+		return -ENOMEM;
+
+	memset(&txctl, 0, sizeof(txctl));
+	txctl.txq = sc->tx.txq_map[IEEE80211_AC_VO];
+
+	ath_reset(sc);
+
+	ath9k_ps_wakeup(sc);
+
+	ath9k_hw_disable_interrupts(ah);
+	atomic_set(&ah->intr_ref_cnt, -1);
+	ath_drain_all_txq(sc);
+	ath_stoprecv(sc);
+
+	sc->tx99_state = true;
+
+	ieee80211_stop_queues(hw);
+
+	if (sc->tx99_power == MAX_RATE_POWER + 1)
+		sc->tx99_power = MAX_RATE_POWER;
+
+	ath9k_hw_tx99_set_txpower(ah, sc->tx99_power);
+	r = ath9k_tx99_send(sc, sc->tx99_skb, &txctl);
+	if (r) {
+		ath_dbg(common, XMIT, "Failed to xmit TX99 skb\n");
+		return r;
+	}
+
+	ath_dbg(common, XMIT, "TX99 xmit started using %d (%d dBm)\n",
+		sc->tx99_power,
+		sc->tx99_power / 2);
+
+	/* We leave the hardware awake as it will be chugging on */
+
+	return 0;
+}
+
+static ssize_t read_file_tx99(struct file *file, char __user *user_buf,
+			      size_t count, loff_t *ppos)
+{
+	struct ath_softc *sc = file->private_data;
+	char buf[3];
+	unsigned int len;
+
+	len = sprintf(buf, "%d\n", sc->tx99_state);
+	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t write_file_tx99(struct file *file, const char __user *user_buf,
+			       size_t count, loff_t *ppos)
+{
+	struct ath_softc *sc = file->private_data;
+	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+	char buf[32];
+	bool start;
+	ssize_t len;
+	int r;
+
+	if (sc->nvifs > 1)
+		return -EOPNOTSUPP;
+
+	len = min(count, sizeof(buf) - 1);
+	if (copy_from_user(buf, user_buf, len))
+		return -EFAULT;
+
+	buf[len] = '\0';
+
+	if (strtobool(buf, &start))
+		return -EINVAL;
+
+	if (start == sc->tx99_state) {
+		if (!start)
+			return count;
+		ath_dbg(common, XMIT, "Resetting TX99\n");
+		ath9k_tx99_deinit(sc);
+	}
+
+	if (!start) {
+		ath9k_tx99_deinit(sc);
+		return count;
+	}
+
+	r = ath9k_tx99_init(sc);
+	if (r)
+		return r;
+
+	return count;
+}
+
+static const struct file_operations fops_tx99 = {
+	.read = read_file_tx99,
+	.write = write_file_tx99,
+	.open = simple_open,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
+static ssize_t read_file_tx99_power(struct file *file,
+				    char __user *user_buf,
+				    size_t count, loff_t *ppos)
+{
+	struct ath_softc *sc = file->private_data;
+	char buf[32];
+	unsigned int len;
+
+	len = sprintf(buf, "%d (%d dBm)\n",
+		      sc->tx99_power,
+		      sc->tx99_power / 2);
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t write_file_tx99_power(struct file *file,
+				     const char __user *user_buf,
+				     size_t count, loff_t *ppos)
+{
+	struct ath_softc *sc = file->private_data;
+	int r;
+	u8 tx_power;
+
+	r = kstrtou8_from_user(user_buf, count, 0, &tx_power);
+	if (r)
+		return r;
+
+	if (tx_power > MAX_RATE_POWER)
+		return -EINVAL;
+
+	sc->tx99_power = tx_power;
+
+	ath9k_ps_wakeup(sc);
+	ath9k_hw_tx99_set_txpower(sc->sc_ah, sc->tx99_power);
+	ath9k_ps_restore(sc);
+
+	return count;
+}
+
+static const struct file_operations fops_tx99_power = {
+	.read = read_file_tx99_power,
+	.write = write_file_tx99_power,
+	.open = simple_open,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
+void ath9k_tx99_init_debug(struct ath_softc *sc)
+{
+	if (!AR_SREV_9300_20_OR_LATER(sc->sc_ah))
+		return;
+
+	debugfs_create_file("tx99", S_IRUSR | S_IWUSR,
+			    sc->debug.debugfs_phy, sc,
+			    &fops_tx99);
+	debugfs_create_file("tx99_power", S_IRUSR | S_IWUSR,
+			    sc->debug.debugfs_phy, sc,
+			    &fops_tx99_power);
+}
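
A small userspace sketch, not part of this patch, showing how the two debugfs files created above might be driven; the /sys/kernel/debug/ieee80211/phy0 path is an assumption for a single ath9k phy, the interface is assumed to be up already, and the power value is written in the same 0.5 dBm units that read_file_tx99_power() prints.

/* Illustrative only: drive the tx99 debugfs files from userspace. */
#include <stdio.h>

static int write_str(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fputs(val, f);
	fclose(f);
	return 0;
}

int main(void)
{
	/* assumed debugfs location for a single ath9k phy */
	const char *base = "/sys/kernel/debug/ieee80211/phy0/ath9k";
	char path[256];

	snprintf(path, sizeof(path), "%s/tx99_power", base);
	write_str(path, "40");		/* 40 * 0.5 dBm = 20 dBm */

	snprintf(path, sizeof(path), "%s/tx99", base);
	write_str(path, "1");		/* start continuous transmit */
	return 0;
}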
diff --git a/drivers/net/wireless/ath/ath9k/wow.c b/drivers/net/wireless/ath/ath9k/wow.c
index 81c88dd606dc..1b3230fa3651 100644
--- a/drivers/net/wireless/ath/ath9k/wow.c
+++ b/drivers/net/wireless/ath/ath9k/wow.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ * Copyright (c) 2013 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -14,409 +14,347 @@
  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */
 
-#include <linux/export.h>
 #include "ath9k.h"
-#include "reg.h"
-#include "hw-ops.h"
 
-const char *ath9k_hw_wow_event_to_string(u32 wow_event)
-{
-	if (wow_event & AH_WOW_MAGIC_PATTERN_EN)
-		return "Magic pattern";
-	if (wow_event & AH_WOW_USER_PATTERN_EN)
-		return "User pattern";
-	if (wow_event & AH_WOW_LINK_CHANGE)
-		return "Link change";
-	if (wow_event & AH_WOW_BEACON_MISS)
-		return "Beacon miss";
-
-	return  "unknown reason";
-}
-EXPORT_SYMBOL(ath9k_hw_wow_event_to_string);
+static const struct wiphy_wowlan_support ath9k_wowlan_support = {
+	.flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT,
+	.n_patterns = MAX_NUM_USER_PATTERN,
+	.pattern_min_len = 1,
+	.pattern_max_len = MAX_PATTERN_SIZE,
+};
 
-static void ath9k_hw_set_powermode_wow_sleep(struct ath_hw *ah)
+static void ath9k_wow_map_triggers(struct ath_softc *sc,
+				   struct cfg80211_wowlan *wowlan,
+				   u32 *wow_triggers)
 {
-	struct ath_common *common = ath9k_hw_common(ah);
+	if (wowlan->disconnect)
+		*wow_triggers |= AH_WOW_LINK_CHANGE |
+				 AH_WOW_BEACON_MISS;
+	if (wowlan->magic_pkt)
+		*wow_triggers |= AH_WOW_MAGIC_PATTERN_EN;
 
-	REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV);
+	if (wowlan->n_patterns)
+		*wow_triggers |= AH_WOW_USER_PATTERN_EN;
 
-	/* set rx disable bit */
-	REG_WRITE(ah, AR_CR, AR_CR_RXD);
-
-	if (!ath9k_hw_wait(ah, AR_CR, AR_CR_RXE, 0, AH_WAIT_TIMEOUT)) {
-		ath_err(common, "Failed to stop Rx DMA in 10ms AR_CR=0x%08x AR_DIAG_SW=0x%08x\n",
-			REG_READ(ah, AR_CR), REG_READ(ah, AR_DIAG_SW));
-		return;
-	}
+	sc->wow_enabled = *wow_triggers;
 
-	REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_ON_INT);
 }
 
-static void ath9k_wow_create_keep_alive_pattern(struct ath_hw *ah)
+static void ath9k_wow_add_disassoc_deauth_pattern(struct ath_softc *sc)
 {
+	struct ath_hw *ah = sc->sc_ah;
 	struct ath_common *common = ath9k_hw_common(ah);
-	u8 sta_mac_addr[ETH_ALEN], ap_mac_addr[ETH_ALEN];
-	u32 ctl[13] = {0};
-	u32 data_word[KAL_NUM_DATA_WORDS];
-	u8 i;
-	u32 wow_ka_data_word0;
-
-	memcpy(sta_mac_addr, common->macaddr, ETH_ALEN);
-	memcpy(ap_mac_addr, common->curbssid, ETH_ALEN);
-
-	/* set the transmit buffer */
-	ctl[0] = (KAL_FRAME_LEN | (MAX_RATE_POWER << 16));
-	ctl[1] = 0;
-	ctl[3] = 0xb;	/* OFDM_6M hardware value for this rate */
-	ctl[4] = 0;
-	ctl[7] = (ah->txchainmask) << 2;
-	ctl[2] = 0xf << 16; /* tx_tries 0 */
-
-	for (i = 0; i < KAL_NUM_DESC_WORDS; i++)
-		REG_WRITE(ah, (AR_WOW_KA_DESC_WORD2 + i * 4), ctl[i]);
-
-	REG_WRITE(ah, (AR_WOW_KA_DESC_WORD2 + i * 4), ctl[i]);
-
-	data_word[0] = (KAL_FRAME_TYPE << 2) | (KAL_FRAME_SUB_TYPE << 4) |
-		       (KAL_TO_DS << 8) | (KAL_DURATION_ID << 16);
-	data_word[1] = (ap_mac_addr[3] << 24) | (ap_mac_addr[2] << 16) |
-		       (ap_mac_addr[1] << 8) | (ap_mac_addr[0]);
-	data_word[2] = (sta_mac_addr[1] << 24) | (sta_mac_addr[0] << 16) |
-		       (ap_mac_addr[5] << 8) | (ap_mac_addr[4]);
-	data_word[3] = (sta_mac_addr[5] << 24) | (sta_mac_addr[4] << 16) |
-		       (sta_mac_addr[3] << 8) | (sta_mac_addr[2]);
-	data_word[4] = (ap_mac_addr[3] << 24) | (ap_mac_addr[2] << 16) |
-		       (ap_mac_addr[1] << 8) | (ap_mac_addr[0]);
-	data_word[5] = (ap_mac_addr[5] << 8) | (ap_mac_addr[4]);
-
-	if (AR_SREV_9462_20(ah)) {
-		/* AR9462 2.0 has an extra descriptor word (time based
-		 * discard) compared to other chips */
-		REG_WRITE(ah, (AR_WOW_KA_DESC_WORD2 + (12 * 4)), 0);
-		wow_ka_data_word0 = AR_WOW_TXBUF(13);
-	} else {
-		wow_ka_data_word0 = AR_WOW_TXBUF(12);
-	}
-
-	for (i = 0; i < KAL_NUM_DATA_WORDS; i++)
-		REG_WRITE(ah, (wow_ka_data_word0 + i*4), data_word[i]);
-
-}
-
-void ath9k_hw_wow_apply_pattern(struct ath_hw *ah, u8 *user_pattern,
-				u8 *user_mask, int pattern_count,
-				int pattern_len)
-{
-	int i;
-	u32 pattern_val, mask_val;
-	u32 set, clr;
+	int pattern_count = 0;
+	int i, byte_cnt;
+	u8 dis_deauth_pattern[MAX_PATTERN_SIZE];
+	u8 dis_deauth_mask[MAX_PATTERN_SIZE];
 
-	/* FIXME: should check count by querying the hardware capability */
-	if (pattern_count >= MAX_NUM_PATTERN)
-		return;
-
-	REG_SET_BIT(ah, AR_WOW_PATTERN, BIT(pattern_count));
-
-	/* set the registers for pattern */
-	for (i = 0; i < MAX_PATTERN_SIZE; i += 4) {
-		memcpy(&pattern_val, user_pattern, 4);
-		REG_WRITE(ah, (AR_WOW_TB_PATTERN(pattern_count) + i),
-			  pattern_val);
-		user_pattern += 4;
-	}
-
-	/* set the registers for mask */
-	for (i = 0; i < MAX_PATTERN_MASK_SIZE; i += 4) {
-		memcpy(&mask_val, user_mask, 4);
-		REG_WRITE(ah, (AR_WOW_TB_MASK(pattern_count) + i), mask_val);
-		user_mask += 4;
-	}
+	memset(dis_deauth_pattern, 0, MAX_PATTERN_SIZE);
+	memset(dis_deauth_mask, 0, MAX_PATTERN_SIZE);
 
-	/* set the pattern length to be matched
+	/*
+	 * Create Disassociate / Deauthenticate packet filter
+	 *
+	 *     2 bytes       2 bytes    6 bytes   6 bytes  6 bytes
+	 *  +--------------+----------+---------+--------+--------+----
+	 *  + Frame Control+ Duration +   DA    +  SA    +  BSSID +
+	 *  +--------------+----------+---------+--------+--------+----
 	 *
-	 * AR_WOW_LENGTH1_REG1
-	 * bit 31:24 pattern 0 length
-	 * bit 23:16 pattern 1 length
-	 * bit 15:8 pattern 2 length
-	 * bit 7:0 pattern 3 length
+	 * The above is the management frame format for disassociate/
+	 * deauthenticate patterns; from this we need to match the first byte
+	 * of 'Frame Control' and the DA, SA and BSSID fields
+	 * (skipping the 2nd byte of FC and the Duration field).
 	 *
-	 * AR_WOW_LENGTH1_REG2
-	 * bit 31:24 pattern 4 length
-	 * bit 23:16 pattern 5 length
-	 * bit 15:8 pattern 6 length
-	 * bit 7:0 pattern 7 length
+	 * Disassociate pattern
+	 * --------------------
+	 * Frame control = 00 00 1010
+	 * DA, SA, BSSID = x:x:x:x:x:x
+	 * Pattern will be A0000000 | x:x:x:x:x:x | x:x:x:x:x:x
+	 *			    | x:x:x:x:x:x  -- 22 bytes
 	 *
-	 * the below logic writes out the new
-	 * pattern length for the corresponding
-	 * pattern_count, while masking out the
-	 * other fields
+	 * Deauthenticate pattern
+	 * ----------------------
+	 * Frame control = 00 00 1100
+	 * DA, SA, BSSID = x:x:x:x:x:x
+	 * Pattern will be C0000000 | x:x:x:x:x:x | x:x:x:x:x:x
+	 *			    | x:x:x:x:x:x  -- 22 bytes
 	 */
 
-	ah->wow_event_mask |= BIT(pattern_count + AR_WOW_PAT_FOUND_SHIFT);
-
-	if (pattern_count < 4) {
-		/* Pattern 0-3 uses AR_WOW_LENGTH1 register */
-		set = (pattern_len & AR_WOW_LENGTH_MAX) <<
-		       AR_WOW_LEN1_SHIFT(pattern_count);
-		clr = AR_WOW_LENGTH1_MASK(pattern_count);
-		REG_RMW(ah, AR_WOW_LENGTH1, set, clr);
-	} else {
-		/* Pattern 4-7 uses AR_WOW_LENGTH2 register */
-		set = (pattern_len & AR_WOW_LENGTH_MAX) <<
-		       AR_WOW_LEN2_SHIFT(pattern_count);
-		clr = AR_WOW_LENGTH2_MASK(pattern_count);
-		REG_RMW(ah, AR_WOW_LENGTH2, set, clr);
-	}
+	/* Create Disassociate Pattern first */
 
-}
-EXPORT_SYMBOL(ath9k_hw_wow_apply_pattern);
+	byte_cnt = 0;
 
-u32 ath9k_hw_wow_wakeup(struct ath_hw *ah)
-{
-	u32 wow_status = 0;
-	u32 val = 0, rval;
+	/* Fill out the mask with all FF's */
 
-	/*
-	 * read the WoW status register to know
-	 * the wakeup reason
-	 */
-	rval = REG_READ(ah, AR_WOW_PATTERN);
-	val = AR_WOW_STATUS(rval);
+	for (i = 0; i < MAX_PATTERN_MASK_SIZE; i++)
+		dis_deauth_mask[i] = 0xff;
 
-	/*
-	 * mask only the WoW events that we have enabled. Sometimes
-	 * we have spurious WoW events from the AR_WOW_PATTERN
-	 * register. This mask will clean it up.
-	 */
+	/* copy the first byte of frame control field */
+	dis_deauth_pattern[byte_cnt] = 0xa0;
+	byte_cnt++;
 
-	val &= ah->wow_event_mask;
-
-	if (val) {
-		if (val & AR_WOW_MAGIC_PAT_FOUND)
-			wow_status |= AH_WOW_MAGIC_PATTERN_EN;
-		if (AR_WOW_PATTERN_FOUND(val))
-			wow_status |= AH_WOW_USER_PATTERN_EN;
-		if (val & AR_WOW_KEEP_ALIVE_FAIL)
-			wow_status |= AH_WOW_LINK_CHANGE;
-		if (val & AR_WOW_BEACON_FAIL)
-			wow_status |= AH_WOW_BEACON_MISS;
-	}
+	/* skip 2nd byte of frame control and Duration field */
+	byte_cnt += 3;
 
 	/*
-	 * set and clear WOW_PME_CLEAR registers for the chip to
-	 * generate next wow signal.
-	 * disable D3 before accessing other registers ?
+	 * We need not match the destination mac address, it can be a
+	 * broadcast mac address or a unicast to this station
 	 */
+	byte_cnt += 6;
 
-	/* do we need to check the bit value 0x01000000 (7-10) ?? */
-	REG_RMW(ah, AR_PCIE_PM_CTRL, AR_PMCTRL_WOW_PME_CLR,
-		AR_PMCTRL_PWR_STATE_D1D3);
+	/* copy the source mac address */
+	memcpy((dis_deauth_pattern + byte_cnt), common->curbssid, ETH_ALEN);
 
-	/*
-	 * clear all events
-	 */
-	REG_WRITE(ah, AR_WOW_PATTERN,
-		  AR_WOW_CLEAR_EVENTS(REG_READ(ah, AR_WOW_PATTERN)));
+	byte_cnt += 6;
 
-	/*
-	 * restore the beacon threshold to init value
-	 */
-	REG_WRITE(ah, AR_RSSI_THR, INIT_RSSI_THR);
+	/* copy the bssid, it's the same as the source mac address */
+
+	memcpy((dis_deauth_pattern + byte_cnt), common->curbssid, ETH_ALEN);
+
+	/* Create Disassociate pattern mask */
+
+	dis_deauth_mask[0] = 0xfe;
+	dis_deauth_mask[1] = 0x03;
+	dis_deauth_mask[2] = 0xc0;
+
+	ath_dbg(common, WOW, "Adding disassoc/deauth patterns for WoW\n");
 
+	ath9k_hw_wow_apply_pattern(ah, dis_deauth_pattern, dis_deauth_mask,
+				   pattern_count, byte_cnt);
+
+	pattern_count++;
 	/*
-	 * Restore the way the PCI-E reset, Power-On-Reset, external
-	 * PCIE_POR_SHORT pins are tied to its original value.
-	 * Previously just before WoW sleep, we untie the PCI-E
-	 * reset to our Chip's Power On Reset so that any PCI-E
-	 * reset from the bus will not reset our chip
+	 * for de-authenticate pattern, only the first byte of the frame
+	 * control field gets changed from 0xA0 to 0xC0
 	 */
-	if (ah->is_pciexpress)
-		ath9k_hw_configpcipowersave(ah, false);
+	dis_deauth_pattern[0] = 0xC0;
 
-	ah->wow_event_mask = 0;
+	ath9k_hw_wow_apply_pattern(ah, dis_deauth_pattern, dis_deauth_mask,
+				   pattern_count, byte_cnt);
 
-	return wow_status;
 }
-EXPORT_SYMBOL(ath9k_hw_wow_wakeup);
 
-void ath9k_hw_wow_enable(struct ath_hw *ah, u32 pattern_enable)
+static void ath9k_wow_add_pattern(struct ath_softc *sc,
+				  struct cfg80211_wowlan *wowlan)
 {
-	u32 wow_event_mask;
-	u32 set, clr;
+	struct ath_hw *ah = sc->sc_ah;
+	struct ath9k_wow_pattern *wow_pattern = NULL;
+	struct cfg80211_pkt_pattern *patterns = wowlan->patterns;
+	int mask_len;
+	int i;
 
-	/*
-	 * wow_event_mask is a mask to the AR_WOW_PATTERN register to
-	 * indicate which WoW events we have enabled. The WoW events
-	 * are from the 'pattern_enable' in this function and
-	 * 'pattern_count' of ath9k_hw_wow_apply_pattern()
-	 */
-	wow_event_mask = ah->wow_event_mask;
+	if (!wowlan->n_patterns)
+		return;
 
 	/*
-	 * Untie Power-on-Reset from the PCI-E-Reset. When we are in
-	 * WOW sleep, we do want the Reset from the PCI-E to disturb
-	 * our hw state
+	 * Add the new user configured patterns
 	 */
-	if (ah->is_pciexpress) {
+	for (i = 0; i < wowlan->n_patterns; i++) {
+
+		wow_pattern = kzalloc(sizeof(*wow_pattern), GFP_KERNEL);
+
+		if (!wow_pattern)
+			return;
+
+		/*
+		 * TODO: convert the generic user space pattern to
+		 * appropriate chip specific/802.11 pattern.
+		 */
+
+		mask_len = DIV_ROUND_UP(wowlan->patterns[i].pattern_len, 8);
+		memset(wow_pattern->pattern_bytes, 0, MAX_PATTERN_SIZE);
+		memset(wow_pattern->mask_bytes, 0, MAX_PATTERN_SIZE);
+		memcpy(wow_pattern->pattern_bytes, patterns[i].pattern,
+		       patterns[i].pattern_len);
+		memcpy(wow_pattern->mask_bytes, patterns[i].mask, mask_len);
+		wow_pattern->pattern_len = patterns[i].pattern_len;
+
 		/*
-		 * we need to untie the internal POR (power-on-reset)
-		 * to the external PCI-E reset. We also need to tie
-		 * the PCI-E Phy reset to the PCI-E reset.
+		 * just need to take care of the deauth and disassoc patterns;
+		 * make sure we don't overwrite them.
 		 */
-		set = AR_WA_RESET_EN | AR_WA_POR_SHORT;
-		clr = AR_WA_UNTIE_RESET_EN | AR_WA_D3_L1_DISABLE;
-		REG_RMW(ah, AR_WA, set, clr);
+
+		ath9k_hw_wow_apply_pattern(ah, wow_pattern->pattern_bytes,
+					   wow_pattern->mask_bytes,
+					   i + 2,
+					   wow_pattern->pattern_len);
+		kfree(wow_pattern);
+
 	}
 
-	/*
-	 * set the power states appropriately and enable PME
-	 */
-	set = AR_PMCTRL_HOST_PME_EN | AR_PMCTRL_PWR_PM_CTRL_ENA |
-	      AR_PMCTRL_AUX_PWR_DET | AR_PMCTRL_WOW_PME_CLR;
+}
 
-	/*
-	 * set and clear WOW_PME_CLEAR registers for the chip
-	 * to generate next wow signal.
-	 */
-	REG_SET_BIT(ah, AR_PCIE_PM_CTRL, set);
-	clr = AR_PMCTRL_WOW_PME_CLR;
-	REG_CLR_BIT(ah, AR_PCIE_PM_CTRL, clr);
+int ath9k_suspend(struct ieee80211_hw *hw,
+		  struct cfg80211_wowlan *wowlan)
+{
+	struct ath_softc *sc = hw->priv;
+	struct ath_hw *ah = sc->sc_ah;
+	struct ath_common *common = ath9k_hw_common(ah);
+	u32 wow_triggers_enabled = 0;
+	int ret = 0;
 
-	/*
-	 * Setup for:
-	 *	- beacon misses
-	 *	- magic pattern
-	 *	- keep alive timeout
-	 *	- pattern matching
-	 */
+	mutex_lock(&sc->mutex);
 
-	/*
-	 * Program default values for pattern backoff, aifs/slot/KAL count,
-	 * beacon miss timeout, KAL timeout, etc.
-	 */
-	set = AR_WOW_BACK_OFF_SHIFT(AR_WOW_PAT_BACKOFF);
-	REG_SET_BIT(ah, AR_WOW_PATTERN, set);
+	ath_cancel_work(sc);
+	ath_stop_ani(sc);
 
-	set = AR_WOW_AIFS_CNT(AR_WOW_CNT_AIFS_CNT) |
-	      AR_WOW_SLOT_CNT(AR_WOW_CNT_SLOT_CNT) |
-	      AR_WOW_KEEP_ALIVE_CNT(AR_WOW_CNT_KA_CNT);
-	REG_SET_BIT(ah, AR_WOW_COUNT, set);
+	if (test_bit(SC_OP_INVALID, &sc->sc_flags)) {
+		ath_dbg(common, ANY, "Device not present\n");
+		ret = -EINVAL;
+		goto fail_wow;
+	}
 
-	if (pattern_enable & AH_WOW_BEACON_MISS)
-		set = AR_WOW_BEACON_TIMO;
-	/* We are not using beacon miss, program a large value */
-	else
-		set = AR_WOW_BEACON_TIMO_MAX;
+	if (WARN_ON(!wowlan)) {
+		ath_dbg(common, WOW, "None of the WoW triggers enabled\n");
+		ret = -EINVAL;
+		goto fail_wow;
+	}
 
-	REG_WRITE(ah, AR_WOW_BCN_TIMO, set);
+	if (!device_can_wakeup(sc->dev)) {
+		ath_dbg(common, WOW, "device_can_wakeup failed, WoW is not enabled\n");
+		ret = 1;
+		goto fail_wow;
+	}
 
 	/*
-	 * Keep alive timo in ms except AR9280
+	 * Bail out if none of the sta vifs are associated.
+	 * We also do not handle multivif cases yet; for
+	 * instance, we would have to separately configure
+	 * the 'keep alive frame' for each STA.
 	 */
-	if (!pattern_enable)
-		set = AR_WOW_KEEP_ALIVE_NEVER;
-	else
-		set = KAL_TIMEOUT * 32;
 
-	REG_WRITE(ah, AR_WOW_KEEP_ALIVE_TIMO, set);
+	if (!test_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags)) {
+		ath_dbg(common, WOW, "None of the STA vifs are associated\n");
+		ret = 1;
+		goto fail_wow;
+	}
+
+	if (sc->nvifs > 1) {
+		ath_dbg(common, WOW, "WoW for multivif is not yet supported\n");
+		ret = 1;
+		goto fail_wow;
+	}
+
+	ath9k_wow_map_triggers(sc, wowlan, &wow_triggers_enabled);
+
+	ath_dbg(common, WOW, "WoW triggers enabled 0x%x\n",
+		wow_triggers_enabled);
+
+	ath9k_ps_wakeup(sc);
+
+	ath9k_stop_btcoex(sc);
 
 	/*
-	 * Keep alive delay in us. based on 'power on clock',
-	 * therefore in usec
+	 * Enable wake-up on receiving a disassoc/deauth
+	 * frame by default.
 	 */
-	set = KAL_DELAY * 1000;
-	REG_WRITE(ah, AR_WOW_KEEP_ALIVE_DELAY, set);
+	ath9k_wow_add_disassoc_deauth_pattern(sc);
+
+	if (wow_triggers_enabled & AH_WOW_USER_PATTERN_EN)
+		ath9k_wow_add_pattern(sc, wowlan);
 
+	spin_lock_bh(&sc->sc_pcu_lock);
 	/*
-	 * Create keep alive pattern to respond to beacons
+	 * To avoid false wake, we enable beacon miss interrupt only
+	 * when we go to sleep. We save the current interrupt mask
+	 * so we can restore it after the system wakes up
 	 */
-	ath9k_wow_create_keep_alive_pattern(ah);
+	sc->wow_intr_before_sleep = ah->imask;
+	ah->imask &= ~ATH9K_INT_GLOBAL;
+	ath9k_hw_disable_interrupts(ah);
+	ah->imask = ATH9K_INT_BMISS | ATH9K_INT_GLOBAL;
+	ath9k_hw_set_interrupts(ah);
+	ath9k_hw_enable_interrupts(ah);
+
+	spin_unlock_bh(&sc->sc_pcu_lock);
 
 	/*
-	 * Configure MAC WoW Registers
+	 * we can now sync irq and kill any running tasklets, since we already
+	 * disabled interrupts and are not holding a spin lock
 	 */
-	set = 0;
-	/* Send keep alive timeouts anyway */
-	clr = AR_WOW_KEEP_ALIVE_AUTO_DIS;
+	synchronize_irq(sc->irq);
+	tasklet_kill(&sc->intr_tq);
 
-	if (pattern_enable & AH_WOW_LINK_CHANGE)
-		wow_event_mask |= AR_WOW_KEEP_ALIVE_FAIL;
-	else
-		set = AR_WOW_KEEP_ALIVE_FAIL_DIS;
+	ath9k_hw_wow_enable(ah, wow_triggers_enabled);
 
-	set = AR_WOW_KEEP_ALIVE_FAIL_DIS;
-	REG_RMW(ah, AR_WOW_KEEP_ALIVE, set, clr);
+	ath9k_ps_restore(sc);
+	ath_dbg(common, ANY, "WoW enabled in ath9k\n");
+	atomic_inc(&sc->wow_sleep_proc_intr);
 
-	/*
-	 * we are relying on a bmiss failure. ensure we have
-	 * enough threshold to prevent false positives
-	 */
-	REG_RMW_FIELD(ah, AR_RSSI_THR, AR_RSSI_THR_BM_THR,
-		      AR_WOW_BMISSTHRESHOLD);
+fail_wow:
+	mutex_unlock(&sc->mutex);
+	return ret;
+}
+
+int ath9k_resume(struct ieee80211_hw *hw)
+{
+	struct ath_softc *sc = hw->priv;
+	struct ath_hw *ah = sc->sc_ah;
+	struct ath_common *common = ath9k_hw_common(ah);
+	u32 wow_status;
 
-	set = 0;
-	clr = 0;
+	mutex_lock(&sc->mutex);
 
-	if (pattern_enable & AH_WOW_BEACON_MISS) {
-		set = AR_WOW_BEACON_FAIL_EN;
-		wow_event_mask |= AR_WOW_BEACON_FAIL;
-	} else {
-		clr = AR_WOW_BEACON_FAIL_EN;
+	ath9k_ps_wakeup(sc);
+
+	spin_lock_bh(&sc->sc_pcu_lock);
+
+	ath9k_hw_disable_interrupts(ah);
+	ah->imask = sc->wow_intr_before_sleep;
+	ath9k_hw_set_interrupts(ah);
+	ath9k_hw_enable_interrupts(ah);
+
+	spin_unlock_bh(&sc->sc_pcu_lock);
+
+	wow_status = ath9k_hw_wow_wakeup(ah);
+
+	if (atomic_read(&sc->wow_got_bmiss_intr) == 0) {
+		/*
+		 * some devices may not report beacon miss
+		 * as the reason they woke up, so we add
+		 * it here to cover that shortcoming.
+		 */
+		wow_status |= AH_WOW_BEACON_MISS;
+		atomic_dec(&sc->wow_got_bmiss_intr);
+		ath_dbg(common, ANY, "Beacon miss interrupt picked up during WoW sleep\n");
 	}
 
-	REG_RMW(ah, AR_WOW_BCN_EN, set, clr);
+	atomic_dec(&sc->wow_sleep_proc_intr);
 
-	set = 0;
-	clr = 0;
-	/*
-	 * Enable the magic packet registers
-	 */
-	if (pattern_enable & AH_WOW_MAGIC_PATTERN_EN) {
-		set = AR_WOW_MAGIC_EN;
-		wow_event_mask |= AR_WOW_MAGIC_PAT_FOUND;
-	} else {
-		clr = AR_WOW_MAGIC_EN;
+	if (wow_status) {
+		ath_dbg(common, ANY, "Waking up due to WoW triggers %s with WoW status = %x\n",
+			ath9k_hw_wow_event_to_string(wow_status), wow_status);
 	}
-	set |= AR_WOW_MAC_INTR_EN;
-	REG_RMW(ah, AR_WOW_PATTERN, set, clr);
 
-	REG_WRITE(ah, AR_WOW_PATTERN_MATCH_LT_256B,
-		  AR_WOW_PATTERN_SUPPORTED);
+	ath_restart_work(sc);
+	ath9k_start_btcoex(sc);
 
-	/*
-	 * Set the power states appropriately and enable PME
-	 */
-	clr = 0;
-	set = AR_PMCTRL_PWR_STATE_D1D3 | AR_PMCTRL_HOST_PME_EN |
-	      AR_PMCTRL_PWR_PM_CTRL_ENA;
+	ath9k_ps_restore(sc);
+	mutex_unlock(&sc->mutex);
 
-	clr = AR_PCIE_PM_CTRL_ENA;
-	REG_RMW(ah, AR_PCIE_PM_CTRL, set, clr);
+	return 0;
+}
 
-	/*
-	 * this is needed to prevent the chip waking up
-	 * the host within 3-4 seconds with certain
-	 * platform/BIOS. The fix is to enable
-	 * D1 & D3 to match original definition and
-	 * also match the OTP value. Anyway this
-	 * is more related to SW WOW.
-	 */
-	clr = AR_PMCTRL_PWR_STATE_D1D3;
-	REG_CLR_BIT(ah, AR_PCIE_PM_CTRL, clr);
+void ath9k_set_wakeup(struct ieee80211_hw *hw, bool enabled)
+{
+	struct ath_softc *sc = hw->priv;
 
-	set = AR_PMCTRL_PWR_STATE_D1D3_REAL;
-	REG_SET_BIT(ah, AR_PCIE_PM_CTRL, set);
+	mutex_lock(&sc->mutex);
+	device_init_wakeup(sc->dev, 1);
+	device_set_wakeup_enable(sc->dev, enabled);
+	mutex_unlock(&sc->mutex);
+}
 
-	REG_CLR_BIT(ah, AR_STA_ID1, AR_STA_ID1_PRESERVE_SEQNUM);
+void ath9k_init_wow(struct ieee80211_hw *hw)
+{
+	struct ath_softc *sc = hw->priv;
 
-	/* to bring down WOW power low margin */
-	set = BIT(13);
-	REG_SET_BIT(ah, AR_PCIE_PHY_REG3, set);
-	/* HW WoW */
-	clr = BIT(5);
-	REG_CLR_BIT(ah, AR_PCU_MISC_MODE3, clr);
+	if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_WOW_DEVICE_CAPABLE) &&
+	    (sc->driver_data & ATH9K_PCI_WOW) &&
+	    device_can_wakeup(sc->dev))
+		hw->wiphy->wowlan = &ath9k_wowlan_support;
 
-	ath9k_hw_set_powermode_wow_sleep(ah);
-	ah->wow_event_mask = wow_event_mask;
+	atomic_set(&sc->wow_sleep_proc_intr, -1);
+	atomic_set(&sc->wow_got_bmiss_intr, -1);
 }
-EXPORT_SYMBOL(ath9k_hw_wow_enable);
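
For illustration only (not driver code): the 22-byte disassociate/deauthenticate wake pattern described in ath9k_wow_add_disassoc_deauth_pattern() above, laid out as a standalone helper. The bssid argument stands in for common->curbssid; the DA and duration bytes are left zero because they are not matched, and the bssid value in main() is an arbitrary placeholder.

#include <stdint.h>
#include <string.h>

#define ETH_ALEN 6	/* mirrors the kernel constant for this sketch */

static void build_dis_deauth_pattern(uint8_t pattern[22],
				     const uint8_t bssid[ETH_ALEN],
				     int deauth)
{
	memset(pattern, 0, 22);
	pattern[0] = deauth ? 0xc0 : 0xa0;	/* first byte of frame control */
	/* bytes 1-3: second FC byte + duration, not matched */
	/* bytes 4-9: DA, not matched (broadcast or unicast to this STA) */
	memcpy(&pattern[10], bssid, ETH_ALEN);	/* SA */
	memcpy(&pattern[16], bssid, ETH_ALEN);	/* BSSID, same as SA */
}

int main(void)
{
	const uint8_t bssid[ETH_ALEN] = { 0x00, 0x03, 0x7f, 0x12, 0x34, 0x56 };
	uint8_t disassoc[22], deauth[22];

	build_dis_deauth_pattern(disassoc, bssid, 0);	/* starts with 0xa0 */
	build_dis_deauth_pattern(deauth, bssid, 1);	/* starts with 0xc0 */
	return 0;
}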
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index b5a19e098f2d..0a75e2f68c9d 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -47,8 +47,6 @@ static u16 bits_per_symbol[][2] = {
 	{   260,  540 },     /*  7: 64-QAM 5/6 */
 };
 
-#define IS_HT_RATE(_rate)     ((_rate) & 0x80)
-
 static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
 			       struct ath_atx_tid *tid, struct sk_buff *skb);
 static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
@@ -174,14 +172,7 @@ static void ath_txq_skb_done(struct ath_softc *sc, struct ath_txq *txq,
 static struct ath_atx_tid *
 ath_get_skb_tid(struct ath_softc *sc, struct ath_node *an, struct sk_buff *skb)
 {
-	struct ieee80211_hdr *hdr;
-	u8 tidno = 0;
-
-	hdr = (struct ieee80211_hdr *) skb->data;
-	if (ieee80211_is_data_qos(hdr->frame_control))
-		tidno = ieee80211_get_qos_ctl(hdr)[0];
-
-	tidno &= IEEE80211_QOS_CTL_TID_MASK;
+	u8 tidno = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
 	return ATH_AN_2_TID(an, tidno);
 }
 
@@ -781,11 +772,6 @@ static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
 	if (bt_aggr_limit)
 		aggr_limit = bt_aggr_limit;
 
-	/*
-	 * h/w can accept aggregates up to 16 bit lengths (65535).
-	 * The IE, however can hold up to 65536, which shows up here
-	 * as zero. Ignore 65536 since we  are constrained by hw.
-	 */
 	if (tid->an->maxampdu)
 		aggr_limit = min(aggr_limit, tid->an->maxampdu);
 
@@ -1410,8 +1396,8 @@ int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
 	 * has already been added.
 	 */
 	if (sta->ht_cap.ht_supported) {
-		an->maxampdu = 1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
-				     sta->ht_cap.ampdu_factor);
+		an->maxampdu = (1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
+				      sta->ht_cap.ampdu_factor)) - 1;
 		density = ath9k_parse_mpdudensity(sta->ht_cap.ampdu_density);
 		an->mpdudensity = density;
 	}
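
A quick standalone check of the new maxampdu computation above, assuming IEEE80211_HT_MAX_AMPDU_FACTOR is 13 as defined by mac80211: with the largest HT A-MPDU length exponent of 3 the result is now 2^16 - 1 = 65535 bytes, the largest length the hardware can express, instead of the former 65536.

/* Illustrative check of the new maxampdu value (not driver code). */
#include <assert.h>

#define IEEE80211_HT_MAX_AMPDU_FACTOR 13	/* as in mac80211 */

int main(void)
{
	int ampdu_factor = 3;	/* maximum HT A-MPDU length exponent */
	unsigned int maxampdu =
		(1u << (IEEE80211_HT_MAX_AMPDU_FACTOR + ampdu_factor)) - 1;

	assert(maxampdu == 65535);
	return 0;
}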
@@ -1790,6 +1776,9 @@ bool ath_drain_all_txq(struct ath_softc *sc)
 		if (!ATH_TXQ_SETUP(sc, i))
 			continue;
 
+		if (!sc->tx.txq[i].axq_depth)
+			continue;
+
 		if (ath9k_hw_numtxpending(ah, sc->tx.txq[i].axq_qnum))
 			npend |= BIT(i);
 	}
@@ -2753,6 +2742,8 @@ void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
 	}
 }
 
+#ifdef CONFIG_ATH9K_TX99
+
 int ath9k_tx99_send(struct ath_softc *sc, struct sk_buff *skb,
 		    struct ath_tx_control *txctl)
 {
@@ -2795,3 +2786,5 @@ int ath9k_tx99_send(struct ath_softc *sc, struct sk_buff *skb,
 
 	return 0;
 }
+
+#endif /* CONFIG_ATH9K_TX99 */
diff --git a/drivers/net/wireless/ath/carl9170/debug.c b/drivers/net/wireless/ath/carl9170/debug.c
index 3d70cd277fd7..1c0af9cd9a85 100644
--- a/drivers/net/wireless/ath/carl9170/debug.c
+++ b/drivers/net/wireless/ath/carl9170/debug.c
@@ -37,7 +37,6 @@
  *    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */
 
-#include <linux/init.h>
 #include <linux/slab.h>
 #include <linux/module.h>
 #include <linux/seq_file.h>
diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c
index 349fa22a921a..4c8cdb097b65 100644
--- a/drivers/net/wireless/ath/carl9170/main.c
+++ b/drivers/net/wireless/ath/carl9170/main.c
@@ -37,7 +37,6 @@
  *    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */
 
-#include <linux/init.h>
 #include <linux/slab.h>
 #include <linux/module.h>
 #include <linux/etherdevice.h>
@@ -1968,18 +1967,6 @@ static int carl9170_parse_eeprom(struct ar9170 *ar)
 		return -ENOMEM;
 	ar->num_channels = chans;
 
-	/*
-	 * I measured this, a bandswitch takes roughly
-	 * 135 ms and a frequency switch about 80.
-	 *
-	 * FIXME: measure these values again once EEPROM settings
-	 *	  are used, that will influence them!
-	 */
-	if (bands == 2)
-		ar->hw->channel_change_time = 135 * 1000;
-	else
-		ar->hw->channel_change_time = 80 * 1000;
-
 	regulatory->current_rd = le16_to_cpu(ar->eeprom.reg_domain[0]);
 
 	/* second part of wiphy init */
diff --git a/drivers/net/wireless/ath/carl9170/rx.c b/drivers/net/wireless/ath/carl9170/rx.c
index e935f61c7fad..536bc46a2912 100644
--- a/drivers/net/wireless/ath/carl9170/rx.c
+++ b/drivers/net/wireless/ath/carl9170/rx.c
@@ -37,7 +37,6 @@
  *    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */
 
-#include <linux/init.h>
 #include <linux/slab.h>
 #include <linux/module.h>
 #include <linux/etherdevice.h>
@@ -520,6 +519,7 @@ static void carl9170_ps_beacon(struct ar9170 *ar, void *data, unsigned int len)
 {
 	struct ieee80211_hdr *hdr = data;
 	struct ieee80211_tim_ie *tim_ie;
+	struct ath_common *common = &ar->common;
 	u8 *tim;
 	u8 tim_len;
 	bool cam;
@@ -527,17 +527,13 @@ static void carl9170_ps_beacon(struct ar9170 *ar, void *data, unsigned int len)
 	if (likely(!(ar->hw->conf.flags & IEEE80211_CONF_PS)))
 		return;
 
-	/* check if this really is a beacon */
-	if (!ieee80211_is_beacon(hdr->frame_control))
-		return;
-
 	/* min. beacon length + FCS_LEN */
 	if (len <= 40 + FCS_LEN)
 		return;
 
+	/* check if this really is a beacon */
 	/* and only beacons from the associated BSSID, please */
-	if (!ether_addr_equal(hdr->addr3, ar->common.curbssid) ||
-	    !ar->common.curaid)
+	if (!ath_is_mybeacon(common, hdr) || !common->curaid)
 		return;
 
 	ar->ps.last_beacon = jiffies;
@@ -602,8 +598,8 @@ static void carl9170_ba_check(struct ar9170 *ar, void *data, unsigned int len)
 
 		if (bar->start_seq_num == entry_bar->start_seq_num &&
 		    TID_CHECK(bar->control, entry_bar->control) &&
-		    ether_addr_equal(bar->ra, entry_bar->ta) &&
-		    ether_addr_equal(bar->ta, entry_bar->ra)) {
+		    ether_addr_equal_64bits(bar->ra, entry_bar->ta) &&
+		    ether_addr_equal_64bits(bar->ta, entry_bar->ra)) {
 			struct ieee80211_tx_info *tx_info;
 
 			tx_info = IEEE80211_SKB_CB(entry_skb);
diff --git a/drivers/net/wireless/ath/carl9170/tx.c b/drivers/net/wireless/ath/carl9170/tx.c
index e3f696ee4d23..4cadfd48ffdf 100644
--- a/drivers/net/wireless/ath/carl9170/tx.c
+++ b/drivers/net/wireless/ath/carl9170/tx.c
@@ -37,7 +37,6 @@
  *    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */
 
-#include <linux/init.h>
 #include <linux/slab.h>
 #include <linux/module.h>
 #include <linux/etherdevice.h>
diff --git a/drivers/net/wireless/ath/main.c b/drivers/net/wireless/ath/main.c
index 8e99540cd90e..8b0ac14d5c32 100644
--- a/drivers/net/wireless/ath/main.c
+++ b/drivers/net/wireless/ath/main.c
@@ -59,6 +59,14 @@ struct sk_buff *ath_rxbuf_alloc(struct ath_common *common,
 }
 EXPORT_SYMBOL(ath_rxbuf_alloc);
 
+bool ath_is_mybeacon(struct ath_common *common, struct ieee80211_hdr *hdr)
+{
+	return ieee80211_is_beacon(hdr->frame_control) &&
+		!is_zero_ether_addr(common->curbssid) &&
+		ether_addr_equal_64bits(hdr->addr3, common->curbssid);
+}
+EXPORT_SYMBOL(ath_is_mybeacon);
+
 void ath_printk(const char *level, const struct ath_common* common,
 		const char *fmt, ...)
 {
diff --git a/drivers/net/wireless/ath/regd.c b/drivers/net/wireless/ath/regd.c
index 1217c52ab28e..e5e905910db4 100644
--- a/drivers/net/wireless/ath/regd.c
+++ b/drivers/net/wireless/ath/regd.c
@@ -37,17 +37,18 @@ static int __ath_regd_init(struct ath_regulatory *reg);
 
 /* We enable active scan on these a case by case basis by regulatory domain */
 #define ATH9K_2GHZ_CH12_13	REG_RULE(2467-10, 2472+10, 40, 0, 20,\
-					NL80211_RRF_PASSIVE_SCAN)
+					 NL80211_RRF_NO_IR)
 #define ATH9K_2GHZ_CH14		REG_RULE(2484-10, 2484+10, 40, 0, 20,\
-				NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_OFDM)
+					 NL80211_RRF_NO_IR | \
+					 NL80211_RRF_NO_OFDM)
 
 /* We allow IBSS on these on a case by case basis by regulatory domain */
 #define ATH9K_5GHZ_5150_5350	REG_RULE(5150-10, 5350+10, 80, 0, 30,\
-				NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS)
+					 NL80211_RRF_NO_IR)
 #define ATH9K_5GHZ_5470_5850	REG_RULE(5470-10, 5850+10, 80, 0, 30,\
-				NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS)
+					 NL80211_RRF_NO_IR)
 #define ATH9K_5GHZ_5725_5850	REG_RULE(5725-10, 5850+10, 80, 0, 30,\
-				NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS)
+					 NL80211_RRF_NO_IR)
 
 #define ATH9K_2GHZ_ALL		ATH9K_2GHZ_CH01_11, \
 				ATH9K_2GHZ_CH12_13, \
@@ -113,6 +114,87 @@ static const struct ieee80211_regdomain ath_world_regdom_67_68_6A_6C = {
 	}
 };
 
+static bool dynamic_country_user_possible(struct ath_regulatory *reg)
+{
+	if (config_enabled(CONFIG_ATH_REG_DYNAMIC_USER_CERT_TESTING))
+		return true;
+
+	switch (reg->country_code) {
+	case CTRY_UNITED_STATES:
+	case CTRY_JAPAN1:
+	case CTRY_JAPAN2:
+	case CTRY_JAPAN3:
+	case CTRY_JAPAN4:
+	case CTRY_JAPAN5:
+	case CTRY_JAPAN6:
+	case CTRY_JAPAN7:
+	case CTRY_JAPAN8:
+	case CTRY_JAPAN9:
+	case CTRY_JAPAN10:
+	case CTRY_JAPAN11:
+	case CTRY_JAPAN12:
+	case CTRY_JAPAN13:
+	case CTRY_JAPAN14:
+	case CTRY_JAPAN15:
+	case CTRY_JAPAN16:
+	case CTRY_JAPAN17:
+	case CTRY_JAPAN18:
+	case CTRY_JAPAN19:
+	case CTRY_JAPAN20:
+	case CTRY_JAPAN21:
+	case CTRY_JAPAN22:
+	case CTRY_JAPAN23:
+	case CTRY_JAPAN24:
+	case CTRY_JAPAN25:
+	case CTRY_JAPAN26:
+	case CTRY_JAPAN27:
+	case CTRY_JAPAN28:
+	case CTRY_JAPAN29:
+	case CTRY_JAPAN30:
+	case CTRY_JAPAN31:
+	case CTRY_JAPAN32:
+	case CTRY_JAPAN33:
+	case CTRY_JAPAN34:
+	case CTRY_JAPAN35:
+	case CTRY_JAPAN36:
+	case CTRY_JAPAN37:
+	case CTRY_JAPAN38:
+	case CTRY_JAPAN39:
+	case CTRY_JAPAN40:
+	case CTRY_JAPAN41:
+	case CTRY_JAPAN42:
+	case CTRY_JAPAN43:
+	case CTRY_JAPAN44:
+	case CTRY_JAPAN45:
+	case CTRY_JAPAN46:
+	case CTRY_JAPAN47:
+	case CTRY_JAPAN48:
+	case CTRY_JAPAN49:
+	case CTRY_JAPAN50:
+	case CTRY_JAPAN51:
+	case CTRY_JAPAN52:
+	case CTRY_JAPAN53:
+	case CTRY_JAPAN54:
+	case CTRY_JAPAN55:
+	case CTRY_JAPAN56:
+	case CTRY_JAPAN57:
+	case CTRY_JAPAN58:
+	case CTRY_JAPAN59:
+		return false;
+	}
+
+	return true;
+}
+
+static bool ath_reg_dyn_country_user_allow(struct ath_regulatory *reg)
+{
+	if (!config_enabled(CONFIG_ATH_REG_DYNAMIC_USER_REG_HINTS))
+		return false;
+	if (!dynamic_country_user_possible(reg))
+		return false;
+	return true;
+}
+
 static inline bool is_wwr_sku(u16 regd)
 {
 	return ((regd & COUNTRY_ERD_FLAG) != COUNTRY_ERD_FLAG) &&
@@ -177,118 +259,139 @@ static bool ath_is_radar_freq(u16 center_freq)
 	return (center_freq >= 5260 && center_freq <= 5700);
 }
 
+static void ath_force_clear_no_ir_chan(struct wiphy *wiphy,
+				       struct ieee80211_channel *ch)
+{
+	const struct ieee80211_reg_rule *reg_rule;
+
+	reg_rule = freq_reg_info(wiphy, MHZ_TO_KHZ(ch->center_freq));
+	if (IS_ERR(reg_rule))
+		return;
+
+	if (!(reg_rule->flags & NL80211_RRF_NO_IR))
+		if (ch->flags & IEEE80211_CHAN_NO_IR)
+			ch->flags &= ~IEEE80211_CHAN_NO_IR;
+}
+
+static void ath_force_clear_no_ir_freq(struct wiphy *wiphy, u16 center_freq)
+{
+	struct ieee80211_channel *ch;
+
+	ch = ieee80211_get_channel(wiphy, center_freq);
+	if (!ch)
+		return;
+
+	ath_force_clear_no_ir_chan(wiphy, ch);
+}
+
+static void ath_force_no_ir_chan(struct ieee80211_channel *ch)
+{
+	ch->flags |= IEEE80211_CHAN_NO_IR;
+}
+
+static void ath_force_no_ir_freq(struct wiphy *wiphy, u16 center_freq)
+{
+	struct ieee80211_channel *ch;
+
+	ch = ieee80211_get_channel(wiphy, center_freq);
+	if (!ch)
+		return;
+
+	ath_force_no_ir_chan(ch);
+}
+
+static void
+__ath_reg_apply_beaconing_flags(struct wiphy *wiphy,
+				struct ath_regulatory *reg,
+				enum nl80211_reg_initiator initiator,
+				struct ieee80211_channel *ch)
+{
+	if (ath_is_radar_freq(ch->center_freq) ||
+	    (ch->flags & IEEE80211_CHAN_RADAR))
+		return;
+
+	switch (initiator) {
+	case NL80211_REGDOM_SET_BY_COUNTRY_IE:
+		ath_force_clear_no_ir_chan(wiphy, ch);
+		break;
+	case NL80211_REGDOM_SET_BY_USER:
+		if (ath_reg_dyn_country_user_allow(reg))
+			ath_force_clear_no_ir_chan(wiphy, ch);
+		break;
+	default:
+		if (ch->beacon_found)
+			ch->flags &= ~IEEE80211_CHAN_NO_IR;
+	}
+}
+
 /*
- * N.B: These exception rules do not apply radar freqs.
+ * These exception rules do not apply to radar frequencies.
  *
- * - We enable adhoc (or beaconing) if allowed by 11d
- * - We enable active scan if the channel is allowed by 11d
+ * - We enable initiating radiation if the country IE says it is fine.
 * - If no country IE has been processed and we determine we have
- *   received a beacon on a channel we can enable active scan and
- *   adhoc (or beaconing).
+ *   received a beacon on a channel we can enable initiating radiation.
  */
 static void
 ath_reg_apply_beaconing_flags(struct wiphy *wiphy,
+			      struct ath_regulatory *reg,
 			      enum nl80211_reg_initiator initiator)
 {
 	enum ieee80211_band band;
 	struct ieee80211_supported_band *sband;
-	const struct ieee80211_reg_rule *reg_rule;
 	struct ieee80211_channel *ch;
 	unsigned int i;
 
 	for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
-
 		if (!wiphy->bands[band])
 			continue;
-
 		sband = wiphy->bands[band];
-
 		for (i = 0; i < sband->n_channels; i++) {
-
 			ch = &sband->channels[i];
-
-			if (ath_is_radar_freq(ch->center_freq) ||
-			    (ch->flags & IEEE80211_CHAN_RADAR))
-				continue;
-
-			if (initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE) {
-				reg_rule = freq_reg_info(wiphy, ch->center_freq);
-				if (IS_ERR(reg_rule))
-					continue;
-				/*
-				 * If 11d had a rule for this channel ensure
-				 * we enable adhoc/beaconing if it allows us to
-				 * use it. Note that we would have disabled it
-				 * by applying our static world regdomain by
-				 * default during init, prior to calling our
-				 * regulatory_hint().
-				 */
-				if (!(reg_rule->flags &
-				    NL80211_RRF_NO_IBSS))
-					ch->flags &=
-					  ~IEEE80211_CHAN_NO_IBSS;
-				if (!(reg_rule->flags &
-				    NL80211_RRF_PASSIVE_SCAN))
-					ch->flags &=
-					  ~IEEE80211_CHAN_PASSIVE_SCAN;
-			} else {
-				if (ch->beacon_found)
-					ch->flags &= ~(IEEE80211_CHAN_NO_IBSS |
-					  IEEE80211_CHAN_PASSIVE_SCAN);
-			}
+			__ath_reg_apply_beaconing_flags(wiphy, reg,
+							initiator, ch);
 		}
 	}
-
 }
 
-/* Allows active scan scan on Ch 12 and 13 */
+/**
+ * ath_reg_apply_ir_flags() - apply NO_IR rules to 2.4 GHz channels 12 and 13
+ * @wiphy: the wiphy to use
+ * @reg: the ath regulatory structure for this device
+ * @initiator: the regulatory hint initiator
+ *
+ * If no country IE has been received, always enforce NO_IR (no
+ * initiating radiation) on these channels. This is only done for
+ * specific regulatory SKUs.
+ *
+ * If a country IE has been received, check its rule for each channel
+ * first before lifting NO_IR. NO_IR would have been enforced by the
+ * initial processing of our custom regulatory domain.
+ */
 static void
-ath_reg_apply_active_scan_flags(struct wiphy *wiphy,
-				enum nl80211_reg_initiator initiator)
+ath_reg_apply_ir_flags(struct wiphy *wiphy,
+		       struct ath_regulatory *reg,
+		       enum nl80211_reg_initiator initiator)
 {
 	struct ieee80211_supported_band *sband;
-	struct ieee80211_channel *ch;
-	const struct ieee80211_reg_rule *reg_rule;
 
 	sband = wiphy->bands[IEEE80211_BAND_2GHZ];
 	if (!sband)
 		return;
 
-	/*
-	 * If no country IE has been received always enable active scan
-	 * on these channels. This is only done for specific regulatory SKUs
-	 */
-	if (initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE) {
-		ch = &sband->channels[11]; /* CH 12 */
-		if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN)
-			ch->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN;
-		ch = &sband->channels[12]; /* CH 13 */
-		if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN)
-			ch->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN;
-		return;
-	}
-
-	/*
-	 * If a country IE has been received check its rule for this
-	 * channel first before enabling active scan. The passive scan
-	 * would have been enforced by the initial processing of our
-	 * custom regulatory domain.
-	 */
-
-	ch = &sband->channels[11]; /* CH 12 */
-	reg_rule = freq_reg_info(wiphy, ch->center_freq);
-	if (!IS_ERR(reg_rule)) {
-		if (!(reg_rule->flags & NL80211_RRF_PASSIVE_SCAN))
-			if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN)
-				ch->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN;
-	}
-
-	ch = &sband->channels[12]; /* CH 13 */
-	reg_rule = freq_reg_info(wiphy, ch->center_freq);
-	if (!IS_ERR(reg_rule)) {
-		if (!(reg_rule->flags & NL80211_RRF_PASSIVE_SCAN))
-			if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN)
-				ch->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN;
+	switch (initiator) {
+	case NL80211_REGDOM_SET_BY_COUNTRY_IE:
+		ath_force_clear_no_ir_freq(wiphy, 2467);
+		ath_force_clear_no_ir_freq(wiphy, 2472);
+		break;
+	case NL80211_REGDOM_SET_BY_USER:
+		if (!ath_reg_dyn_country_user_allow(reg))
+			break;
+		ath_force_clear_no_ir_freq(wiphy, 2467);
+		ath_force_clear_no_ir_freq(wiphy, 2472);
+		break;
+	default:
+		ath_force_no_ir_freq(wiphy, 2467);
+		ath_force_no_ir_freq(wiphy, 2472);
 	}
 }
 
@@ -320,8 +423,7 @@ static void ath_reg_apply_radar_flags(struct wiphy *wiphy)
 		 */
 		if (!(ch->flags & IEEE80211_CHAN_DISABLED))
 			ch->flags |= IEEE80211_CHAN_RADAR |
-				     IEEE80211_CHAN_NO_IBSS |
-				     IEEE80211_CHAN_PASSIVE_SCAN;
+				     IEEE80211_CHAN_NO_IR;
 	}
 }
 
@@ -335,12 +437,15 @@ static void ath_reg_apply_world_flags(struct wiphy *wiphy,
 	case 0x66:
 	case 0x67:
 	case 0x6C:
-		ath_reg_apply_beaconing_flags(wiphy, initiator);
+		ath_reg_apply_beaconing_flags(wiphy, reg, initiator);
 		break;
 	case 0x68:
-		ath_reg_apply_beaconing_flags(wiphy, initiator);
-		ath_reg_apply_active_scan_flags(wiphy, initiator);
+		ath_reg_apply_beaconing_flags(wiphy, reg, initiator);
+		ath_reg_apply_ir_flags(wiphy, reg, initiator);
 		break;
+	default:
+		if (ath_reg_dyn_country_user_allow(reg))
+			ath_reg_apply_beaconing_flags(wiphy, reg, initiator);
 	}
 }
 
@@ -393,89 +498,6 @@ static void ath_reg_dyn_country(struct wiphy *wiphy,
 	       reg_initiator_name(request->initiator));
 }
 
-static bool dynamic_country_user_possible(struct ath_regulatory *reg)
-{
-	if (config_enabled(CONFIG_ATH_REG_DYNAMIC_USER_CERT_TESTING))
-		return true;
-
-	switch (reg->country_code) {
-	case CTRY_UNITED_STATES:
-	case CTRY_JAPAN1:
-	case CTRY_JAPAN2:
-	case CTRY_JAPAN3:
-	case CTRY_JAPAN4:
-	case CTRY_JAPAN5:
-	case CTRY_JAPAN6:
-	case CTRY_JAPAN7:
-	case CTRY_JAPAN8:
-	case CTRY_JAPAN9:
-	case CTRY_JAPAN10:
-	case CTRY_JAPAN11:
-	case CTRY_JAPAN12:
-	case CTRY_JAPAN13:
-	case CTRY_JAPAN14:
-	case CTRY_JAPAN15:
-	case CTRY_JAPAN16:
-	case CTRY_JAPAN17:
-	case CTRY_JAPAN18:
-	case CTRY_JAPAN19:
-	case CTRY_JAPAN20:
-	case CTRY_JAPAN21:
-	case CTRY_JAPAN22:
-	case CTRY_JAPAN23:
-	case CTRY_JAPAN24:
-	case CTRY_JAPAN25:
-	case CTRY_JAPAN26:
-	case CTRY_JAPAN27:
-	case CTRY_JAPAN28:
-	case CTRY_JAPAN29:
-	case CTRY_JAPAN30:
-	case CTRY_JAPAN31:
-	case CTRY_JAPAN32:
-	case CTRY_JAPAN33:
-	case CTRY_JAPAN34:
-	case CTRY_JAPAN35:
-	case CTRY_JAPAN36:
-	case CTRY_JAPAN37:
-	case CTRY_JAPAN38:
-	case CTRY_JAPAN39:
-	case CTRY_JAPAN40:
-	case CTRY_JAPAN41:
-	case CTRY_JAPAN42:
-	case CTRY_JAPAN43:
-	case CTRY_JAPAN44:
-	case CTRY_JAPAN45:
-	case CTRY_JAPAN46:
-	case CTRY_JAPAN47:
-	case CTRY_JAPAN48:
-	case CTRY_JAPAN49:
-	case CTRY_JAPAN50:
-	case CTRY_JAPAN51:
-	case CTRY_JAPAN52:
-	case CTRY_JAPAN53:
-	case CTRY_JAPAN54:
-	case CTRY_JAPAN55:
-	case CTRY_JAPAN56:
-	case CTRY_JAPAN57:
-	case CTRY_JAPAN58:
-	case CTRY_JAPAN59:
-		return false;
-	}
-
-	return true;
-}
-
-static void ath_reg_dyn_country_user(struct wiphy *wiphy,
-				     struct ath_regulatory *reg,
-				     struct regulatory_request *request)
-{
-	if (!config_enabled(CONFIG_ATH_REG_DYNAMIC_USER_REG_HINTS))
-		return;
-	if (!dynamic_country_user_possible(reg))
-		return;
-	ath_reg_dyn_country(wiphy, reg, request);
-}
-
 void ath_reg_notifier_apply(struct wiphy *wiphy,
 			    struct regulatory_request *request,
 			    struct ath_regulatory *reg)
@@ -508,7 +530,8 @@ void ath_reg_notifier_apply(struct wiphy *wiphy,
 	case NL80211_REGDOM_SET_BY_DRIVER:
 		break;
 	case NL80211_REGDOM_SET_BY_USER:
-		ath_reg_dyn_country_user(wiphy, reg, request);
+		if (ath_reg_dyn_country_user_allow(reg))
+			ath_reg_dyn_country(wiphy, reg, request);
 		break;
 	case NL80211_REGDOM_SET_BY_COUNTRY_IE:
 		ath_reg_dyn_country(wiphy, reg, request);
@@ -609,7 +632,8 @@ ath_regd_init_wiphy(struct ath_regulatory *reg,
 	const struct ieee80211_regdomain *regd;
 
 	wiphy->reg_notifier = reg_notifier;
-	wiphy->flags |= WIPHY_FLAG_STRICT_REGULATORY;
+	wiphy->regulatory_flags |= REGULATORY_STRICT_REG |
+				   REGULATORY_CUSTOM_REG;
 
 	if (ath_is_world_regd(reg)) {
 		/*
@@ -617,7 +641,7 @@ ath_regd_init_wiphy(struct ath_regulatory *reg,
 		 * saved on the wiphy orig_* parameters
 		 */
 		regd = ath_world_regdomain(reg);
-		wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY;
+		wiphy->regulatory_flags |= REGULATORY_COUNTRY_IE_FOLLOW_POWER;
 	} else {
 		/*
 		 * This gets applied in the case of the absence of CRDA,
@@ -626,6 +650,7 @@ ath_regd_init_wiphy(struct ath_regulatory *reg,
 		 */
 		regd = ath_default_world_regdomain();
 	}
+
 	wiphy_apply_custom_regulatory(wiphy, regd);
 	ath_reg_apply_radar_flags(wiphy);
 	ath_reg_apply_world_flags(wiphy, NL80211_REGDOM_SET_BY_DRIVER, reg);
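
For context on the hunks above: the old NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS pair is folded into the single NL80211_RRF_NO_IR flag, and the per-channel IEEE80211_CHAN_PASSIVE_SCAN / IEEE80211_CHAN_NO_IBSS flags likewise become IEEE80211_CHAN_NO_IR. A minimal sketch of the before/after shape of a rule; EXAMPLE_RULE_OLD and EXAMPLE_RULE_NEW are hypothetical names, not part of this patch:

/* hypothetical rules mirroring the ATH9K_5GHZ_* macros above */
#define EXAMPLE_RULE_OLD	REG_RULE(5170, 5250, 80, 0, 20, \
				NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS)
#define EXAMPLE_RULE_NEW	REG_RULE(5170, 5250, 80, 0, 20, \
				NL80211_RRF_NO_IR)

Both express the same restriction: no initiating radiation (no active scan, no IBSS/beaconing) until the regulatory rules say otherwise.
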
diff --git a/drivers/net/wireless/ath/wcn36xx/hal.h b/drivers/net/wireless/ath/wcn36xx/hal.h
index c02dbc618724..3c2ef0c32f72 100644
--- a/drivers/net/wireless/ath/wcn36xx/hal.h
+++ b/drivers/net/wireless/ath/wcn36xx/hal.h
@@ -2644,7 +2644,7 @@ struct wcn36xx_hal_trigger_ba_rsp_candidate {
 	struct add_ba_info ba_info[STACFG_MAX_TC];
 } __packed;
 
-struct wcn36xx_hal_trigget_ba_req_candidate {
+struct wcn36xx_hal_trigger_ba_req_candidate {
 	u8 sta_index;
 	u8 tid_bitmap;
 } __packed;
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
index 7839b31e4826..e64a6784079e 100644
--- a/drivers/net/wireless/ath/wcn36xx/main.c
+++ b/drivers/net/wireless/ath/wcn36xx/main.c
@@ -641,7 +641,8 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw,
 		dev_kfree_skb(skb);
 	}
 
-	if (changed & BSS_CHANGED_BEACON_ENABLED) {
+	if (changed & BSS_CHANGED_BEACON_ENABLED ||
+	    changed & BSS_CHANGED_BEACON) {
 		wcn36xx_dbg(WCN36XX_DBG_MAC,
 			    "mac bss changed beacon enabled %d\n",
 			    bss_conf->enable_beacon);
diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c
index 366339421d4f..750626b0e22d 100644
--- a/drivers/net/wireless/ath/wcn36xx/smd.c
+++ b/drivers/net/wireless/ath/wcn36xx/smd.c
@@ -115,6 +115,22 @@ static void wcn36xx_smd_set_sta_ht_params(struct ieee80211_sta *sta,
 	}
 }
 
+static void wcn36xx_smd_set_sta_default_ht_params(
+		struct wcn36xx_hal_config_sta_params *sta_params)
+{
+	sta_params->ht_capable = 1;
+	sta_params->tx_channel_width_set = 1;
+	sta_params->lsig_txop_protection = 1;
+	sta_params->max_ampdu_size = 3;
+	sta_params->max_ampdu_density = 5;
+	sta_params->max_amsdu_size = 0;
+	sta_params->sgi_20Mhz = 1;
+	sta_params->sgi_40mhz = 1;
+	sta_params->green_field_capable = 1;
+	sta_params->delayed_ba_support = 0;
+	sta_params->dsss_cck_mode_40mhz = 1;
+}
+
 static void wcn36xx_smd_set_sta_params(struct wcn36xx *wcn,
 		struct ieee80211_vif *vif,
 		struct ieee80211_sta *sta,
@@ -172,6 +188,7 @@ static void wcn36xx_smd_set_sta_params(struct wcn36xx *wcn,
 			sizeof(priv_sta->supported_rates));
 	} else {
 		wcn36xx_set_default_rates(&sta_params->supported_rates);
+		wcn36xx_smd_set_sta_default_ht_params(sta_params);
 	}
 }
 
@@ -1134,14 +1151,14 @@ int wcn36xx_smd_config_bss(struct wcn36xx *wcn, struct ieee80211_vif *vif,
 		/* STA */
 		bss->oper_mode = 1;
 		bss->wcn36xx_hal_persona = WCN36XX_HAL_STA_MODE;
-	} else if (vif->type == NL80211_IFTYPE_AP) {
+	} else if (vif->type == NL80211_IFTYPE_AP ||
+		   vif->type == NL80211_IFTYPE_MESH_POINT) {
 		bss->bss_type = WCN36XX_HAL_INFRA_AP_MODE;
 
 		/* AP */
 		bss->oper_mode = 0;
 		bss->wcn36xx_hal_persona = WCN36XX_HAL_STA_SAP_MODE;
-	} else if (vif->type == NL80211_IFTYPE_ADHOC ||
-		   vif->type == NL80211_IFTYPE_MESH_POINT) {
+	} else if (vif->type == NL80211_IFTYPE_ADHOC) {
 		bss->bss_type = WCN36XX_HAL_IBSS_MODE;
 
 		/* STA */
@@ -1292,7 +1309,11 @@ int wcn36xx_smd_send_beacon(struct wcn36xx *wcn, struct ieee80211_vif *vif,
 	memcpy(msg_body.bssid, vif->addr, ETH_ALEN);
 
 	/* TODO need to find out why this is needed? */
-	msg_body.tim_ie_offset = tim_off+4;
+	if (vif->type == NL80211_IFTYPE_MESH_POINT)
+		/* mesh beacons don't need this, so push it further down */
+		msg_body.tim_ie_offset = 256;
+	else
+		msg_body.tim_ie_offset = tim_off+4;
 	msg_body.p2p_ie_offset = p2p_off;
 	PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
 
@@ -1838,7 +1859,7 @@ out:
 int wcn36xx_smd_trigger_ba(struct wcn36xx *wcn, u8 sta_index)
 {
 	struct wcn36xx_hal_trigger_ba_req_msg msg_body;
-	struct wcn36xx_hal_trigget_ba_req_candidate *candidate;
+	struct wcn36xx_hal_trigger_ba_req_candidate *candidate;
 	int ret = 0;
 
 	mutex_lock(&wcn->hal_mutex);
@@ -1849,7 +1870,7 @@ int wcn36xx_smd_trigger_ba(struct wcn36xx *wcn, u8 sta_index)
 	msg_body.header.len += sizeof(*candidate);
 	PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
 
-	candidate = (struct wcn36xx_hal_trigget_ba_req_candidate *)
+	candidate = (struct wcn36xx_hal_trigger_ba_req_candidate *)
 		(wcn->hal_buf + sizeof(msg_body));
 	candidate->sta_index = sta_index;
 	candidate->tid_bitmap = 1;
@@ -2039,22 +2060,28 @@ static void wcn36xx_smd_rsp_process(struct wcn36xx *wcn, void *buf, size_t len)
 	case WCN36XX_HAL_OTA_TX_COMPL_IND:
 	case WCN36XX_HAL_MISSED_BEACON_IND:
 	case WCN36XX_HAL_DELETE_STA_CONTEXT_IND:
-		mutex_lock(&wcn->hal_ind_mutex);
 		msg_ind = kmalloc(sizeof(*msg_ind), GFP_KERNEL);
-		if (msg_ind) {
-			msg_ind->msg_len = len;
-			msg_ind->msg = kmalloc(len, GFP_KERNEL);
-			memcpy(msg_ind->msg, buf, len);
-			list_add_tail(&msg_ind->list, &wcn->hal_ind_queue);
-			queue_work(wcn->hal_ind_wq, &wcn->hal_ind_work);
-			wcn36xx_dbg(WCN36XX_DBG_HAL, "indication arrived\n");
+		if (!msg_ind)
+			goto nomem;
+		msg_ind->msg_len = len;
+		msg_ind->msg = kmalloc(len, GFP_KERNEL);
+		if (!msg_ind->msg) {
+			kfree(msg_ind);
+nomem:
+			/*
+			 * FIXME: Do something smarter than just
+			 * printing an error.
+			 */
+			wcn36xx_err("Run out of memory while handling SMD_EVENT (%d)\n",
+				    msg_header->msg_type);
+			break;
 		}
+		memcpy(msg_ind->msg, buf, len);
+		mutex_lock(&wcn->hal_ind_mutex);
+		list_add_tail(&msg_ind->list, &wcn->hal_ind_queue);
+		queue_work(wcn->hal_ind_wq, &wcn->hal_ind_work);
 		mutex_unlock(&wcn->hal_ind_mutex);
-		if (msg_ind)
-			break;
-		/* FIXME: Do something smarter then just printing an error. */
-		wcn36xx_err("Run out of memory while handling SMD_EVENT (%d)\n",
-			    msg_header->msg_type);
+		wcn36xx_dbg(WCN36XX_DBG_HAL, "indication arrived\n");
 		break;
 	default:
 		wcn36xx_err("SMD_EVENT (%d) not supported\n",
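
The indication handling reworked above allocates the wrapper and its message buffer separately and only queues the work item once both succeed. A self-contained sketch of that allocate-then-copy pattern; example_ind and example_ind_alloc() are hypothetical names, not wcn36xx code:

#include <linux/slab.h>
#include <linux/list.h>

struct example_ind {
	struct list_head list;
	void *msg;
	size_t msg_len;
};

static struct example_ind *example_ind_alloc(const void *buf, size_t len)
{
	struct example_ind *ind = kmalloc(sizeof(*ind), GFP_KERNEL);

	if (!ind)
		return NULL;
	ind->msg = kmemdup(buf, len, GFP_KERNEL);	/* copy the payload */
	if (!ind->msg) {
		kfree(ind);				/* undo the first allocation */
		return NULL;
	}
	ind->msg_len = len;
	return ind;
}
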
diff --git a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
index 58b63833e8e7..8fa5cbace5ab 100644
--- a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
+++ b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
@@ -54,7 +54,7 @@ enum wcn36xx_debug_mask {
 };
 
 #define wcn36xx_err(fmt, arg...)				\
-	printk(KERN_ERR pr_fmt("ERROR " fmt), ##arg);
+	printk(KERN_ERR pr_fmt("ERROR " fmt), ##arg)
 
 #define wcn36xx_warn(fmt, arg...)				\
 	printk(KERN_WARNING pr_fmt("WARNING " fmt), ##arg)
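
The wcn36xx_err() change above only removes a stray trailing semicolon, but that matters for statement-like macros: the macro's own ';' plus the caller's ';' leaves an extra empty statement that breaks if/else pairing. A hypothetical illustration (log_err() is not a macro in this driver):

#include <linux/printk.h>

/* Fixed style: no trailing ';' in the macro body. With a trailing ';'
 * the call below would expand to "printk(...); ;" and the stray empty
 * statement would detach the 'else' from its 'if' (compile error).
 */
#define log_err(fmt, ...)	printk(KERN_ERR fmt, ##__VA_ARGS__)

static void demo(int err)
{
	if (err)
		log_err("demo failed: %d\n", err);
	else
		log_err("demo ok\n");
}

With the semicolon gone, the call site supplies exactly one ';' and the if/else pairs up as expected.
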
diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c
index 8205d3e4ab66..10919f95a83c 100644
--- a/drivers/net/wireless/ath/wil6210/interrupt.c
+++ b/drivers/net/wireless/ath/wil6210/interrupt.c
@@ -156,6 +156,19 @@ void wil6210_enable_irq(struct wil6210_priv *wil)
 	iowrite32(WIL_ICR_ICC_VALUE, wil->csr + HOSTADDR(RGF_DMA_EP_MISC_ICR) +
 		  offsetof(struct RGF_ICR, ICC));
 
+	/* interrupt moderation parameters */
+	if (wil->wdev->iftype == NL80211_IFTYPE_MONITOR) {
+		/* disable interrupt moderation for monitor
+		 * to get better timestamp precision
+		 */
+		iowrite32(0, wil->csr + HOSTADDR(RGF_DMA_ITR_CNT_CRL));
+	} else {
+		iowrite32(WIL6210_ITR_TRSH,
+			  wil->csr + HOSTADDR(RGF_DMA_ITR_CNT_TRSH));
+		iowrite32(BIT_DMA_ITR_CNT_CRL_EN,
+			  wil->csr + HOSTADDR(RGF_DMA_ITR_CNT_CRL));
+	}
+
 	wil6210_unmask_irq_pseudo(wil);
 	wil6210_unmask_irq_tx(wil);
 	wil6210_unmask_irq_rx(wil);
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
index d505b2676a73..0b0975d88b43 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.c
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -21,6 +21,7 @@
 #include <linux/ip.h>
 #include <linux/ipv6.h>
 #include <net/ipv6.h>
+#include <linux/prefetch.h>
 
 #include "wil6210.h"
 #include "wmi.h"
@@ -377,6 +378,8 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
 	}
 	skb_trim(skb, dmalen);
 
+	prefetch(skb->data);
+
 	wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1,
 			  skb->data, skb_headlen(skb), false);
 
@@ -673,9 +676,12 @@ static int wil_tx_desc_offload_cksum_set(struct wil6210_priv *wil,
 	if (skb->ip_summed != CHECKSUM_PARTIAL)
 		return 0;
 
+	d->dma.b11 = ETH_HLEN; /* MAC header length */
+
 	switch (skb->protocol) {
 	case cpu_to_be16(ETH_P_IP):
 		protocol = ip_hdr(skb)->protocol;
+		d->dma.b11 |= BIT(DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_POS);
 		break;
 	case cpu_to_be16(ETH_P_IPV6):
 		protocol = ipv6_hdr(skb)->nexthdr;
@@ -701,8 +707,6 @@ static int wil_tx_desc_offload_cksum_set(struct wil6210_priv *wil,
 	}
 
 	d->dma.ip_length = skb_network_header_len(skb);
-	d->dma.b11 = ETH_HLEN; /* MAC header length */
-	d->dma.b11 |= BIT(DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_POS);
 	/* Enable TCP/UDP checksum */
 	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_TCP_UDP_CHECKSUM_EN_POS);
 	/* Calculate pseudo-header */
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index c4a51638736a..1f91eaf95bbe 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -39,6 +39,7 @@ static inline u32 WIL_GET_BITS(u32 x, int b0, int b1)
 #define WIL6210_MAX_TX_RINGS	(24) /* HW limit */
 #define WIL6210_MAX_CID		(8) /* HW limit */
 #define WIL6210_NAPI_BUDGET	(16) /* arbitrary */
+#define WIL6210_ITR_TRSH	(10000) /* arbitrary - about 15 IRQs/msec */
 
 /* Hardware definitions begin */
 
diff --git a/drivers/net/wireless/atmel.c b/drivers/net/wireless/atmel.c
index 0d950f209dae..bf93ea859f2d 100644
--- a/drivers/net/wireless/atmel.c
+++ b/drivers/net/wireless/atmel.c
@@ -28,8 +28,8 @@
     GNU General Public License for more details.
 
     You should have received a copy of the GNU General Public License
-    along with Atmel wireless lan drivers; if not, write to the Free Software
-    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+    along with Atmel wireless lan drivers; if not, see
+    <http://www.gnu.org/licenses/>.
 
     For all queries about this code, please contact the current author,
     Simon Kelley <simon@thekelleys.org.uk> and not Atmel Corporation.
@@ -39,7 +39,6 @@
 
 ******************************************************************************/
 
-#include <linux/init.h>
 #include <linux/interrupt.h>
 
 #include <linux/kernel.h>
@@ -4278,8 +4277,7 @@ static void atmel_wmem32(struct atmel_private *priv, u16 pos, u32 data)
     GNU General Public License for more details.
 
     You should have received a copy of the GNU General Public License
-    along with AtmelMACFW; if not, write to the Free Software
-    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+    along with AtmelMACFW; if not, see <http://www.gnu.org/licenses/>.
 
 ****************************************************************************/
 /* This firmware should work on the 76C502 RFMD, RFMD_D, and RFMD_E        */
diff --git a/drivers/net/wireless/atmel.h b/drivers/net/wireless/atmel.h
index b9b3e5b76544..96f7318cbb04 100644
--- a/drivers/net/wireless/atmel.h
+++ b/drivers/net/wireless/atmel.h
@@ -15,8 +15,8 @@
     GNU General Public License for more details.
 
     You should have received a copy of the GNU General Public License
-    along with Atmel wireless lan drivers; if not, write to the Free Software
-    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+    along with Atmel wireless lan drivers; if not, see
+    <http://www.gnu.org/licenses/>.
 
 ******************************************************************************/
 
diff --git a/drivers/net/wireless/atmel_cs.c b/drivers/net/wireless/atmel_cs.c
index 522572219217..4cfb4d99ced0 100644
--- a/drivers/net/wireless/atmel_cs.c
+++ b/drivers/net/wireless/atmel_cs.c
@@ -24,15 +24,14 @@
     GNU General Public License for more details.
 
     You should have received a copy of the GNU General Public License
-    along with Atmel wireless lan drivers; if not, write to the Free Software
-    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+    along with Atmel wireless lan drivers; if not, see
+    <http://www.gnu.org/licenses/>.
 
 ******************************************************************************/
 
 #ifdef __IN_PCMCIA_PACKAGE__
 #include <pcmcia/k_compat.h>
 #endif
-#include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/ptrace.h>
diff --git a/drivers/net/wireless/atmel_pci.c b/drivers/net/wireless/atmel_pci.c
index c1b159ebcffe..5cd97e3cbee3 100644
--- a/drivers/net/wireless/atmel_pci.c
+++ b/drivers/net/wireless/atmel_pci.c
@@ -15,14 +15,13 @@
     GNU General Public License for more details.
 
     You should have received a copy of the GNU General Public License
-    along with Atmel wireless lan drivers; if not, write to the Free Software
-    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+    along with Atmel wireless lan drivers; if not, see
+    <http://www.gnu.org/licenses/>.
 
 ******************************************************************************/
 #include <linux/pci.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/init.h>
 #include <linux/netdevice.h>
 #include "atmel.h"
 
diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h
index 7f3d461f7e8d..54376fddfaf9 100644
--- a/drivers/net/wireless/b43/b43.h
+++ b/drivers/net/wireless/b43/b43.h
@@ -731,8 +731,6 @@ enum b43_firmware_file_type {
 struct b43_request_fw_context {
 	/* The device we are requesting the fw for. */
 	struct b43_wldev *dev;
-	/* a completion event structure needed if this call is asynchronous */
-	struct completion fw_load_complete;
 	/* a pointer to the firmware object */
 	const struct firmware *blob;
 	/* The type of firmware to request. */
@@ -809,6 +807,8 @@ enum {
 struct b43_wldev {
 	struct b43_bus_dev *dev;
 	struct b43_wl *wl;
+	/* a completion event structure needed if this call is asynchronous */
+	struct completion fw_load_complete;
 
 	/* The device initialization status.
 	 * Use b43_status() to query. */
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index ccd24f0acb8d..c75237eb55a1 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -2070,6 +2070,7 @@ void b43_do_release_fw(struct b43_firmware_file *fw)
 
 static void b43_release_firmware(struct b43_wldev *dev)
 {
+	complete(&dev->fw_load_complete);
 	b43_do_release_fw(&dev->fw.ucode);
 	b43_do_release_fw(&dev->fw.pcm);
 	b43_do_release_fw(&dev->fw.initvals);
@@ -2095,7 +2096,7 @@ static void b43_fw_cb(const struct firmware *firmware, void *context)
 	struct b43_request_fw_context *ctx = context;
 
 	ctx->blob = firmware;
-	complete(&ctx->fw_load_complete);
+	complete(&ctx->dev->fw_load_complete);
 }
 
 int b43_do_request_fw(struct b43_request_fw_context *ctx,
@@ -2142,7 +2143,7 @@ int b43_do_request_fw(struct b43_request_fw_context *ctx,
 	}
 	if (async) {
 		/* do this part asynchronously */
-		init_completion(&ctx->fw_load_complete);
+		init_completion(&ctx->dev->fw_load_complete);
 		err = request_firmware_nowait(THIS_MODULE, 1, ctx->fwname,
 					      ctx->dev->dev->dev, GFP_KERNEL,
 					      ctx, b43_fw_cb);
@@ -2150,12 +2151,11 @@ int b43_do_request_fw(struct b43_request_fw_context *ctx,
 			pr_err("Unable to load firmware\n");
 			return err;
 		}
-		/* stall here until fw ready */
-		wait_for_completion(&ctx->fw_load_complete);
+		wait_for_completion(&ctx->dev->fw_load_complete);
 		if (ctx->blob)
 			goto fw_ready;
 	/* On some ARM systems, the async request will fail, but the next sync
-	 * request works. For this reason, we dall through here
+	 * request works. For this reason, we fall through here
 	 */
 	}
 	err = request_firmware(&ctx->blob, ctx->fwname,
@@ -2424,6 +2424,7 @@ error:
 
 static int b43_one_core_attach(struct b43_bus_dev *dev, struct b43_wl *wl);
 static void b43_one_core_detach(struct b43_bus_dev *dev);
+static int b43_rng_init(struct b43_wl *wl);
 
 static void b43_request_firmware(struct work_struct *work)
 {
@@ -2475,6 +2476,10 @@ start_ieee80211:
 		goto err_one_core_detach;
 	wl->hw_registred = true;
 	b43_leds_register(wl->current_dev);
+
+	/* Register HW RNG driver */
+	b43_rng_init(wl);
+
 	goto out;
 
 err_one_core_detach:
@@ -4636,9 +4641,6 @@ static void b43_wireless_core_exit(struct b43_wldev *dev)
 	if (!dev || b43_status(dev) != B43_STAT_INITIALIZED)
 		return;
 
-	/* Unregister HW RNG driver */
-	b43_rng_exit(dev->wl);
-
 	b43_set_status(dev, B43_STAT_UNINIT);
 
 	/* Stop the microcode PSM. */
@@ -4795,9 +4797,6 @@ static int b43_wireless_core_init(struct b43_wldev *dev)
 
 	b43_set_status(dev, B43_STAT_INITIALIZED);
 
-	/* Register HW RNG driver */
-	b43_rng_init(dev->wl);
-
 out:
 	return err;
 
@@ -5464,6 +5463,9 @@ static void b43_bcma_remove(struct bcma_device *core)
 
 	b43_one_core_detach(wldev->dev);
 
+	/* Unregister HW RNG driver */
+	b43_rng_exit(wl);
+
 	b43_leds_unregister(wl);
 
 	ieee80211_free_hw(wl->hw);
@@ -5541,6 +5543,9 @@ static void b43_ssb_remove(struct ssb_device *sdev)
 
 	b43_one_core_detach(dev);
 
+	/* Unregister HW RNG driver */
+	b43_rng_exit(wl);
+
 	if (list_empty(&wl->devlist)) {
 		b43_leds_unregister(wl);
 		/* Last core on the chip unregistered.
diff --git a/drivers/net/wireless/b43/xmit.c b/drivers/net/wireless/b43/xmit.c
index 4ae63f4ddfb2..50e5ddb12fb3 100644
--- a/drivers/net/wireless/b43/xmit.c
+++ b/drivers/net/wireless/b43/xmit.c
@@ -821,10 +821,10 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr)
 		 * channel number in b43. */
 		if (chanstat & B43_RX_CHAN_5GHZ) {
 			status.band = IEEE80211_BAND_5GHZ;
-			status.freq = b43_freq_to_channel_5ghz(chanid);
+			status.freq = b43_channel_to_freq_5ghz(chanid);
 		} else {
 			status.band = IEEE80211_BAND_2GHZ;
-			status.freq = b43_freq_to_channel_2ghz(chanid);
+			status.freq = b43_channel_to_freq_2ghz(chanid);
 		}
 		break;
 	default:
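
The b43_rx() fix above swaps the conversion direction: chanid holds a channel number, while mac80211's rx status expects a centre frequency in MHz. For reference, a sketch of the standard IEEE 802.11 channel-to-frequency arithmetic; the b43_channel_to_freq_*() helpers are assumed to implement the equivalent mapping:

#include <linux/types.h>

/* standard channel-number to frequency mapping, for reference only */
static int example_channel_to_freq(int chan, bool band_5ghz)
{
	if (band_5ghz)
		return 5000 + chan * 5;	/* e.g. channel 36 -> 5180 MHz */
	if (chan == 14)
		return 2484;		/* 2.4 GHz channel 14 is special */
	return 2407 + chan * 5;		/* e.g. channel 1 -> 2412 MHz */
}
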
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
index 572668821862..349c77605231 100644
--- a/drivers/net/wireless/b43legacy/main.c
+++ b/drivers/net/wireless/b43legacy/main.c
@@ -3919,6 +3919,7 @@ static void b43legacy_remove(struct ssb_device *dev)
 	 * as the ieee80211 unreg will destroy the workqueue. */
 	cancel_work_sync(&wldev->restart_work);
 	cancel_work_sync(&wl->firmware_load);
+	complete(&wldev->fw_load_complete);
 
 	B43legacy_WARN_ON(!wl);
 	if (!wldev->fw.ucode)
diff --git a/drivers/net/wireless/brcm80211/Kconfig b/drivers/net/wireless/brcm80211/Kconfig
index 54e36fcb3954..fcfed6b99a62 100644
--- a/drivers/net/wireless/brcm80211/Kconfig
+++ b/drivers/net/wireless/brcm80211/Kconfig
@@ -4,13 +4,12 @@ config BRCMUTIL
 config BRCMSMAC
 	tristate "Broadcom IEEE802.11n PCIe SoftMAC WLAN driver"
 	depends on MAC80211
-	depends on BCMA
+	depends on BCMA_POSSIBLE
+	select BCMA
 	select NEW_LEDS if BCMA_DRIVER_GPIO
 	select LEDS_CLASS if BCMA_DRIVER_GPIO
 	select BRCMUTIL
 	select FW_LOADER
-	select CRC_CCITT
-	select CRC8
 	select CORDIC
 	---help---
 	  This module adds support for PCIe wireless adapters based on Broadcom
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/Makefile b/drivers/net/wireless/brcm80211/brcmfmac/Makefile
index 8e9b1221b32c..57cddee03252 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/Makefile
+++ b/drivers/net/wireless/brcm80211/brcmfmac/Makefile
@@ -28,14 +28,15 @@ brcmfmac-objs += \
 		fweh.o \
 		fwsignal.o \
 		p2p.o \
-		dhd_cdc.o \
+		proto.o \
+		bcdc.o \
 		dhd_common.o \
 		dhd_linux.o \
+		nvram.o \
 		btcoex.o
 brcmfmac-$(CONFIG_BRCMFMAC_SDIO) += \
 		dhd_sdio.o \
 		bcmsdh.o \
-		bcmsdh_sdmmc.o \
 		sdio_chip.o
 brcmfmac-$(CONFIG_BRCMFMAC_USB) += \
 		usb.o
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcdc.c b/drivers/net/wireless/brcm80211/brcmfmac/bcdc.c
new file mode 100644
index 000000000000..c229210d50ba
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcdc.c
@@ -0,0 +1,375 @@
+/*
+ * Copyright (c) 2010 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*******************************************************************************
+ * Communicates with the dongle by using dcmd codes.
+ * For certain dcmd codes, the dongle interprets string data from the host.
+ ******************************************************************************/
+
+#include <linux/types.h>
+#include <linux/netdevice.h>
+
+#include <brcmu_utils.h>
+#include <brcmu_wifi.h>
+
+#include "dhd.h"
+#include "dhd_bus.h"
+#include "fwsignal.h"
+#include "dhd_dbg.h"
+#include "tracepoint.h"
+#include "proto.h"
+#include "bcdc.h"
+
+struct brcmf_proto_bcdc_dcmd {
+	__le32 cmd;	/* dongle command value */
+	__le32 len;	/* lower 16: output buflen;
+			 * upper 16: input buflen (excludes header) */
+	__le32 flags;	/* flag defns given below */
+	__le32 status;	/* status code returned from the device */
+};
+
+/* BCDC flag definitions */
+#define BCDC_DCMD_ERROR		0x01		/* 1=cmd failed */
+#define BCDC_DCMD_SET		0x02		/* 0=get, 1=set cmd */
+#define BCDC_DCMD_IF_MASK	0xF000		/* I/F index */
+#define BCDC_DCMD_IF_SHIFT	12
+#define BCDC_DCMD_ID_MASK	0xFFFF0000	/* id and cmd pairing */
+#define BCDC_DCMD_ID_SHIFT	16		/* ID Mask shift bits */
+#define BCDC_DCMD_ID(flags)	\
+	(((flags) & BCDC_DCMD_ID_MASK) >> BCDC_DCMD_ID_SHIFT)
+
+/*
+ * BCDC header - Broadcom specific extension of CDC.
+ * Used on data packets to convey priority across USB.
+ */
+#define	BCDC_HEADER_LEN		4
+#define BCDC_PROTO_VER		2	/* Protocol version */
+#define BCDC_FLAG_VER_MASK	0xf0	/* Protocol version mask */
+#define BCDC_FLAG_VER_SHIFT	4	/* Protocol version shift */
+#define BCDC_FLAG_SUM_GOOD	0x04	/* Good RX checksums */
+#define BCDC_FLAG_SUM_NEEDED	0x08	/* Dongle needs to do TX checksums */
+#define BCDC_PRIORITY_MASK	0x7
+#define BCDC_FLAG2_IF_MASK	0x0f	/* packet rx interface in APSTA */
+#define BCDC_FLAG2_IF_SHIFT	0
+
+#define BCDC_GET_IF_IDX(hdr) \
+	((int)((((hdr)->flags2) & BCDC_FLAG2_IF_MASK) >> BCDC_FLAG2_IF_SHIFT))
+#define BCDC_SET_IF_IDX(hdr, idx) \
+	((hdr)->flags2 = (((hdr)->flags2 & ~BCDC_FLAG2_IF_MASK) | \
+	((idx) << BCDC_FLAG2_IF_SHIFT)))
+
+/**
+ * struct brcmf_proto_bcdc_header - BCDC header format
+ *
+ * @flags: flags contain protocol and checksum info.
+ * @priority: 802.1d priority and USB flow control info (bit 4:7).
+ * @flags2: additional flags containing dongle interface index.
+ * @data_offset: start of packet data. The header is followed by firmware signals.
+ */
+struct brcmf_proto_bcdc_header {
+	u8 flags;
+	u8 priority;
+	u8 flags2;
+	u8 data_offset;
+};
+
+/*
+ * maximum length of firmware signal data between
+ * the BCDC header and packet data in the tx path.
+ */
+#define BRCMF_PROT_FW_SIGNAL_MAX_TXBYTES	12
+
+#define RETRIES 2 /* # of retries to retrieve matching dcmd response */
+#define BUS_HEADER_LEN	(16+64)		/* Must be at least SDPCM_RESERVE
+					 * (amount of header that might be added)
+					 * plus any space that might be needed
+					 * for bus alignment padding.
+					 */
+struct brcmf_bcdc {
+	u16 reqid;
+	u8 bus_header[BUS_HEADER_LEN];
+	struct brcmf_proto_bcdc_dcmd msg;
+	unsigned char buf[BRCMF_DCMD_MAXLEN];
+};
+
+
+static int
+brcmf_proto_bcdc_msg(struct brcmf_pub *drvr, int ifidx, uint cmd, void *buf,
+		     uint len, bool set)
+{
+	struct brcmf_bcdc *bcdc = (struct brcmf_bcdc *)drvr->proto->pd;
+	struct brcmf_proto_bcdc_dcmd *msg = &bcdc->msg;
+	u32 flags;
+
+	brcmf_dbg(BCDC, "Enter\n");
+
+	memset(msg, 0, sizeof(struct brcmf_proto_bcdc_dcmd));
+
+	msg->cmd = cpu_to_le32(cmd);
+	msg->len = cpu_to_le32(len);
+	flags = (++bcdc->reqid << BCDC_DCMD_ID_SHIFT);
+	if (set)
+		flags |= BCDC_DCMD_SET;
+	flags = (flags & ~BCDC_DCMD_IF_MASK) |
+		(ifidx << BCDC_DCMD_IF_SHIFT);
+	msg->flags = cpu_to_le32(flags);
+
+	if (buf)
+		memcpy(bcdc->buf, buf, len);
+
+	len += sizeof(*msg);
+	if (len > BRCMF_TX_IOCTL_MAX_MSG_SIZE)
+		len = BRCMF_TX_IOCTL_MAX_MSG_SIZE;
+
+	/* Send request */
+	return brcmf_bus_txctl(drvr->bus_if, (unsigned char *)&bcdc->msg, len);
+}
+
+static int brcmf_proto_bcdc_cmplt(struct brcmf_pub *drvr, u32 id, u32 len)
+{
+	int ret;
+	struct brcmf_bcdc *bcdc = (struct brcmf_bcdc *)drvr->proto->pd;
+
+	brcmf_dbg(BCDC, "Enter\n");
+	len += sizeof(struct brcmf_proto_bcdc_dcmd);
+	do {
+		ret = brcmf_bus_rxctl(drvr->bus_if, (unsigned char *)&bcdc->msg,
+				      len);
+		if (ret < 0)
+			break;
+	} while (BCDC_DCMD_ID(le32_to_cpu(bcdc->msg.flags)) != id);
+
+	return ret;
+}
+
+static int
+brcmf_proto_bcdc_query_dcmd(struct brcmf_pub *drvr, int ifidx, uint cmd,
+			    void *buf, uint len)
+{
+	struct brcmf_bcdc *bcdc = (struct brcmf_bcdc *)drvr->proto->pd;
+	struct brcmf_proto_bcdc_dcmd *msg = &bcdc->msg;
+	void *info;
+	int ret = 0, retries = 0;
+	u32 id, flags;
+
+	brcmf_dbg(BCDC, "Enter, cmd %d len %d\n", cmd, len);
+
+	ret = brcmf_proto_bcdc_msg(drvr, ifidx, cmd, buf, len, false);
+	if (ret < 0) {
+		brcmf_err("brcmf_proto_bcdc_msg failed w/status %d\n",
+			  ret);
+		goto done;
+	}
+
+retry:
+	/* wait for interrupt and get first fragment */
+	ret = brcmf_proto_bcdc_cmplt(drvr, bcdc->reqid, len);
+	if (ret < 0)
+		goto done;
+
+	flags = le32_to_cpu(msg->flags);
+	id = (flags & BCDC_DCMD_ID_MASK) >> BCDC_DCMD_ID_SHIFT;
+
+	if ((id < bcdc->reqid) && (++retries < RETRIES))
+		goto retry;
+	if (id != bcdc->reqid) {
+		brcmf_err("%s: unexpected request id %d (expected %d)\n",
+			  brcmf_ifname(drvr, ifidx), id, bcdc->reqid);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	/* Check info buffer */
+	info = (void *)&msg[1];
+
+	/* Copy info buffer */
+	if (buf) {
+		if (ret < (int)len)
+			len = ret;
+		memcpy(buf, info, len);
+	}
+
+	/* Check the ERROR flag */
+	if (flags & BCDC_DCMD_ERROR)
+		ret = le32_to_cpu(msg->status);
+
+done:
+	return ret;
+}
+
+static int
+brcmf_proto_bcdc_set_dcmd(struct brcmf_pub *drvr, int ifidx, uint cmd,
+			  void *buf, uint len)
+{
+	struct brcmf_bcdc *bcdc = (struct brcmf_bcdc *)drvr->proto->pd;
+	struct brcmf_proto_bcdc_dcmd *msg = &bcdc->msg;
+	int ret = 0;
+	u32 flags, id;
+
+	brcmf_dbg(BCDC, "Enter, cmd %d len %d\n", cmd, len);
+
+	ret = brcmf_proto_bcdc_msg(drvr, ifidx, cmd, buf, len, true);
+	if (ret < 0)
+		goto done;
+
+	ret = brcmf_proto_bcdc_cmplt(drvr, bcdc->reqid, len);
+	if (ret < 0)
+		goto done;
+
+	flags = le32_to_cpu(msg->flags);
+	id = (flags & BCDC_DCMD_ID_MASK) >> BCDC_DCMD_ID_SHIFT;
+
+	if (id != bcdc->reqid) {
+		brcmf_err("%s: unexpected request id %d (expected %d)\n",
+			  brcmf_ifname(drvr, ifidx), id, bcdc->reqid);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	/* Check the ERROR flag */
+	if (flags & BCDC_DCMD_ERROR)
+		ret = le32_to_cpu(msg->status);
+
+done:
+	return ret;
+}
+
+static void
+brcmf_proto_bcdc_hdrpush(struct brcmf_pub *drvr, int ifidx, u8 offset,
+			 struct sk_buff *pktbuf)
+{
+	struct brcmf_proto_bcdc_header *h;
+
+	brcmf_dbg(BCDC, "Enter\n");
+
+	/* Push the BCDC header, used to convey packet priority across the bus */
+	skb_push(pktbuf, BCDC_HEADER_LEN);
+
+	h = (struct brcmf_proto_bcdc_header *)(pktbuf->data);
+
+	h->flags = (BCDC_PROTO_VER << BCDC_FLAG_VER_SHIFT);
+	if (pktbuf->ip_summed == CHECKSUM_PARTIAL)
+		h->flags |= BCDC_FLAG_SUM_NEEDED;
+
+	h->priority = (pktbuf->priority & BCDC_PRIORITY_MASK);
+	h->flags2 = 0;
+	h->data_offset = offset;
+	BCDC_SET_IF_IDX(h, ifidx);
+	trace_brcmf_bcdchdr(pktbuf->data);
+}
+
+static int
+brcmf_proto_bcdc_hdrpull(struct brcmf_pub *drvr, bool do_fws, u8 *ifidx,
+			 struct sk_buff *pktbuf)
+{
+	struct brcmf_proto_bcdc_header *h;
+
+	brcmf_dbg(BCDC, "Enter\n");
+
+	/* Pop the BCDC header, used to convey packet priority across the bus */
+	if (pktbuf->len <= BCDC_HEADER_LEN) {
+		brcmf_dbg(INFO, "rx data too short (%d <= %d)\n",
+			  pktbuf->len, BCDC_HEADER_LEN);
+		return -EBADE;
+	}
+
+	trace_brcmf_bcdchdr(pktbuf->data);
+	h = (struct brcmf_proto_bcdc_header *)(pktbuf->data);
+
+	*ifidx = BCDC_GET_IF_IDX(h);
+	if (*ifidx >= BRCMF_MAX_IFS) {
+		brcmf_err("rx data ifnum out of range (%d)\n", *ifidx);
+		return -EBADE;
+	}
+	/* The ifidx is the idx to map to matching netdev/ifp. When receiving
+	 * events this is easy because it contains the bssidx which maps
+	 * 1-to-1 to the netdev/ifp. But for data frames only the ifidx is
+	 * received. bssidx 1 is used for p2p0 and no data can be received
+	 * or transmitted on it. Therefore bssidx is ifidx + 1 if ifidx > 0.
+	 */
+	if (*ifidx)
+		(*ifidx)++;
+
+	if (((h->flags & BCDC_FLAG_VER_MASK) >> BCDC_FLAG_VER_SHIFT) !=
+	    BCDC_PROTO_VER) {
+		brcmf_err("%s: non-BCDC packet received, flags 0x%x\n",
+			  brcmf_ifname(drvr, *ifidx), h->flags);
+		return -EBADE;
+	}
+
+	if (h->flags & BCDC_FLAG_SUM_GOOD) {
+		brcmf_dbg(BCDC, "%s: BDC rcv, good checksum, flags 0x%x\n",
+			  brcmf_ifname(drvr, *ifidx), h->flags);
+		pktbuf->ip_summed = CHECKSUM_UNNECESSARY;
+	}
+
+	pktbuf->priority = h->priority & BCDC_PRIORITY_MASK;
+
+	skb_pull(pktbuf, BCDC_HEADER_LEN);
+	if (do_fws)
+		brcmf_fws_hdrpull(drvr, *ifidx, h->data_offset << 2, pktbuf);
+	else
+		skb_pull(pktbuf, h->data_offset << 2);
+
+	if (pktbuf->len == 0)
+		return -ENODATA;
+	return 0;
+}
+
+static int
+brcmf_proto_bcdc_txdata(struct brcmf_pub *drvr, int ifidx, u8 offset,
+			struct sk_buff *pktbuf)
+{
+	brcmf_proto_bcdc_hdrpush(drvr, ifidx, offset, pktbuf);
+	return brcmf_bus_txdata(drvr->bus_if, pktbuf);
+}
+
+
+int brcmf_proto_bcdc_attach(struct brcmf_pub *drvr)
+{
+	struct brcmf_bcdc *bcdc;
+
+	bcdc = kzalloc(sizeof(*bcdc), GFP_ATOMIC);
+	if (!bcdc)
+		goto fail;
+
+	/* ensure that the msg buf directly follows the cdc msg struct */
+	if ((unsigned long)(&bcdc->msg + 1) != (unsigned long)bcdc->buf) {
+		brcmf_err("struct brcmf_proto_bcdc is not correctly defined\n");
+		goto fail;
+	}
+
+	drvr->proto->hdrpull = brcmf_proto_bcdc_hdrpull;
+	drvr->proto->query_dcmd = brcmf_proto_bcdc_query_dcmd;
+	drvr->proto->set_dcmd = brcmf_proto_bcdc_set_dcmd;
+	drvr->proto->txdata = brcmf_proto_bcdc_txdata;
+	drvr->proto->pd = bcdc;
+
+	drvr->hdrlen += BCDC_HEADER_LEN + BRCMF_PROT_FW_SIGNAL_MAX_TXBYTES;
+	drvr->bus_if->maxctl = BRCMF_DCMD_MAXLEN +
+			sizeof(struct brcmf_proto_bcdc_dcmd);
+	return 0;
+
+fail:
+	kfree(bcdc);
+	return -ENOMEM;
+}
+
+void brcmf_proto_bcdc_detach(struct brcmf_pub *drvr)
+{
+	kfree(drvr->proto->pd);
+	drvr->proto->pd = NULL;
+}
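
The new bcdc.c above packs a rolling request id and the interface index into the 32-bit flags word of every dcmd. A minimal sketch of that round trip using the BCDC_DCMD_* constants defined in the file; example_pack_flags() is illustrative, not driver code:

#include <linux/types.h>

/* mirrors the packing done in brcmf_proto_bcdc_msg() */
static u32 example_pack_flags(u16 reqid, u8 ifidx, bool set)
{
	u32 flags = (u32)reqid << BCDC_DCMD_ID_SHIFT;	/* id in the upper 16 bits */

	flags |= (ifidx << BCDC_DCMD_IF_SHIFT) & BCDC_DCMD_IF_MASK;
	if (set)
		flags |= BCDC_DCMD_SET;			/* set vs. get request */
	return flags;
}

On completion the id is recovered the same way brcmf_proto_bcdc_cmplt() does it, i.e. BCDC_DCMD_ID(le32_to_cpu(msg->flags)), and compared against the expected reqid.
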
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcdc.h b/drivers/net/wireless/brcm80211/brcmfmac/bcdc.h
new file mode 100644
index 000000000000..17e8c039ff32
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcdc.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2013 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#ifndef BRCMFMAC_BCDC_H
+#define BRCMFMAC_BCDC_H
+
+
+int brcmf_proto_bcdc_attach(struct brcmf_pub *drvr);
+void brcmf_proto_bcdc_detach(struct brcmf_pub *drvr);
+
+
+#endif /* BRCMFMAC_BCDC_H */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
index 3e10b801eee8..fa35b23bbaa7 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
@@ -17,16 +17,23 @@
 
 #include <linux/types.h>
 #include <linux/netdevice.h>
-#include <linux/export.h>
 #include <linux/pci.h>
 #include <linux/pci_ids.h>
 #include <linux/sched.h>
 #include <linux/completion.h>
 #include <linux/scatterlist.h>
 #include <linux/mmc/sdio.h>
+#include <linux/mmc/core.h>
 #include <linux/mmc/sdio_func.h>
+#include <linux/mmc/sdio_ids.h>
 #include <linux/mmc/card.h>
+#include <linux/mmc/host.h>
+#include <linux/platform_device.h>
 #include <linux/platform_data/brcmfmac-sdio.h>
+#include <linux/suspend.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <net/cfg80211.h>
 
 #include <defs.h>
 #include <brcm_hw_ids.h>
@@ -36,11 +43,19 @@
 #include "dhd_bus.h"
 #include "dhd_dbg.h"
 #include "sdio_host.h"
+#include "sdio_chip.h"
 
 #define SDIOH_API_ACCESS_RETRY_LIMIT	2
 
+#define DMA_ALIGN_MASK	0x03
 
-static irqreturn_t brcmf_sdio_oob_irqhandler(int irq, void *dev_id)
+#define SDIO_FUNC1_BLOCKSIZE		64
+#define SDIO_FUNC2_BLOCKSIZE		512
+/* Maximum milliseconds to wait for F2 to come up */
+#define SDIO_WAIT_F2RDY	3000
+
+
+static irqreturn_t brcmf_sdiod_oob_irqhandler(int irq, void *dev_id)
 {
 	struct brcmf_bus *bus_if = dev_get_drvdata(dev_id);
 	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
@@ -55,27 +70,46 @@ static irqreturn_t brcmf_sdio_oob_irqhandler(int irq, void *dev_id)
 		sdiodev->irq_en = false;
 	}
 
-	brcmf_sdbrcm_isr(sdiodev->bus);
+	brcmf_sdio_isr(sdiodev->bus);
 
 	return IRQ_HANDLED;
 }
 
-static void brcmf_sdio_ib_irqhandler(struct sdio_func *func)
+static void brcmf_sdiod_ib_irqhandler(struct sdio_func *func)
 {
 	struct brcmf_bus *bus_if = dev_get_drvdata(&func->dev);
 	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
 
 	brcmf_dbg(INTR, "IB intr triggered\n");
 
-	brcmf_sdbrcm_isr(sdiodev->bus);
+	brcmf_sdio_isr(sdiodev->bus);
 }
 
 /* dummy handler for SDIO function 2 interrupt */
-static void brcmf_sdio_dummy_irqhandler(struct sdio_func *func)
+static void brcmf_sdiod_dummy_irqhandler(struct sdio_func *func)
 {
 }
 
-int brcmf_sdio_intr_register(struct brcmf_sdio_dev *sdiodev)
+static bool brcmf_sdiod_pm_resume_error(struct brcmf_sdio_dev *sdiodev)
+{
+	bool is_err = false;
+#ifdef CONFIG_PM_SLEEP
+	is_err = atomic_read(&sdiodev->suspend);
+#endif
+	return is_err;
+}
+
+static void brcmf_sdiod_pm_resume_wait(struct brcmf_sdio_dev *sdiodev,
+				       wait_queue_head_t *wq)
+{
+#ifdef CONFIG_PM_SLEEP
+	int retry = 0;
+	while (atomic_read(&sdiodev->suspend) && retry++ != 30)
+		wait_event_timeout(*wq, false, HZ/100);
+#endif
+}
+
+int brcmf_sdiod_intr_register(struct brcmf_sdio_dev *sdiodev)
 {
 	int ret = 0;
 	u8 data;
@@ -85,7 +119,7 @@ int brcmf_sdio_intr_register(struct brcmf_sdio_dev *sdiodev)
 		brcmf_dbg(SDIO, "Enter, register OOB IRQ %d\n",
 			  sdiodev->pdata->oob_irq_nr);
 		ret = request_irq(sdiodev->pdata->oob_irq_nr,
-				  brcmf_sdio_oob_irqhandler,
+				  brcmf_sdiod_oob_irqhandler,
 				  sdiodev->pdata->oob_irq_flags,
 				  "brcmf_oob_intr",
 				  &sdiodev->func[1]->dev);
@@ -109,36 +143,36 @@ int brcmf_sdio_intr_register(struct brcmf_sdio_dev *sdiodev)
 		sdio_claim_host(sdiodev->func[1]);
 
 		/* must configure SDIO_CCCR_IENx to enable irq */
-		data = brcmf_sdio_regrb(sdiodev, SDIO_CCCR_IENx, &ret);
+		data = brcmf_sdiod_regrb(sdiodev, SDIO_CCCR_IENx, &ret);
 		data |= 1 << SDIO_FUNC_1 | 1 << SDIO_FUNC_2 | 1;
-		brcmf_sdio_regwb(sdiodev, SDIO_CCCR_IENx, data, &ret);
+		brcmf_sdiod_regwb(sdiodev, SDIO_CCCR_IENx, data, &ret);
 
 		/* redirect, configure and enable io for interrupt signal */
 		data = SDIO_SEPINT_MASK | SDIO_SEPINT_OE;
 		if (sdiodev->pdata->oob_irq_flags & IRQF_TRIGGER_HIGH)
 			data |= SDIO_SEPINT_ACT_HI;
-		brcmf_sdio_regwb(sdiodev, SDIO_CCCR_BRCM_SEPINT, data, &ret);
+		brcmf_sdiod_regwb(sdiodev, SDIO_CCCR_BRCM_SEPINT, data, &ret);
 
 		sdio_release_host(sdiodev->func[1]);
 	} else {
 		brcmf_dbg(SDIO, "Entering\n");
 		sdio_claim_host(sdiodev->func[1]);
-		sdio_claim_irq(sdiodev->func[1], brcmf_sdio_ib_irqhandler);
-		sdio_claim_irq(sdiodev->func[2], brcmf_sdio_dummy_irqhandler);
+		sdio_claim_irq(sdiodev->func[1], brcmf_sdiod_ib_irqhandler);
+		sdio_claim_irq(sdiodev->func[2], brcmf_sdiod_dummy_irqhandler);
 		sdio_release_host(sdiodev->func[1]);
 	}
 
 	return 0;
 }
 
-int brcmf_sdio_intr_unregister(struct brcmf_sdio_dev *sdiodev)
+int brcmf_sdiod_intr_unregister(struct brcmf_sdio_dev *sdiodev)
 {
 	brcmf_dbg(SDIO, "Entering\n");
 
 	if ((sdiodev->pdata) && (sdiodev->pdata->oob_irq_supported)) {
 		sdio_claim_host(sdiodev->func[1]);
-		brcmf_sdio_regwb(sdiodev, SDIO_CCCR_BRCM_SEPINT, 0, NULL);
-		brcmf_sdio_regwb(sdiodev, SDIO_CCCR_IENx, 0, NULL);
+		brcmf_sdiod_regwb(sdiodev, SDIO_CCCR_BRCM_SEPINT, 0, NULL);
+		brcmf_sdiod_regwb(sdiodev, SDIO_CCCR_IENx, 0, NULL);
 		sdio_release_host(sdiodev->func[1]);
 
 		if (sdiodev->oob_irq_requested) {
@@ -161,29 +195,150 @@ int brcmf_sdio_intr_unregister(struct brcmf_sdio_dev *sdiodev)
 	return 0;
 }
 
+static inline int brcmf_sdiod_f0_writeb(struct sdio_func *func,
+					uint regaddr, u8 byte)
+{
+	int err_ret;
+
+	/*
+	 * Can only directly write to some F0 registers.
+	 * Handle the CCCR_IENx and CCCR_ABORT commands
+	 * as a special case.
+	 */
+	if ((regaddr == SDIO_CCCR_ABORT) ||
+	    (regaddr == SDIO_CCCR_IENx))
+		sdio_writeb(func, byte, regaddr, &err_ret);
+	else
+		sdio_f0_writeb(func, byte, regaddr, &err_ret);
+
+	return err_ret;
+}
+
+static int brcmf_sdiod_request_data(struct brcmf_sdio_dev *sdiodev, u8 fn,
+				    u32 addr, u8 regsz, void *data, bool write)
+{
+	struct sdio_func *func;
+	int ret;
+
+	brcmf_dbg(SDIO, "rw=%d, func=%d, addr=0x%05x, nbytes=%d\n",
+		  write, fn, addr, regsz);
+
+	brcmf_sdiod_pm_resume_wait(sdiodev, &sdiodev->request_word_wait);
+	if (brcmf_sdiod_pm_resume_error(sdiodev))
+		return -EIO;
+
+	/* only allow byte access on F0 */
+	if (WARN_ON(regsz > 1 && !fn))
+		return -EINVAL;
+	func = sdiodev->func[fn];
+
+	switch (regsz) {
+	case sizeof(u8):
+		if (write) {
+			if (fn)
+				sdio_writeb(func, *(u8 *)data, addr, &ret);
+			else
+				ret = brcmf_sdiod_f0_writeb(func, addr,
+							    *(u8 *)data);
+		} else {
+			if (fn)
+				*(u8 *)data = sdio_readb(func, addr, &ret);
+			else
+				*(u8 *)data = sdio_f0_readb(func, addr, &ret);
+		}
+		break;
+	case sizeof(u16):
+		if (write)
+			sdio_writew(func, *(u16 *)data, addr, &ret);
+		else
+			*(u16 *)data = sdio_readw(func, addr, &ret);
+		break;
+	case sizeof(u32):
+		if (write)
+			sdio_writel(func, *(u32 *)data, addr, &ret);
+		else
+			*(u32 *)data = sdio_readl(func, addr, &ret);
+		break;
+	default:
+		brcmf_err("invalid size: %d\n", regsz);
+		break;
+	}
+
+	if (ret) {
+		/*
+		 * SleepCSR register access can fail when
+		 * waking up the device so reduce this noise
+		 * in the logs.
+		 */
+		if (addr != SBSDIO_FUNC1_SLEEPCSR)
+			brcmf_err("failed to %s data F%d@0x%05x, err: %d\n",
+				  write ? "write" : "read", fn, addr, ret);
+		else
+			brcmf_dbg(SDIO, "failed to %s data F%d@0x%05x, err: %d\n",
+				  write ? "write" : "read", fn, addr, ret);
+	}
+	return ret;
+}
+
+static int brcmf_sdiod_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
+				   u8 regsz, void *data, bool write)
+{
+	u8 func_num;
+	s32 retry = 0;
+	int ret;
+
+	if (sdiodev->bus_if->state == BRCMF_BUS_NOMEDIUM)
+		return -ENOMEDIUM;
+
+	/*
+	 * figure out how to read the register based on address range
+	 * 0x00 ~ 0x7FF: function 0 CCCR and FBR
+	 * 0x10000 ~ 0x1FFFF: function 1 miscellaneous registers
+	 * The rest: function 1 silicon backplane core registers
+	 */
+	if ((addr & ~REG_F0_REG_MASK) == 0)
+		func_num = SDIO_FUNC_0;
+	else
+		func_num = SDIO_FUNC_1;
+
+	do {
+		if (!write)
+			memset(data, 0, regsz);
+		/* on retry, wait 1 ms for the bus to settle down */
+		if (retry)
+			usleep_range(1000, 2000);
+		ret = brcmf_sdiod_request_data(sdiodev, func_num, addr, regsz,
+					       data, write);
+	} while (ret != 0 && ret != -ENOMEDIUM &&
+		 retry++ < SDIOH_API_ACCESS_RETRY_LIMIT);
+
+	if (ret == -ENOMEDIUM)
+		brcmf_bus_change_state(sdiodev->bus_if, BRCMF_BUS_NOMEDIUM);
+	else if (ret != 0)
+		brcmf_err("failed with %d\n", ret);
+
+	return ret;
+}
+
 static int
-brcmf_sdcard_set_sbaddr_window(struct brcmf_sdio_dev *sdiodev, u32 address)
+brcmf_sdiod_set_sbaddr_window(struct brcmf_sdio_dev *sdiodev, u32 address)
 {
 	int err = 0, i;
 	u8 addr[3];
-	s32 retry;
+
+	if (sdiodev->bus_if->state == BRCMF_BUS_NOMEDIUM)
+		return -ENOMEDIUM;
 
 	addr[0] = (address >> 8) & SBSDIO_SBADDRLOW_MASK;
 	addr[1] = (address >> 16) & SBSDIO_SBADDRMID_MASK;
 	addr[2] = (address >> 24) & SBSDIO_SBADDRHIGH_MASK;
 
 	for (i = 0; i < 3; i++) {
-		retry = 0;
-		do {
-			if (retry)
-				usleep_range(1000, 2000);
-			err = brcmf_sdioh_request_byte(sdiodev, SDIOH_WRITE,
-					SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRLOW + i,
-					&addr[i]);
-		} while (err != 0 && retry++ < SDIOH_API_ACCESS_RETRY_LIMIT);
-
+		err = brcmf_sdiod_regrw_helper(sdiodev,
+					       SBSDIO_FUNC1_SBADDRLOW + i,
+					       sizeof(u8), &addr[i], true);
 		if (err) {
-			brcmf_err("failed at addr:0x%0x\n",
+			brcmf_err("failed at addr: 0x%0x\n",
 				  SBSDIO_FUNC1_SBADDRLOW + i);
 			break;
 		}
@@ -193,13 +348,13 @@ brcmf_sdcard_set_sbaddr_window(struct brcmf_sdio_dev *sdiodev, u32 address)
 }
 
 static int
-brcmf_sdio_addrprep(struct brcmf_sdio_dev *sdiodev, uint width, u32 *addr)
+brcmf_sdiod_addrprep(struct brcmf_sdio_dev *sdiodev, uint width, u32 *addr)
 {
 	uint bar0 = *addr & ~SBSDIO_SB_OFT_ADDR_MASK;
 	int err = 0;
 
 	if (bar0 != sdiodev->sbwad) {
-		err = brcmf_sdcard_set_sbaddr_window(sdiodev, bar0);
+		err = brcmf_sdiod_set_sbaddr_window(sdiodev, bar0);
 		if (err)
 			return err;
 
@@ -214,62 +369,14 @@ brcmf_sdio_addrprep(struct brcmf_sdio_dev *sdiodev, uint width, u32 *addr)
 	return 0;
 }
 
-int
-brcmf_sdio_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
-			void *data, bool write)
-{
-	u8 func_num, reg_size;
-	s32 retry = 0;
-	int ret;
-
-	/*
-	 * figure out how to read the register based on address range
-	 * 0x00 ~ 0x7FF: function 0 CCCR and FBR
-	 * 0x10000 ~ 0x1FFFF: function 1 miscellaneous registers
-	 * The rest: function 1 silicon backplane core registers
-	 */
-	if ((addr & ~REG_F0_REG_MASK) == 0) {
-		func_num = SDIO_FUNC_0;
-		reg_size = 1;
-	} else if ((addr & ~REG_F1_MISC_MASK) == 0) {
-		func_num = SDIO_FUNC_1;
-		reg_size = 1;
-	} else {
-		func_num = SDIO_FUNC_1;
-		reg_size = 4;
-
-		ret = brcmf_sdio_addrprep(sdiodev, reg_size, &addr);
-		if (ret)
-			goto done;
-	}
-
-	do {
-		if (!write)
-			memset(data, 0, reg_size);
-		if (retry)	/* wait for 1 ms till bus get settled down */
-			usleep_range(1000, 2000);
-		if (reg_size == 1)
-			ret = brcmf_sdioh_request_byte(sdiodev, write,
-						       func_num, addr, data);
-		else
-			ret = brcmf_sdioh_request_word(sdiodev, write,
-						       func_num, addr, data, 4);
-	} while (ret != 0 && retry++ < SDIOH_API_ACCESS_RETRY_LIMIT);
-
-done:
-	if (ret != 0)
-		brcmf_err("failed with %d\n", ret);
-
-	return ret;
-}
-
-u8 brcmf_sdio_regrb(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret)
+u8 brcmf_sdiod_regrb(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret)
 {
 	u8 data;
 	int retval;
 
 	brcmf_dbg(SDIO, "addr:0x%08x\n", addr);
-	retval = brcmf_sdio_regrw_helper(sdiodev, addr, &data, false);
+	retval = brcmf_sdiod_regrw_helper(sdiodev, addr, sizeof(data), &data,
+					  false);
 	brcmf_dbg(SDIO, "data:0x%02x\n", data);
 
 	if (ret)
@@ -278,52 +385,63 @@ u8 brcmf_sdio_regrb(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret)
 	return data;
 }
 
-u32 brcmf_sdio_regrl(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret)
+u32 brcmf_sdiod_regrl(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret)
 {
 	u32 data;
 	int retval;
 
 	brcmf_dbg(SDIO, "addr:0x%08x\n", addr);
-	retval = brcmf_sdio_regrw_helper(sdiodev, addr, &data, false);
+	retval = brcmf_sdiod_addrprep(sdiodev, sizeof(data), &addr);
+	if (retval)
+		goto done;
+	retval = brcmf_sdiod_regrw_helper(sdiodev, addr, sizeof(data), &data,
+					  false);
 	brcmf_dbg(SDIO, "data:0x%08x\n", data);
 
+done:
 	if (ret)
 		*ret = retval;
 
 	return data;
 }
 
-void brcmf_sdio_regwb(struct brcmf_sdio_dev *sdiodev, u32 addr,
+void brcmf_sdiod_regwb(struct brcmf_sdio_dev *sdiodev, u32 addr,
 		      u8 data, int *ret)
 {
 	int retval;
 
 	brcmf_dbg(SDIO, "addr:0x%08x, data:0x%02x\n", addr, data);
-	retval = brcmf_sdio_regrw_helper(sdiodev, addr, &data, true);
-
+	retval = brcmf_sdiod_regrw_helper(sdiodev, addr, sizeof(data), &data,
+					  true);
 	if (ret)
 		*ret = retval;
 }
 
-void brcmf_sdio_regwl(struct brcmf_sdio_dev *sdiodev, u32 addr,
+void brcmf_sdiod_regwl(struct brcmf_sdio_dev *sdiodev, u32 addr,
 		      u32 data, int *ret)
 {
 	int retval;
 
 	brcmf_dbg(SDIO, "addr:0x%08x, data:0x%08x\n", addr, data);
-	retval = brcmf_sdio_regrw_helper(sdiodev, addr, &data, true);
+	retval = brcmf_sdiod_addrprep(sdiodev, sizeof(data), &addr);
+	if (retval)
+		goto done;
+	retval = brcmf_sdiod_regrw_helper(sdiodev, addr, sizeof(data), &data,
+					  true);
 
+done:
 	if (ret)
 		*ret = retval;
 }
 
-static int brcmf_sdio_buffrw(struct brcmf_sdio_dev *sdiodev, uint fn,
+static int brcmf_sdiod_buffrw(struct brcmf_sdio_dev *sdiodev, uint fn,
 			     bool write, u32 addr, struct sk_buff *pkt)
 {
 	unsigned int req_sz;
+	int err;
 
-	brcmf_pm_resume_wait(sdiodev, &sdiodev->request_buffer_wait);
-	if (brcmf_pm_resume_error(sdiodev))
+	brcmf_sdiod_pm_resume_wait(sdiodev, &sdiodev->request_buffer_wait);
+	if (brcmf_sdiod_pm_resume_error(sdiodev))
 		return -EIO;
 
 	/* Single skb use the standard mmc interface */
@@ -331,22 +449,22 @@ static int brcmf_sdio_buffrw(struct brcmf_sdio_dev *sdiodev, uint fn,
 	req_sz &= (uint)~3;
 
 	if (write)
-		return sdio_memcpy_toio(sdiodev->func[fn], addr,
-					((u8 *)(pkt->data)),
-					req_sz);
+		err = sdio_memcpy_toio(sdiodev->func[fn], addr,
+				       ((u8 *)(pkt->data)), req_sz);
 	else if (fn == 1)
-		return sdio_memcpy_fromio(sdiodev->func[fn],
-					  ((u8 *)(pkt->data)),
-					  addr, req_sz);
+		err = sdio_memcpy_fromio(sdiodev->func[fn], ((u8 *)(pkt->data)),
+					 addr, req_sz);
 	else
 		/* function 2 read is FIFO operation */
-		return sdio_readsb(sdiodev->func[fn],
-				   ((u8 *)(pkt->data)), addr,
-				   req_sz);
+		err = sdio_readsb(sdiodev->func[fn], ((u8 *)(pkt->data)), addr,
+				  req_sz);
+	if (err == -ENOMEDIUM)
+		brcmf_bus_change_state(sdiodev->bus_if, BRCMF_BUS_NOMEDIUM);
+	return err;
 }
 
 /**
- * brcmf_sdio_sglist_rw - SDIO interface function for block data access
+ * brcmf_sdiod_sglist_rw - SDIO interface function for block data access
  * @sdiodev: brcmfmac sdio device
  * @fn: SDIO function number
  * @write: direction flag
@@ -357,9 +475,9 @@ static int brcmf_sdio_buffrw(struct brcmf_sdio_dev *sdiodev, uint fn,
  * stack for block data access. It assumes that the skb passed down by the
  * caller has already been padded and aligned.
  */
-static int brcmf_sdio_sglist_rw(struct brcmf_sdio_dev *sdiodev, uint fn,
-				bool write, u32 addr,
-				struct sk_buff_head *pktlist)
+static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev, uint fn,
+				 bool write, u32 addr,
+				 struct sk_buff_head *pktlist)
 {
 	unsigned int req_sz, func_blk_sz, sg_cnt, sg_data_sz, pkt_offset;
 	unsigned int max_req_sz, orig_offset, dst_offset;
@@ -377,8 +495,8 @@ static int brcmf_sdio_sglist_rw(struct brcmf_sdio_dev *sdiodev, uint fn,
 	if (!pktlist->qlen)
 		return -EINVAL;
 
-	brcmf_pm_resume_wait(sdiodev, &sdiodev->request_buffer_wait);
-	if (brcmf_pm_resume_error(sdiodev))
+	brcmf_sdiod_pm_resume_wait(sdiodev, &sdiodev->request_buffer_wait);
+	if (brcmf_sdiod_pm_resume_error(sdiodev))
 		return -EIO;
 
 	target_list = pktlist;
@@ -485,7 +603,11 @@ static int brcmf_sdio_sglist_rw(struct brcmf_sdio_dev *sdiodev, uint fn,
 		mmc_wait_for_req(sdiodev->func[fn]->card->host, &mmc_req);
 
 		ret = mmc_cmd.error ? mmc_cmd.error : mmc_dat.error;
-		if (ret != 0) {
+		if (ret == -ENOMEDIUM) {
+			brcmf_bus_change_state(sdiodev->bus_if,
+					       BRCMF_BUS_NOMEDIUM);
+			break;
+		} else if (ret != 0) {
 			brcmf_err("CMD53 sg block %s failed %d\n",
 				  write ? "write" : "read", ret);
 			ret = -EIO;
@@ -525,9 +647,7 @@ exit:
 	return ret;
 }
 
-int
-brcmf_sdcard_recv_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
-		      uint flags, u8 *buf, uint nbytes)
+int brcmf_sdiod_recv_buf(struct brcmf_sdio_dev *sdiodev, u8 *buf, uint nbytes)
 {
 	struct sk_buff *mypkt;
 	int err;
@@ -539,7 +659,7 @@ brcmf_sdcard_recv_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
 		return -EIO;
 	}
 
-	err = brcmf_sdcard_recv_pkt(sdiodev, addr, fn, flags, mypkt);
+	err = brcmf_sdiod_recv_pkt(sdiodev, mypkt);
 	if (!err)
 		memcpy(buf, mypkt->data, nbytes);
 
@@ -547,50 +667,47 @@ brcmf_sdcard_recv_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
 	return err;
 }
 
-int
-brcmf_sdcard_recv_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
-		      uint flags, struct sk_buff *pkt)
+int brcmf_sdiod_recv_pkt(struct brcmf_sdio_dev *sdiodev, struct sk_buff *pkt)
 {
-	uint width;
+	u32 addr = sdiodev->sbwad;
 	int err = 0;
 
-	brcmf_dbg(SDIO, "fun = %d, addr = 0x%x, size = %d\n",
-		  fn, addr, pkt->len);
+	brcmf_dbg(SDIO, "addr = 0x%x, size = %d\n", addr, pkt->len);
 
-	width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
-	err = brcmf_sdio_addrprep(sdiodev, width, &addr);
+	err = brcmf_sdiod_addrprep(sdiodev, 4, &addr);
 	if (err)
 		goto done;
 
-	err = brcmf_sdio_buffrw(sdiodev, fn, false, addr, pkt);
+	err = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_2, false, addr, pkt);
 
 done:
 	return err;
 }
 
-int brcmf_sdcard_recv_chain(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
-			    uint flags, struct sk_buff_head *pktq, uint totlen)
+int brcmf_sdiod_recv_chain(struct brcmf_sdio_dev *sdiodev,
+			   struct sk_buff_head *pktq, uint totlen)
 {
 	struct sk_buff *glom_skb;
 	struct sk_buff *skb;
-	uint width;
+	u32 addr = sdiodev->sbwad;
 	int err = 0;
 
-	brcmf_dbg(SDIO, "fun = %d, addr = 0x%x, size = %d\n",
-		  fn, addr, pktq->qlen);
+	brcmf_dbg(SDIO, "addr = 0x%x, size = %d\n",
+		  addr, pktq->qlen);
 
-	width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
-	err = brcmf_sdio_addrprep(sdiodev, width, &addr);
+	err = brcmf_sdiod_addrprep(sdiodev, 4, &addr);
 	if (err)
 		goto done;
 
 	if (pktq->qlen == 1)
-		err = brcmf_sdio_buffrw(sdiodev, fn, false, addr, pktq->next);
+		err = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_2, false, addr,
+					 pktq->next);
 	else if (!sdiodev->sg_support) {
 		glom_skb = brcmu_pkt_buf_get_skb(totlen);
 		if (!glom_skb)
 			return -ENOMEM;
-		err = brcmf_sdio_buffrw(sdiodev, fn, false, addr, glom_skb);
+		err = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_2, false, addr,
+					 glom_skb);
 		if (err)
 			goto done;
 
@@ -599,18 +716,17 @@ int brcmf_sdcard_recv_chain(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
 			skb_pull(glom_skb, skb->len);
 		}
 	} else
-		err = brcmf_sdio_sglist_rw(sdiodev, fn, false, addr, pktq);
+		err = brcmf_sdiod_sglist_rw(sdiodev, SDIO_FUNC_2, false, addr,
+					    pktq);
 
 done:
 	return err;
 }
 
-int
-brcmf_sdcard_send_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
-		      uint flags, u8 *buf, uint nbytes)
+int brcmf_sdiod_send_buf(struct brcmf_sdio_dev *sdiodev, u8 *buf, uint nbytes)
 {
 	struct sk_buff *mypkt;
-	uint width;
+	u32 addr = sdiodev->sbwad;
 	int err;
 
 	mypkt = brcmu_pkt_buf_get_skb(nbytes);
@@ -622,48 +738,47 @@ brcmf_sdcard_send_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
 
 	memcpy(mypkt->data, buf, nbytes);
 
-	width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
-	err = brcmf_sdio_addrprep(sdiodev, width, &addr);
+	err = brcmf_sdiod_addrprep(sdiodev, 4, &addr);
 
 	if (!err)
-		err = brcmf_sdio_buffrw(sdiodev, fn, true, addr, mypkt);
+		err = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_2, true, addr,
+					 mypkt);
 
 	brcmu_pkt_buf_free_skb(mypkt);
 	return err;
 
 }
 
-int
-brcmf_sdcard_send_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
-		      uint flags, struct sk_buff_head *pktq)
+int brcmf_sdiod_send_pkt(struct brcmf_sdio_dev *sdiodev,
+			 struct sk_buff_head *pktq)
 {
 	struct sk_buff *skb;
-	uint width;
+	u32 addr = sdiodev->sbwad;
 	int err;
 
-	brcmf_dbg(SDIO, "fun = %d, addr = 0x%x, size = %d\n",
-		  fn, addr, pktq->qlen);
+	brcmf_dbg(SDIO, "addr = 0x%x, size = %d\n", addr, pktq->qlen);
 
-	width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
-	err = brcmf_sdio_addrprep(sdiodev, width, &addr);
+	err = brcmf_sdiod_addrprep(sdiodev, 4, &addr);
 	if (err)
 		return err;
 
 	if (pktq->qlen == 1 || !sdiodev->sg_support)
 		skb_queue_walk(pktq, skb) {
-			err = brcmf_sdio_buffrw(sdiodev, fn, true, addr, skb);
+			err = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_2, true,
+						 addr, skb);
 			if (err)
 				break;
 		}
 	else
-		err = brcmf_sdio_sglist_rw(sdiodev, fn, true, addr, pktq);
+		err = brcmf_sdiod_sglist_rw(sdiodev, SDIO_FUNC_2, true, addr,
+					    pktq);
 
 	return err;
 }
 
 int
-brcmf_sdio_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
-		 u8 *data, uint size)
+brcmf_sdiod_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
+		  u8 *data, uint size)
 {
 	int bcmerror = 0;
 	struct sk_buff *pkt;
@@ -690,7 +805,7 @@ brcmf_sdio_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
 	/* Do the transfer(s) */
 	while (size) {
 		/* Set the backplane window to include the start address */
-		bcmerror = brcmf_sdcard_set_sbaddr_window(sdiodev, address);
+		bcmerror = brcmf_sdiod_set_sbaddr_window(sdiodev, address);
 		if (bcmerror)
 			break;
 
@@ -704,8 +819,8 @@ brcmf_sdio_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
 		skb_put(pkt, dsize);
 		if (write)
 			memcpy(pkt->data, data, dsize);
-		bcmerror = brcmf_sdio_buffrw(sdiodev, SDIO_FUNC_1, write,
-					     sdaddr, pkt);
+		bcmerror = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_1, write,
+					      sdaddr, pkt);
 		if (bcmerror) {
 			brcmf_err("membytes transfer failed\n");
 			break;
@@ -727,7 +842,7 @@ brcmf_sdio_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
 	dev_kfree_skb(pkt);
 
 	/* Return the window to backplane enumeration space for core access */
-	if (brcmf_sdcard_set_sbaddr_window(sdiodev, sdiodev->sbwad))
+	if (brcmf_sdiod_set_sbaddr_window(sdiodev, sdiodev->sbwad))
 		brcmf_err("FAILED to set window back to 0x%x\n",
 			  sdiodev->sbwad);
 
@@ -736,67 +851,335 @@ brcmf_sdio_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
 	return bcmerror;
 }
 
-int brcmf_sdcard_abort(struct brcmf_sdio_dev *sdiodev, uint fn)
+int brcmf_sdiod_abort(struct brcmf_sdio_dev *sdiodev, uint fn)
 {
 	char t_func = (char)fn;
 	brcmf_dbg(SDIO, "Enter\n");
 
 	/* issue abort cmd52 command through F0 */
-	brcmf_sdioh_request_byte(sdiodev, SDIOH_WRITE, SDIO_FUNC_0,
-				 SDIO_CCCR_ABORT, &t_func);
+	brcmf_sdiod_request_data(sdiodev, SDIO_FUNC_0, SDIO_CCCR_ABORT,
+				 sizeof(t_func), &t_func, true);
 
 	brcmf_dbg(SDIO, "Exit\n");
 	return 0;
 }
 
-int brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
+static int brcmf_sdiod_remove(struct brcmf_sdio_dev *sdiodev)
 {
-	u32 regs = 0;
+	if (sdiodev->bus) {
+		brcmf_sdio_remove(sdiodev->bus);
+		sdiodev->bus = NULL;
+	}
+
+	/* Disable Function 2 */
+	sdio_claim_host(sdiodev->func[2]);
+	sdio_disable_func(sdiodev->func[2]);
+	sdio_release_host(sdiodev->func[2]);
+
+	/* Disable Function 1 */
+	sdio_claim_host(sdiodev->func[1]);
+	sdio_disable_func(sdiodev->func[1]);
+	sdio_release_host(sdiodev->func[1]);
+
+	sdiodev->sbwad = 0;
+
+	return 0;
+}
+
+static int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev)
+{
+	struct sdio_func *func;
+	struct mmc_host *host;
+	uint max_blocks;
 	int ret = 0;
 
-	ret = brcmf_sdioh_attach(sdiodev);
-	if (ret)
+	sdiodev->num_funcs = 2;
+
+	sdio_claim_host(sdiodev->func[1]);
+
+	ret = sdio_set_block_size(sdiodev->func[1], SDIO_FUNC1_BLOCKSIZE);
+	if (ret) {
+		brcmf_err("Failed to set F1 blocksize\n");
+		sdio_release_host(sdiodev->func[1]);
+		goto out;
+	}
+	ret = sdio_set_block_size(sdiodev->func[2], SDIO_FUNC2_BLOCKSIZE);
+	if (ret) {
+		brcmf_err("Failed to set F2 blocksize\n");
+		sdio_release_host(sdiodev->func[1]);
+		goto out;
+	}
+
+	/* increase F2 timeout */
+	sdiodev->func[2]->enable_timeout = SDIO_WAIT_F2RDY;
+
+	/* Enable Function 1 */
+	ret = sdio_enable_func(sdiodev->func[1]);
+	sdio_release_host(sdiodev->func[1]);
+	if (ret) {
+		brcmf_err("Failed to enable F1: err=%d\n", ret);
 		goto out;
+	}
 
-	regs = SI_ENUM_BASE;
+	/*
+	 * determine host related variables after brcmf_sdiod_probe()
+	 * as func->cur_blksize is properly set and F2 init has been
+	 * completed successfully.
+	 */
+	func = sdiodev->func[2];
+	host = func->card->host;
+	sdiodev->sg_support = host->max_segs > 1;
+	max_blocks = min_t(uint, host->max_blk_count, 511u);
+	sdiodev->max_request_size = min_t(uint, host->max_req_size,
+					  max_blocks * func->cur_blksize);
+	sdiodev->max_segment_count = min_t(uint, host->max_segs,
+					   SG_MAX_SINGLE_ALLOC);
+	sdiodev->max_segment_size = host->max_seg_size;
 
 	/* try to attach to the target device */
-	sdiodev->bus = brcmf_sdbrcm_probe(regs, sdiodev);
+	sdiodev->bus = brcmf_sdio_probe(sdiodev);
 	if (!sdiodev->bus) {
-		brcmf_err("device attach failed\n");
 		ret = -ENODEV;
 		goto out;
 	}
 
 out:
 	if (ret)
-		brcmf_sdio_remove(sdiodev);
+		brcmf_sdiod_remove(sdiodev);
 
 	return ret;
 }
-EXPORT_SYMBOL(brcmf_sdio_probe);
 
-int brcmf_sdio_remove(struct brcmf_sdio_dev *sdiodev)
+/* devices we support, null terminated */
+static const struct sdio_device_id brcmf_sdmmc_ids[] = {
+	{SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_43143)},
+	{SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_43241)},
+	{SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4329)},
+	{SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4330)},
+	{SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4334)},
+	{SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_43362)},
+	{SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM,
+		     SDIO_DEVICE_ID_BROADCOM_4335_4339)},
+	{ /* end: all zeroes */ },
+};
+MODULE_DEVICE_TABLE(sdio, brcmf_sdmmc_ids);
+
+static struct brcmfmac_sdio_platform_data *brcmfmac_sdio_pdata;
+
+
+static int brcmf_ops_sdio_probe(struct sdio_func *func,
+				const struct sdio_device_id *id)
 {
-	sdiodev->bus_if->state = BRCMF_BUS_DOWN;
+	int err;
+	struct brcmf_sdio_dev *sdiodev;
+	struct brcmf_bus *bus_if;
 
-	if (sdiodev->bus) {
-		brcmf_sdbrcm_disconnect(sdiodev->bus);
-		sdiodev->bus = NULL;
+	brcmf_dbg(SDIO, "Enter\n");
+	brcmf_dbg(SDIO, "Class=%x\n", func->class);
+	brcmf_dbg(SDIO, "sdio vendor ID: 0x%04x\n", func->vendor);
+	brcmf_dbg(SDIO, "sdio device ID: 0x%04x\n", func->device);
+	brcmf_dbg(SDIO, "Function#: %d\n", func->num);
+
+	/* Consume func num 1 but don't do anything with it. */
+	if (func->num == 1)
+		return 0;
+
+	/* Ignore anything but func 2 */
+	if (func->num != 2)
+		return -ENODEV;
+
+	bus_if = kzalloc(sizeof(struct brcmf_bus), GFP_KERNEL);
+	if (!bus_if)
+		return -ENOMEM;
+	sdiodev = kzalloc(sizeof(struct brcmf_sdio_dev), GFP_KERNEL);
+	if (!sdiodev) {
+		kfree(bus_if);
+		return -ENOMEM;
 	}
 
-	brcmf_sdioh_detach(sdiodev);
+	/* store refs to functions used. mmc_card does
+	 * not hold the F0 function pointer.
+	 */
+	sdiodev->func[0] = kmemdup(func, sizeof(*func), GFP_KERNEL);
+	sdiodev->func[0]->num = 0;
+	sdiodev->func[1] = func->card->sdio_func[0];
+	sdiodev->func[2] = func;
+
+	sdiodev->bus_if = bus_if;
+	bus_if->bus_priv.sdio = sdiodev;
+	bus_if->proto_type = BRCMF_PROTO_BCDC;
+	dev_set_drvdata(&func->dev, bus_if);
+	dev_set_drvdata(&sdiodev->func[1]->dev, bus_if);
+	sdiodev->dev = &sdiodev->func[1]->dev;
+	sdiodev->pdata = brcmfmac_sdio_pdata;
+
+	atomic_set(&sdiodev->suspend, false);
+	init_waitqueue_head(&sdiodev->request_word_wait);
+	init_waitqueue_head(&sdiodev->request_buffer_wait);
+
+	brcmf_dbg(SDIO, "F2 found, calling brcmf_sdiod_probe...\n");
+	err = brcmf_sdiod_probe(sdiodev);
+	if (err) {
+		brcmf_err("F2 error, probe failed %d...\n", err);
+		goto fail;
+	}
 
-	sdiodev->sbwad = 0;
+	brcmf_dbg(SDIO, "F2 init completed...\n");
+	return 0;
+
+fail:
+	dev_set_drvdata(&func->dev, NULL);
+	dev_set_drvdata(&sdiodev->func[1]->dev, NULL);
+	kfree(sdiodev->func[0]);
+	kfree(sdiodev);
+	kfree(bus_if);
+	return err;
+}
+
+static void brcmf_ops_sdio_remove(struct sdio_func *func)
+{
+	struct brcmf_bus *bus_if;
+	struct brcmf_sdio_dev *sdiodev;
+
+	brcmf_dbg(SDIO, "Enter\n");
+	brcmf_dbg(SDIO, "sdio vendor ID: 0x%04x\n", func->vendor);
+	brcmf_dbg(SDIO, "sdio device ID: 0x%04x\n", func->device);
+	brcmf_dbg(SDIO, "Function: %d\n", func->num);
+
+	if (func->num != 1 && func->num != 2)
+		return;
+
+	bus_if = dev_get_drvdata(&func->dev);
+	if (bus_if) {
+		sdiodev = bus_if->bus_priv.sdio;
+		brcmf_sdiod_remove(sdiodev);
+
+		dev_set_drvdata(&sdiodev->func[1]->dev, NULL);
+		dev_set_drvdata(&sdiodev->func[2]->dev, NULL);
+
+		kfree(bus_if);
+		kfree(sdiodev->func[0]);
+		kfree(sdiodev);
+	}
+
+	brcmf_dbg(SDIO, "Exit\n");
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int brcmf_ops_sdio_suspend(struct device *dev)
+{
+	mmc_pm_flag_t sdio_flags;
+	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
+	int ret = 0;
+
+	brcmf_dbg(SDIO, "\n");
+
+	atomic_set(&sdiodev->suspend, true);
+
+	sdio_flags = sdio_get_host_pm_caps(sdiodev->func[1]);
+	if (!(sdio_flags & MMC_PM_KEEP_POWER)) {
+		brcmf_err("Host can't keep power while suspended\n");
+		return -EINVAL;
+	}
+
+	ret = sdio_set_host_pm_flags(sdiodev->func[1], MMC_PM_KEEP_POWER);
+	if (ret) {
+		brcmf_err("Failed to set pm_flags\n");
+		return ret;
+	}
+
+	brcmf_sdio_wd_timer(sdiodev->bus, 0);
+
+	return ret;
+}
+
+static int brcmf_ops_sdio_resume(struct device *dev)
+{
+	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
+
+	brcmf_sdio_wd_timer(sdiodev->bus, BRCMF_WD_POLL_MS);
+	atomic_set(&sdiodev->suspend, false);
+	return 0;
+}
+
+static const struct dev_pm_ops brcmf_sdio_pm_ops = {
+	.suspend	= brcmf_ops_sdio_suspend,
+	.resume		= brcmf_ops_sdio_resume,
+};
+#endif	/* CONFIG_PM_SLEEP */
+
+static struct sdio_driver brcmf_sdmmc_driver = {
+	.probe = brcmf_ops_sdio_probe,
+	.remove = brcmf_ops_sdio_remove,
+	.name = BRCMFMAC_SDIO_PDATA_NAME,
+	.id_table = brcmf_sdmmc_ids,
+#ifdef CONFIG_PM_SLEEP
+	.drv = {
+		.pm = &brcmf_sdio_pm_ops,
+	},
+#endif	/* CONFIG_PM_SLEEP */
+};
+
+static int brcmf_sdio_pd_probe(struct platform_device *pdev)
+{
+	brcmf_dbg(SDIO, "Enter\n");
+
+	brcmfmac_sdio_pdata = dev_get_platdata(&pdev->dev);
+
+	if (brcmfmac_sdio_pdata->power_on)
+		brcmfmac_sdio_pdata->power_on();
 
 	return 0;
 }
-EXPORT_SYMBOL(brcmf_sdio_remove);
 
-void brcmf_sdio_wdtmr_enable(struct brcmf_sdio_dev *sdiodev, bool enable)
+static int brcmf_sdio_pd_remove(struct platform_device *pdev)
 {
-	if (enable)
-		brcmf_sdbrcm_wd_timer(sdiodev->bus, BRCMF_WD_POLL_MS);
+	brcmf_dbg(SDIO, "Enter\n");
+
+	if (brcmfmac_sdio_pdata->power_off)
+		brcmfmac_sdio_pdata->power_off();
+
+	sdio_unregister_driver(&brcmf_sdmmc_driver);
+
+	return 0;
+}
+
+static struct platform_driver brcmf_sdio_pd = {
+	.remove		= brcmf_sdio_pd_remove,
+	.driver		= {
+		.name	= BRCMFMAC_SDIO_PDATA_NAME,
+		.owner	= THIS_MODULE,
+	}
+};
+
+void brcmf_sdio_register(void)
+{
+	int ret;
+
+	ret = sdio_register_driver(&brcmf_sdmmc_driver);
+	if (ret)
+		brcmf_err("sdio_register_driver failed: %d\n", ret);
+}
+
+void brcmf_sdio_exit(void)
+{
+	brcmf_dbg(SDIO, "Enter\n");
+
+	if (brcmfmac_sdio_pdata)
+		platform_driver_unregister(&brcmf_sdio_pd);
 	else
-		brcmf_sdbrcm_wd_timer(sdiodev->bus, 0);
+		sdio_unregister_driver(&brcmf_sdmmc_driver);
+}
+
+void __init brcmf_sdio_init(void)
+{
+	int ret;
+
+	brcmf_dbg(SDIO, "Enter\n");
+
+	ret = platform_driver_probe(&brcmf_sdio_pd, brcmf_sdio_pd_probe);
+	if (ret == -ENODEV)
+		brcmf_dbg(SDIO, "No platform data available.\n");
 }
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
deleted file mode 100644
index abc9ceca70f3..000000000000
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
+++ /dev/null
@@ -1,539 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#include <linux/types.h>
-#include <linux/netdevice.h>
-#include <linux/mmc/sdio.h>
-#include <linux/mmc/core.h>
-#include <linux/mmc/sdio_func.h>
-#include <linux/mmc/sdio_ids.h>
-#include <linux/mmc/card.h>
-#include <linux/mmc/host.h>
-#include <linux/suspend.h>
-#include <linux/errno.h>
-#include <linux/sched.h>	/* request_irq() */
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/platform_data/brcmfmac-sdio.h>
-#include <net/cfg80211.h>
-
-#include <defs.h>
-#include <brcm_hw_ids.h>
-#include <brcmu_utils.h>
-#include <brcmu_wifi.h>
-#include "sdio_host.h"
-#include "sdio_chip.h"
-#include "dhd_dbg.h"
-#include "dhd_bus.h"
-
-#define SDIO_VENDOR_ID_BROADCOM		0x02d0
-
-#define DMA_ALIGN_MASK	0x03
-
-#define SDIO_FUNC1_BLOCKSIZE		64
-#define SDIO_FUNC2_BLOCKSIZE		512
-
-/* devices we support, null terminated */
-static const struct sdio_device_id brcmf_sdmmc_ids[] = {
-	{SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_43143)},
-	{SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_43241)},
-	{SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4329)},
-	{SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4330)},
-	{SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4334)},
-	{SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM,
-		     SDIO_DEVICE_ID_BROADCOM_4335_4339)},
-	{ /* end: all zeroes */ },
-};
-MODULE_DEVICE_TABLE(sdio, brcmf_sdmmc_ids);
-
-static struct brcmfmac_sdio_platform_data *brcmfmac_sdio_pdata;
-
-
-bool
-brcmf_pm_resume_error(struct brcmf_sdio_dev *sdiodev)
-{
-	bool is_err = false;
-#ifdef CONFIG_PM_SLEEP
-	is_err = atomic_read(&sdiodev->suspend);
-#endif
-	return is_err;
-}
-
-void
-brcmf_pm_resume_wait(struct brcmf_sdio_dev *sdiodev, wait_queue_head_t *wq)
-{
-#ifdef CONFIG_PM_SLEEP
-	int retry = 0;
-	while (atomic_read(&sdiodev->suspend) && retry++ != 30)
-		wait_event_timeout(*wq, false, HZ/100);
-#endif
-}
-
-static inline int brcmf_sdioh_f0_write_byte(struct brcmf_sdio_dev *sdiodev,
-					    uint regaddr, u8 *byte)
-{
-	struct sdio_func *sdfunc = sdiodev->func[0];
-	int err_ret;
-
-	/*
-	 * Can only directly write to some F0 registers.
-	 * Handle F2 enable/disable and Abort command
-	 * as a special case.
-	 */
-	if (regaddr == SDIO_CCCR_IOEx) {
-		sdfunc = sdiodev->func[2];
-		if (sdfunc) {
-			if (*byte & SDIO_FUNC_ENABLE_2) {
-				/* Enable Function 2 */
-				err_ret = sdio_enable_func(sdfunc);
-				if (err_ret)
-					brcmf_err("enable F2 failed:%d\n",
-						  err_ret);
-			} else {
-				/* Disable Function 2 */
-				err_ret = sdio_disable_func(sdfunc);
-				if (err_ret)
-					brcmf_err("Disable F2 failed:%d\n",
-						  err_ret);
-			}
-		} else {
-			err_ret = -ENOENT;
-		}
-	} else if ((regaddr == SDIO_CCCR_ABORT) ||
-		   (regaddr == SDIO_CCCR_IENx)) {
-		sdfunc = kmemdup(sdiodev->func[0], sizeof(struct sdio_func),
-				 GFP_KERNEL);
-		if (!sdfunc)
-			return -ENOMEM;
-		sdfunc->num = 0;
-		sdio_writeb(sdfunc, *byte, regaddr, &err_ret);
-		kfree(sdfunc);
-	} else if (regaddr < 0xF0) {
-		brcmf_err("F0 Wr:0x%02x: write disallowed\n", regaddr);
-		err_ret = -EPERM;
-	} else {
-		sdio_f0_writeb(sdfunc, *byte, regaddr, &err_ret);
-	}
-
-	return err_ret;
-}
-
-int brcmf_sdioh_request_byte(struct brcmf_sdio_dev *sdiodev, uint rw, uint func,
-			     uint regaddr, u8 *byte)
-{
-	int err_ret;
-
-	brcmf_dbg(SDIO, "rw=%d, func=%d, addr=0x%05x\n", rw, func, regaddr);
-
-	brcmf_pm_resume_wait(sdiodev, &sdiodev->request_byte_wait);
-	if (brcmf_pm_resume_error(sdiodev))
-		return -EIO;
-
-	if (rw && func == 0) {
-		/* handle F0 separately */
-		err_ret = brcmf_sdioh_f0_write_byte(sdiodev, regaddr, byte);
-	} else {
-		if (rw) /* CMD52 Write */
-			sdio_writeb(sdiodev->func[func], *byte, regaddr,
-				    &err_ret);
-		else if (func == 0) {
-			*byte = sdio_f0_readb(sdiodev->func[func], regaddr,
-					      &err_ret);
-		} else {
-			*byte = sdio_readb(sdiodev->func[func], regaddr,
-					   &err_ret);
-		}
-	}
-
-	if (err_ret)
-		brcmf_err("Failed to %s byte F%d:@0x%05x=%02x, Err: %d\n",
-			  rw ? "write" : "read", func, regaddr, *byte, err_ret);
-
-	return err_ret;
-}
-
-int brcmf_sdioh_request_word(struct brcmf_sdio_dev *sdiodev,
-			     uint rw, uint func, uint addr, u32 *word,
-			     uint nbytes)
-{
-	int err_ret = -EIO;
-
-	if (func == 0) {
-		brcmf_err("Only CMD52 allowed to F0\n");
-		return -EINVAL;
-	}
-
-	brcmf_dbg(SDIO, "rw=%d, func=%d, addr=0x%05x, nbytes=%d\n",
-		  rw, func, addr, nbytes);
-
-	brcmf_pm_resume_wait(sdiodev, &sdiodev->request_word_wait);
-	if (brcmf_pm_resume_error(sdiodev))
-		return -EIO;
-
-	if (rw) {		/* CMD52 Write */
-		if (nbytes == 4)
-			sdio_writel(sdiodev->func[func], *word, addr,
-				    &err_ret);
-		else if (nbytes == 2)
-			sdio_writew(sdiodev->func[func], (*word & 0xFFFF),
-				    addr, &err_ret);
-		else
-			brcmf_err("Invalid nbytes: %d\n", nbytes);
-	} else {		/* CMD52 Read */
-		if (nbytes == 4)
-			*word = sdio_readl(sdiodev->func[func], addr, &err_ret);
-		else if (nbytes == 2)
-			*word = sdio_readw(sdiodev->func[func], addr,
-					   &err_ret) & 0xFFFF;
-		else
-			brcmf_err("Invalid nbytes: %d\n", nbytes);
-	}
-
-	if (err_ret)
-		brcmf_err("Failed to %s word, Err: 0x%08x\n",
-			  rw ? "write" : "read", err_ret);
-
-	return err_ret;
-}
-
-static int brcmf_sdioh_get_cisaddr(struct brcmf_sdio_dev *sdiodev, u32 regaddr)
-{
-	/* read 24 bits and return valid 17 bit addr */
-	int i, ret;
-	u32 scratch, regdata;
-	__le32 scratch_le;
-	u8 *ptr = (u8 *)&scratch_le;
-
-	for (i = 0; i < 3; i++) {
-		regdata = brcmf_sdio_regrl(sdiodev, regaddr, &ret);
-		if (ret != 0)
-			brcmf_err("Can't read!\n");
-
-		*ptr++ = (u8) regdata;
-		regaddr++;
-	}
-
-	/* Only the lower 17-bits are valid */
-	scratch = le32_to_cpu(scratch_le);
-	scratch &= 0x0001FFFF;
-	return scratch;
-}
-
-static int brcmf_sdioh_enablefuncs(struct brcmf_sdio_dev *sdiodev)
-{
-	int err_ret;
-	u32 fbraddr;
-	u8 func;
-
-	brcmf_dbg(SDIO, "\n");
-
-	/* Get the Card's common CIS address */
-	sdiodev->func_cis_ptr[0] = brcmf_sdioh_get_cisaddr(sdiodev,
-							   SDIO_CCCR_CIS);
-	brcmf_dbg(SDIO, "Card's Common CIS Ptr = 0x%x\n",
-		  sdiodev->func_cis_ptr[0]);
-
-	/* Get the Card's function CIS (for each function) */
-	for (fbraddr = SDIO_FBR_BASE(1), func = 1;
-	     func <= sdiodev->num_funcs; func++, fbraddr += SDIOD_FBR_SIZE) {
-		sdiodev->func_cis_ptr[func] =
-		    brcmf_sdioh_get_cisaddr(sdiodev, SDIO_FBR_CIS + fbraddr);
-		brcmf_dbg(SDIO, "Function %d CIS Ptr = 0x%x\n",
-			  func, sdiodev->func_cis_ptr[func]);
-	}
-
-	/* Enable Function 1 */
-	err_ret = sdio_enable_func(sdiodev->func[1]);
-	if (err_ret)
-		brcmf_err("Failed to enable F1 Err: 0x%08x\n", err_ret);
-
-	return false;
-}
-
-/*
- *	Public entry points & extern's
- */
-int brcmf_sdioh_attach(struct brcmf_sdio_dev *sdiodev)
-{
-	int err_ret = 0;
-
-	brcmf_dbg(SDIO, "\n");
-
-	sdiodev->num_funcs = 2;
-
-	sdio_claim_host(sdiodev->func[1]);
-
-	err_ret = sdio_set_block_size(sdiodev->func[1], SDIO_FUNC1_BLOCKSIZE);
-	if (err_ret) {
-		brcmf_err("Failed to set F1 blocksize\n");
-		goto out;
-	}
-
-	err_ret = sdio_set_block_size(sdiodev->func[2], SDIO_FUNC2_BLOCKSIZE);
-	if (err_ret) {
-		brcmf_err("Failed to set F2 blocksize\n");
-		goto out;
-	}
-
-	brcmf_sdioh_enablefuncs(sdiodev);
-
-out:
-	sdio_release_host(sdiodev->func[1]);
-	brcmf_dbg(SDIO, "Done\n");
-	return err_ret;
-}
-
-void brcmf_sdioh_detach(struct brcmf_sdio_dev *sdiodev)
-{
-	brcmf_dbg(SDIO, "\n");
-
-	/* Disable Function 2 */
-	sdio_claim_host(sdiodev->func[2]);
-	sdio_disable_func(sdiodev->func[2]);
-	sdio_release_host(sdiodev->func[2]);
-
-	/* Disable Function 1 */
-	sdio_claim_host(sdiodev->func[1]);
-	sdio_disable_func(sdiodev->func[1]);
-	sdio_release_host(sdiodev->func[1]);
-
-}
-
-static int brcmf_ops_sdio_probe(struct sdio_func *func,
-				const struct sdio_device_id *id)
-{
-	int err;
-	struct brcmf_sdio_dev *sdiodev;
-	struct brcmf_bus *bus_if;
-	struct mmc_host *host;
-	uint max_blocks;
-
-	brcmf_dbg(SDIO, "Enter\n");
-	brcmf_dbg(SDIO, "Class=%x\n", func->class);
-	brcmf_dbg(SDIO, "sdio vendor ID: 0x%04x\n", func->vendor);
-	brcmf_dbg(SDIO, "sdio device ID: 0x%04x\n", func->device);
-	brcmf_dbg(SDIO, "Function#: %d\n", func->num);
-
-	/* Consume func num 1 but dont do anything with it. */
-	if (func->num == 1)
-		return 0;
-
-	/* Ignore anything but func 2 */
-	if (func->num != 2)
-		return -ENODEV;
-
-	bus_if = kzalloc(sizeof(struct brcmf_bus), GFP_KERNEL);
-	if (!bus_if)
-		return -ENOMEM;
-	sdiodev = kzalloc(sizeof(struct brcmf_sdio_dev), GFP_KERNEL);
-	if (!sdiodev) {
-		kfree(bus_if);
-		return -ENOMEM;
-	}
-
-	sdiodev->func[0] = func->card->sdio_func[0];
-	sdiodev->func[1] = func->card->sdio_func[0];
-	sdiodev->func[2] = func;
-
-	sdiodev->bus_if = bus_if;
-	bus_if->bus_priv.sdio = sdiodev;
-	dev_set_drvdata(&func->dev, bus_if);
-	dev_set_drvdata(&sdiodev->func[1]->dev, bus_if);
-	sdiodev->dev = &sdiodev->func[1]->dev;
-	sdiodev->pdata = brcmfmac_sdio_pdata;
-
-	atomic_set(&sdiodev->suspend, false);
-	init_waitqueue_head(&sdiodev->request_byte_wait);
-	init_waitqueue_head(&sdiodev->request_word_wait);
-	init_waitqueue_head(&sdiodev->request_buffer_wait);
-
-	brcmf_dbg(SDIO, "F2 found, calling brcmf_sdio_probe...\n");
-	err = brcmf_sdio_probe(sdiodev);
-	if (err) {
-		brcmf_err("F2 error, probe failed %d...\n", err);
-		goto fail;
-	}
-
-	/*
-	 * determine host related variables after brcmf_sdio_probe()
-	 * as func->cur_blksize is properly set and F2 init has been
-	 * completed successfully.
-	 */
-	host = func->card->host;
-	sdiodev->sg_support = host->max_segs > 1;
-	max_blocks = min_t(uint, host->max_blk_count, 511u);
-	sdiodev->max_request_size = min_t(uint, host->max_req_size,
-					  max_blocks * func->cur_blksize);
-	sdiodev->max_segment_count = min_t(uint, host->max_segs,
-					   SG_MAX_SINGLE_ALLOC);
-	sdiodev->max_segment_size = host->max_seg_size;
-	brcmf_dbg(SDIO, "F2 init completed...\n");
-	return 0;
-
-fail:
-	dev_set_drvdata(&func->dev, NULL);
-	dev_set_drvdata(&sdiodev->func[1]->dev, NULL);
-	kfree(sdiodev);
-	kfree(bus_if);
-	return err;
-}
-
-static void brcmf_ops_sdio_remove(struct sdio_func *func)
-{
-	struct brcmf_bus *bus_if;
-	struct brcmf_sdio_dev *sdiodev;
-
-	brcmf_dbg(SDIO, "Enter\n");
-	brcmf_dbg(SDIO, "sdio vendor ID: 0x%04x\n", func->vendor);
-	brcmf_dbg(SDIO, "sdio device ID: 0x%04x\n", func->device);
-	brcmf_dbg(SDIO, "Function: %d\n", func->num);
-
-	if (func->num != 1 && func->num != 2)
-		return;
-
-	bus_if = dev_get_drvdata(&func->dev);
-	if (bus_if) {
-		sdiodev = bus_if->bus_priv.sdio;
-		brcmf_sdio_remove(sdiodev);
-
-		dev_set_drvdata(&sdiodev->func[1]->dev, NULL);
-		dev_set_drvdata(&sdiodev->func[2]->dev, NULL);
-
-		kfree(bus_if);
-		kfree(sdiodev);
-	}
-
-	brcmf_dbg(SDIO, "Exit\n");
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int brcmf_sdio_suspend(struct device *dev)
-{
-	mmc_pm_flag_t sdio_flags;
-	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
-	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
-	int ret = 0;
-
-	brcmf_dbg(SDIO, "\n");
-
-	atomic_set(&sdiodev->suspend, true);
-
-	sdio_flags = sdio_get_host_pm_caps(sdiodev->func[1]);
-	if (!(sdio_flags & MMC_PM_KEEP_POWER)) {
-		brcmf_err("Host can't keep power while suspended\n");
-		return -EINVAL;
-	}
-
-	ret = sdio_set_host_pm_flags(sdiodev->func[1], MMC_PM_KEEP_POWER);
-	if (ret) {
-		brcmf_err("Failed to set pm_flags\n");
-		return ret;
-	}
-
-	brcmf_sdio_wdtmr_enable(sdiodev, false);
-
-	return ret;
-}
-
-static int brcmf_sdio_resume(struct device *dev)
-{
-	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
-	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
-
-	brcmf_sdio_wdtmr_enable(sdiodev, true);
-	atomic_set(&sdiodev->suspend, false);
-	return 0;
-}
-
-static const struct dev_pm_ops brcmf_sdio_pm_ops = {
-	.suspend	= brcmf_sdio_suspend,
-	.resume		= brcmf_sdio_resume,
-};
-#endif	/* CONFIG_PM_SLEEP */
-
-static struct sdio_driver brcmf_sdmmc_driver = {
-	.probe = brcmf_ops_sdio_probe,
-	.remove = brcmf_ops_sdio_remove,
-	.name = BRCMFMAC_SDIO_PDATA_NAME,
-	.id_table = brcmf_sdmmc_ids,
-#ifdef CONFIG_PM_SLEEP
-	.drv = {
-		.pm = &brcmf_sdio_pm_ops,
-	},
-#endif	/* CONFIG_PM_SLEEP */
-};
-
-static int brcmf_sdio_pd_probe(struct platform_device *pdev)
-{
-	brcmf_dbg(SDIO, "Enter\n");
-
-	brcmfmac_sdio_pdata = dev_get_platdata(&pdev->dev);
-
-	if (brcmfmac_sdio_pdata->power_on)
-		brcmfmac_sdio_pdata->power_on();
-
-	return 0;
-}
-
-static int brcmf_sdio_pd_remove(struct platform_device *pdev)
-{
-	brcmf_dbg(SDIO, "Enter\n");
-
-	if (brcmfmac_sdio_pdata->power_off)
-		brcmfmac_sdio_pdata->power_off();
-
-	sdio_unregister_driver(&brcmf_sdmmc_driver);
-
-	return 0;
-}
-
-static struct platform_driver brcmf_sdio_pd = {
-	.remove		= brcmf_sdio_pd_remove,
-	.driver		= {
-		.name	= BRCMFMAC_SDIO_PDATA_NAME,
-		.owner	= THIS_MODULE,
-	}
-};
-
-void brcmf_sdio_register(void)
-{
-	int ret;
-
-	ret = sdio_register_driver(&brcmf_sdmmc_driver);
-	if (ret)
-		brcmf_err("sdio_register_driver failed: %d\n", ret);
-}
-
-void brcmf_sdio_exit(void)
-{
-	brcmf_dbg(SDIO, "Enter\n");
-
-	if (brcmfmac_sdio_pdata)
-		platform_driver_unregister(&brcmf_sdio_pd);
-	else
-		sdio_unregister_driver(&brcmf_sdmmc_driver);
-}
-
-void __init brcmf_sdio_init(void)
-{
-	int ret;
-
-	brcmf_dbg(SDIO, "Enter\n");
-
-	ret = platform_driver_probe(&brcmf_sdio_pd, brcmf_sdio_pd_probe);
-	if (ret == -ENODEV)
-		brcmf_dbg(SDIO, "No platform data available.\n");
-}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
index 899a2ada5b82..939d6b132922 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
@@ -21,481 +21,33 @@
 #ifndef _BRCMF_H_
 #define _BRCMF_H_
 
-#define BRCMF_VERSION_STR		"4.218.248.5"
-
 #include "fweh.h"
 
-/*******************************************************************************
- * IO codes that are interpreted by dongle firmware
- ******************************************************************************/
-#define BRCMF_C_GET_VERSION			1
-#define BRCMF_C_UP				2
-#define BRCMF_C_DOWN				3
-#define BRCMF_C_SET_PROMISC			10
-#define BRCMF_C_GET_RATE			12
-#define BRCMF_C_GET_INFRA			19
-#define BRCMF_C_SET_INFRA			20
-#define BRCMF_C_GET_AUTH			21
-#define BRCMF_C_SET_AUTH			22
-#define BRCMF_C_GET_BSSID			23
-#define BRCMF_C_GET_SSID			25
-#define BRCMF_C_SET_SSID			26
-#define BRCMF_C_TERMINATED			28
-#define BRCMF_C_GET_CHANNEL			29
-#define BRCMF_C_SET_CHANNEL			30
-#define BRCMF_C_GET_SRL				31
-#define BRCMF_C_SET_SRL				32
-#define BRCMF_C_GET_LRL				33
-#define BRCMF_C_SET_LRL				34
-#define BRCMF_C_GET_RADIO			37
-#define BRCMF_C_SET_RADIO			38
-#define BRCMF_C_GET_PHYTYPE			39
-#define BRCMF_C_SET_KEY				45
-#define BRCMF_C_SET_PASSIVE_SCAN		49
-#define BRCMF_C_SCAN				50
-#define BRCMF_C_SCAN_RESULTS			51
-#define BRCMF_C_DISASSOC			52
-#define BRCMF_C_REASSOC				53
-#define BRCMF_C_SET_ROAM_TRIGGER		55
-#define BRCMF_C_SET_ROAM_DELTA			57
-#define BRCMF_C_GET_BCNPRD			75
-#define BRCMF_C_SET_BCNPRD			76
-#define BRCMF_C_GET_DTIMPRD			77
-#define BRCMF_C_SET_DTIMPRD			78
-#define BRCMF_C_SET_COUNTRY			84
-#define BRCMF_C_GET_PM				85
-#define BRCMF_C_SET_PM				86
-#define BRCMF_C_GET_CURR_RATESET		114
-#define BRCMF_C_GET_AP				117
-#define BRCMF_C_SET_AP				118
-#define BRCMF_C_GET_RSSI			127
-#define BRCMF_C_GET_WSEC			133
-#define BRCMF_C_SET_WSEC			134
-#define BRCMF_C_GET_PHY_NOISE			135
-#define BRCMF_C_GET_BSS_INFO			136
-#define BRCMF_C_GET_BANDLIST			140
-#define BRCMF_C_SET_SCB_TIMEOUT			158
-#define BRCMF_C_GET_PHYLIST			180
-#define BRCMF_C_SET_SCAN_CHANNEL_TIME		185
-#define BRCMF_C_SET_SCAN_UNASSOC_TIME		187
-#define BRCMF_C_SCB_DEAUTHENTICATE_FOR_REASON	201
-#define BRCMF_C_GET_VALID_CHANNELS		217
-#define BRCMF_C_GET_KEY_PRIMARY			235
-#define BRCMF_C_SET_KEY_PRIMARY			236
-#define BRCMF_C_SET_SCAN_PASSIVE_TIME		258
-#define BRCMF_C_GET_VAR				262
-#define BRCMF_C_SET_VAR				263
-
-/* phy types (returned by WLC_GET_PHYTPE) */
-#define	WLC_PHY_TYPE_A		0
-#define	WLC_PHY_TYPE_B		1
-#define	WLC_PHY_TYPE_G		2
-#define	WLC_PHY_TYPE_N		4
-#define	WLC_PHY_TYPE_LP		5
-#define	WLC_PHY_TYPE_SSN	6
-#define	WLC_PHY_TYPE_HT		7
-#define	WLC_PHY_TYPE_LCN	8
-#define	WLC_PHY_TYPE_NULL	0xf
-
 #define TOE_TX_CSUM_OL		0x00000001
 #define TOE_RX_CSUM_OL		0x00000002
 
-#define	BRCMF_BSS_INFO_VERSION	109 /* curr ver of brcmf_bss_info_le struct */
-
-/* size of brcmf_scan_params not including variable length array */
-#define BRCMF_SCAN_PARAMS_FIXED_SIZE 64
-
-/* masks for channel and ssid count */
-#define BRCMF_SCAN_PARAMS_COUNT_MASK 0x0000ffff
-#define BRCMF_SCAN_PARAMS_NSSID_SHIFT 16
-
-/* primary (ie tx) key */
-#define BRCMF_PRIMARY_KEY	(1 << 1)
-
 /* For supporting multiple interfaces */
 #define BRCMF_MAX_IFS	16
 
-#define DOT11_BSSTYPE_ANY			2
 #define DOT11_MAX_DEFAULT_KEYS	4
 
-#define BRCMF_ESCAN_REQ_VERSION 1
-
-#define WLC_BSS_RSSI_ON_CHANNEL		0x0002
-
-#define BRCMF_MAXRATES_IN_SET		16	/* max # of rates in rateset */
-#define BRCMF_STA_ASSOC			0x10		/* Associated */
-
-#define BRCMF_E_STATUS_SUCCESS			0
-#define BRCMF_E_STATUS_FAIL			1
-#define BRCMF_E_STATUS_TIMEOUT			2
-#define BRCMF_E_STATUS_NO_NETWORKS		3
-#define BRCMF_E_STATUS_ABORT			4
-#define BRCMF_E_STATUS_NO_ACK			5
-#define BRCMF_E_STATUS_UNSOLICITED		6
-#define BRCMF_E_STATUS_ATTEMPT			7
-#define BRCMF_E_STATUS_PARTIAL			8
-#define BRCMF_E_STATUS_NEWSCAN			9
-#define BRCMF_E_STATUS_NEWASSOC			10
-#define BRCMF_E_STATUS_11HQUIET			11
-#define BRCMF_E_STATUS_SUPPRESS			12
-#define BRCMF_E_STATUS_NOCHANS			13
-#define BRCMF_E_STATUS_CS_ABORT			15
-#define BRCMF_E_STATUS_ERROR			16
-
-#define BRCMF_E_REASON_INITIAL_ASSOC		0
-#define BRCMF_E_REASON_LOW_RSSI			1
-#define BRCMF_E_REASON_DEAUTH			2
-#define BRCMF_E_REASON_DISASSOC			3
-#define BRCMF_E_REASON_BCNS_LOST		4
-#define BRCMF_E_REASON_MINTXRATE		9
-#define BRCMF_E_REASON_TXFAIL			10
-
-#define BRCMF_E_REASON_LINK_BSSCFG_DIS		4
-#define BRCMF_E_REASON_FAST_ROAM_FAILED		5
-#define BRCMF_E_REASON_DIRECTED_ROAM		6
-#define BRCMF_E_REASON_TSPEC_REJECTED		7
-#define BRCMF_E_REASON_BETTER_AP		8
-
-#define BRCMF_E_PRUNE_ENCR_MISMATCH		1
-#define BRCMF_E_PRUNE_BCAST_BSSID		2
-#define BRCMF_E_PRUNE_MAC_DENY			3
-#define BRCMF_E_PRUNE_MAC_NA			4
-#define BRCMF_E_PRUNE_REG_PASSV			5
-#define BRCMF_E_PRUNE_SPCT_MGMT			6
-#define BRCMF_E_PRUNE_RADAR			7
-#define BRCMF_E_RSN_MISMATCH			8
-#define BRCMF_E_PRUNE_NO_COMMON_RATES		9
-#define BRCMF_E_PRUNE_BASIC_RATES		10
-#define BRCMF_E_PRUNE_CIPHER_NA			12
-#define BRCMF_E_PRUNE_KNOWN_STA			13
-#define BRCMF_E_PRUNE_WDS_PEER			15
-#define BRCMF_E_PRUNE_QBSS_LOAD			16
-#define BRCMF_E_PRUNE_HOME_AP			17
-
-#define BRCMF_E_SUP_OTHER			0
-#define BRCMF_E_SUP_DECRYPT_KEY_DATA		1
-#define BRCMF_E_SUP_BAD_UCAST_WEP128		2
-#define BRCMF_E_SUP_BAD_UCAST_WEP40		3
-#define BRCMF_E_SUP_UNSUP_KEY_LEN		4
-#define BRCMF_E_SUP_PW_KEY_CIPHER		5
-#define BRCMF_E_SUP_MSG3_TOO_MANY_IE		6
-#define BRCMF_E_SUP_MSG3_IE_MISMATCH		7
-#define BRCMF_E_SUP_NO_INSTALL_FLAG		8
-#define BRCMF_E_SUP_MSG3_NO_GTK			9
-#define BRCMF_E_SUP_GRP_KEY_CIPHER		10
-#define BRCMF_E_SUP_GRP_MSG1_NO_GTK		11
-#define BRCMF_E_SUP_GTK_DECRYPT_FAIL		12
-#define BRCMF_E_SUP_SEND_FAIL			13
-#define BRCMF_E_SUP_DEAUTH			14
-
-#define BRCMF_E_IF_ADD				1
-#define BRCMF_E_IF_DEL				2
-#define BRCMF_E_IF_CHANGE			3
-
-#define BRCMF_E_IF_FLAG_NOIF			1
-
-#define BRCMF_E_IF_ROLE_STA			0
-#define BRCMF_E_IF_ROLE_AP			1
-#define BRCMF_E_IF_ROLE_WDS			2
-
-#define BRCMF_E_LINK_BCN_LOSS			1
-#define BRCMF_E_LINK_DISASSOC			2
-#define BRCMF_E_LINK_ASSOC_REC			3
-#define BRCMF_E_LINK_BSSCFG_DIS			4
-
 /* Small, medium and maximum buffer size for dcmd
  */
 #define BRCMF_DCMD_SMLEN	256
 #define BRCMF_DCMD_MEDLEN	1536
 #define BRCMF_DCMD_MAXLEN	8192
 
-#define BRCMF_AMPDU_RX_REORDER_MAXFLOWS		256
-
-/* Pattern matching filter. Specifies an offset within received packets to
- * start matching, the pattern to match, the size of the pattern, and a bitmask
- * that indicates which bits within the pattern should be matched.
- */
-struct brcmf_pkt_filter_pattern_le {
-	/*
-	 * Offset within received packet to start pattern matching.
-	 * Offset '0' is the first byte of the ethernet header.
-	 */
-	__le32 offset;
-	/* Size of the pattern.  Bitmask must be the same size.*/
-	__le32 size_bytes;
-	/*
-	 * Variable length mask and pattern data. mask starts at offset 0.
-	 * Pattern immediately follows mask.
-	 */
-	u8 mask_and_pattern[1];
-};
-
-/* IOVAR "pkt_filter_add" parameter. Used to install packet filters. */
-struct brcmf_pkt_filter_le {
-	__le32 id;		/* Unique filter id, specified by app. */
-	__le32 type;		/* Filter type (WL_PKT_FILTER_TYPE_xxx). */
-	__le32 negate_match;	/* Negate the result of filter matches */
-	union {			/* Filter definitions */
-		struct brcmf_pkt_filter_pattern_le pattern; /* Filter pattern */
-	} u;
-};
-
-/* IOVAR "pkt_filter_enable" parameter. */
-struct brcmf_pkt_filter_enable_le {
-	__le32 id;		/* Unique filter id */
-	__le32 enable;		/* Enable/disable bool */
-};
-
-/* BSS info structure
- * Applications MUST CHECK ie_offset field and length field to access IEs and
- * next bss_info structure in a vector (in struct brcmf_scan_results)
+/* IOCTLs from host to device are limited in length. A device can only handle
+ * an ethernet frame size. This limit is to be applied by the protocol layer.
  */
-struct brcmf_bss_info_le {
-	__le32 version;		/* version field */
-	__le32 length;		/* byte length of data in this record,
-				 * starting at version and including IEs
-				 */
-	u8 BSSID[ETH_ALEN];
-	__le16 beacon_period;	/* units are Kusec */
-	__le16 capability;	/* Capability information */
-	u8 SSID_len;
-	u8 SSID[32];
-	struct {
-		__le32 count;   /* # rates in this set */
-		u8 rates[16]; /* rates in 500kbps units w/hi bit set if basic */
-	} rateset;		/* supported rates */
-	__le16 chanspec;	/* chanspec for bss */
-	__le16 atim_window;	/* units are Kusec */
-	u8 dtim_period;	/* DTIM period */
-	__le16 RSSI;		/* receive signal strength (in dBm) */
-	s8 phy_noise;		/* noise (in dBm) */
-
-	u8 n_cap;		/* BSS is 802.11N Capable */
-	/* 802.11N BSS Capabilities (based on HT_CAP_*): */
-	__le32 nbss_cap;
-	u8 ctl_ch;		/* 802.11N BSS control channel number */
-	__le32 reserved32[1];	/* Reserved for expansion of BSS properties */
-	u8 flags;		/* flags */
-	u8 reserved[3];	/* Reserved for expansion of BSS properties */
-	u8 basic_mcs[MCSSET_LEN];	/* 802.11N BSS required MCS set */
+#define BRCMF_TX_IOCTL_MAX_MSG_SIZE	(ETH_FRAME_LEN+ETH_FCS_LEN)
 
-	__le16 ie_offset;	/* offset at which IEs start, from beginning */
-	__le32 ie_length;	/* byte length of Information Elements */
-	__le16 SNR;		/* average SNR of during frame reception */
-	/* Add new fields here */
-	/* variable length Information Elements */
-};
-
-struct brcm_rateset_le {
-	/* # rates in this set */
-	__le32 count;
-	/* rates in 500kbps units w/hi bit set if basic */
-	u8 rates[BRCMF_MAXRATES_IN_SET];
-};
-
-struct brcmf_ssid {
-	u32 SSID_len;
-	unsigned char SSID[32];
-};
-
-struct brcmf_ssid_le {
-	__le32 SSID_len;
-	unsigned char SSID[32];
-};
-
-struct brcmf_scan_params_le {
-	struct brcmf_ssid_le ssid_le;	/* default: {0, ""} */
-	u8 bssid[ETH_ALEN];	/* default: bcast */
-	s8 bss_type;		/* default: any,
-				 * DOT11_BSSTYPE_ANY/INFRASTRUCTURE/INDEPENDENT
-				 */
-	u8 scan_type;	/* flags, 0 use default */
-	__le32 nprobes;	  /* -1 use default, number of probes per channel */
-	__le32 active_time;	/* -1 use default, dwell time per channel for
-				 * active scanning
-				 */
-	__le32 passive_time;	/* -1 use default, dwell time per channel
-				 * for passive scanning
-				 */
-	__le32 home_time;	/* -1 use default, dwell time for the
-				 * home channel between channel scans
-				 */
-	__le32 channel_num;	/* count of channels and ssids that follow
-				 *
-				 * low half is count of channels in
-				 * channel_list, 0 means default (use all
-				 * available channels)
-				 *
-				 * high half is entries in struct brcmf_ssid
-				 * array that follows channel_list, aligned for
-				 * s32 (4 bytes) meaning an odd channel count
-				 * implies a 2-byte pad between end of
-				 * channel_list and first ssid
-				 *
-				 * if ssid count is zero, single ssid in the
-				 * fixed parameter portion is assumed, otherwise
-				 * ssid in the fixed portion is ignored
-				 */
-	__le16 channel_list[1];	/* list of chanspecs */
-};
-
-struct brcmf_scan_results {
-	u32 buflen;
-	u32 version;
-	u32 count;
-	struct brcmf_bss_info_le bss_info_le[];
-};
-
-struct brcmf_escan_params_le {
-	__le32 version;
-	__le16 action;
-	__le16 sync_id;
-	struct brcmf_scan_params_le params_le;
-};
-
-struct brcmf_escan_result_le {
-	__le32 buflen;
-	__le32 version;
-	__le16 sync_id;
-	__le16 bss_count;
-	struct brcmf_bss_info_le bss_info_le;
-};
-
-#define WL_ESCAN_RESULTS_FIXED_SIZE (sizeof(struct brcmf_escan_result_le) - \
-	sizeof(struct brcmf_bss_info_le))
-
-/* used for association with a specific BSSID and chanspec list */
-struct brcmf_assoc_params_le {
-	/* 00:00:00:00:00:00: broadcast scan */
-	u8 bssid[ETH_ALEN];
-	/* 0: all available channels, otherwise count of chanspecs in
-	 * chanspec_list */
-	__le32 chanspec_num;
-	/* list of chanspecs */
-	__le16 chanspec_list[1];
-};
-
-/* used for join with or without a specific bssid and channel list */
-struct brcmf_join_params {
-	struct brcmf_ssid_le ssid_le;
-	struct brcmf_assoc_params_le params_le;
-};
-
-/* scan params for extended join */
-struct brcmf_join_scan_params_le {
-	u8 scan_type;		/* 0 use default, active or passive scan */
-	__le32 nprobes;		/* -1 use default, nr of probes per channel */
-	__le32 active_time;	/* -1 use default, dwell time per channel for
-				 * active scanning
-				 */
-	__le32 passive_time;	/* -1 use default, dwell time per channel
-				 * for passive scanning
-				 */
-	__le32 home_time;	/* -1 use default, dwell time for the home
-				 * channel between channel scans
-				 */
-};
-
-/* extended join params */
-struct brcmf_ext_join_params_le {
-	struct brcmf_ssid_le ssid_le;	/* {0, ""}: wildcard scan */
-	struct brcmf_join_scan_params_le scan_le;
-	struct brcmf_assoc_params_le assoc_le;
-};
-
-struct brcmf_wsec_key {
-	u32 index;		/* key index */
-	u32 len;		/* key length */
-	u8 data[WLAN_MAX_KEY_LEN];	/* key data */
-	u32 pad_1[18];
-	u32 algo;	/* CRYPTO_ALGO_AES_CCM, CRYPTO_ALGO_WEP128, etc */
-	u32 flags;	/* misc flags */
-	u32 pad_2[3];
-	u32 iv_initialized;	/* has IV been initialized already? */
-	u32 pad_3;
-	/* Rx IV */
-	struct {
-		u32 hi;	/* upper 32 bits of IV */
-		u16 lo;	/* lower 16 bits of IV */
-	} rxiv;
-	u32 pad_4[2];
-	u8 ea[ETH_ALEN];	/* per station */
-};
-
-/*
- * dongle requires same struct as above but with fields in little endian order
- */
-struct brcmf_wsec_key_le {
-	__le32 index;		/* key index */
-	__le32 len;		/* key length */
-	u8 data[WLAN_MAX_KEY_LEN];	/* key data */
-	__le32 pad_1[18];
-	__le32 algo;	/* CRYPTO_ALGO_AES_CCM, CRYPTO_ALGO_WEP128, etc */
-	__le32 flags;	/* misc flags */
-	__le32 pad_2[3];
-	__le32 iv_initialized;	/* has IV been initialized already? */
-	__le32 pad_3;
-	/* Rx IV */
-	struct {
-		__le32 hi;	/* upper 32 bits of IV */
-		__le16 lo;	/* lower 16 bits of IV */
-	} rxiv;
-	__le32 pad_4[2];
-	u8 ea[ETH_ALEN];	/* per station */
-};
-
-/* Used to get specific STA parameters */
-struct brcmf_scb_val_le {
-	__le32 val;
-	u8 ea[ETH_ALEN];
-};
-
-/* channel encoding */
-struct brcmf_channel_info_le {
-	__le32 hw_channel;
-	__le32 target_channel;
-	__le32 scan_channel;
-};
-
-struct brcmf_sta_info_le {
-	__le16	ver;		/* version of this struct */
-	__le16	len;		/* length in bytes of this structure */
-	__le16	cap;		/* sta's advertised capabilities */
-	__le32	flags;		/* flags defined below */
-	__le32	idle;		/* time since data pkt rx'd from sta */
-	u8	ea[ETH_ALEN];		/* Station address */
-	__le32	count;			/* # rates in this set */
-	u8	rates[BRCMF_MAXRATES_IN_SET];	/* rates in 500kbps units */
-						/* w/hi bit set if basic */
-	__le32	in;		/* seconds elapsed since associated */
-	__le32	listen_interval_inms; /* Min Listen interval in ms for STA */
-	__le32	tx_pkts;	/* # of packets transmitted */
-	__le32	tx_failures;	/* # of packets failed */
-	__le32	rx_ucast_pkts;	/* # of unicast packets received */
-	__le32	rx_mcast_pkts;	/* # of multicast packets received */
-	__le32	tx_rate;	/* Rate of last successful tx frame */
-	__le32	rx_rate;	/* Rate of last successful rx frame */
-	__le32	rx_decrypt_succeeds;	/* # of packet decrypted successfully */
-	__le32	rx_decrypt_failures;	/* # of packet decrypted failed */
-};
-
-struct brcmf_chanspec_list {
-	__le32	count;		/* # of entries */
-	__le32	element[1];	/* variable length uint32 list */
-};
+#define BRCMF_AMPDU_RX_REORDER_MAXFLOWS		256
 
-/*
- * WLC_E_PROBRESP_MSG
- * WLC_E_P2P_PROBREQ_MSG
- * WLC_E_ACTION_FRAME_RX
+/* Length of firmware version string stored for
+ * ethtool driver info which uses 32 bytes as well.
  */
-struct brcmf_rx_mgmt_data {
-	__be16	version;
-	__be16	chanspec;
-	__be32	rssi;
-	__be32	mactime;
-	__be32	rate;
-};
+#define BRCMF_DRIVER_FIRMWARE_VERSION_LEN	32
 
 /* Bus independent dongle command */
 struct brcmf_dcmd {
@@ -535,7 +87,7 @@ struct brcmf_fws_info; /* firmware signalling info */
 struct brcmf_pub {
 	/* Linkage pointers */
 	struct brcmf_bus *bus_if;
-	struct brcmf_proto *prot;
+	struct brcmf_proto *proto;
 	struct brcmf_cfg80211_info *config;
 
 	/* Internal brcmf items */
@@ -544,7 +96,7 @@ struct brcmf_pub {
 	u8 wme_dp;		/* wme discard priority */
 
 	/* Dongle media info */
-	unsigned long drv_version;	/* Version of dongle-resident driver */
+	char fwver[BRCMF_DRIVER_FIRMWARE_VERSION_LEN];
 	u8 mac[ETH_ALEN];		/* MAC address obtained from dongle */
 
 	/* Multicast data packets sent to dongle */
@@ -566,14 +118,6 @@ struct brcmf_pub {
 #endif
 };
 
-struct brcmf_if_event {
-	u8 ifidx;
-	u8 action;
-	u8 flags;
-	u8 bssidx;
-	u8 role;
-};
-
 /* forward declarations */
 struct brcmf_cfg80211_vif;
 struct brcmf_fws_mac_descriptor;
@@ -635,16 +179,6 @@ int brcmf_netdev_wait_pend8021x(struct net_device *ndev);
 /* Return pointer to interface name */
 char *brcmf_ifname(struct brcmf_pub *drvr, int idx);
 
-/* Query dongle */
-int brcmf_proto_cdc_query_dcmd(struct brcmf_pub *drvr, int ifidx, uint cmd,
-			       void *buf, uint len);
-int brcmf_proto_cdc_set_dcmd(struct brcmf_pub *drvr, int ifidx, uint cmd,
-			     void *buf, uint len);
-
-/* Remove any protocol-specific data header. */
-int brcmf_proto_hdrpull(struct brcmf_pub *drvr, bool do_fws, u8 *ifidx,
-			struct sk_buff *rxp);
-
 int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked);
 struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bssidx, s32 ifidx,
 			      char *name, u8 *mac_addr);
@@ -655,4 +189,7 @@ u32 brcmf_get_chip_info(struct brcmf_if *ifp);
 void brcmf_txfinalize(struct brcmf_pub *drvr, struct sk_buff *txp,
 		      bool success);
 
+/* Sets dongle media info (drv_version, mac address). */
+int brcmf_c_preinit_dcmds(struct brcmf_if *ifp);
+
 #endif				/* _BRCMF_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
index a6eb09e5d46f..c4535616064e 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
@@ -17,13 +17,23 @@
 #ifndef _BRCMF_BUS_H_
 #define _BRCMF_BUS_H_
 
+#include "dhd_dbg.h"
+
 /* The level of bus communication with the dongle */
 enum brcmf_bus_state {
+	BRCMF_BUS_UNKNOWN,	/* Not determined yet */
+	BRCMF_BUS_NOMEDIUM,	/* No medium access to dongle */
 	BRCMF_BUS_DOWN,		/* Not ready for frame transfers */
 	BRCMF_BUS_LOAD,		/* Download access only (CPU reset) */
 	BRCMF_BUS_DATA		/* Ready for frame transfers */
 };
 
+/* The type of bus protocol used for communication with the dongle */
+enum brcmf_bus_protocol_type {
+	BRCMF_PROTO_BCDC,
+	BRCMF_PROTO_MSGBUF
+};
+
 struct brcmf_bus_dcmd {
 	char *name;
 	char *param;
@@ -34,6 +44,7 @@ struct brcmf_bus_dcmd {
 /**
  * struct brcmf_bus_ops - bus callback operations.
  *
+ * @preinit: execute bus/device specific dongle init commands (optional).
  * @init: prepare for communication with dongle.
  * @stop: clear pending frames, disable data flow.
  * @txdata: send a data frame to the dongle. When the data
@@ -51,6 +62,7 @@ struct brcmf_bus_dcmd {
  * indicated otherwise these callbacks are mandatory.
  */
 struct brcmf_bus_ops {
+	int (*preinit)(struct device *dev);
 	int (*init)(struct device *dev);
 	void (*stop)(struct device *dev);
 	int (*txdata)(struct device *dev, struct sk_buff *skb);
@@ -63,6 +75,7 @@ struct brcmf_bus_ops {
  * struct brcmf_bus - interface structure between common and bus layer
  *
  * @bus_priv: pointer to private bus device.
+ * @proto_type: protocol type, bcdc or msgbuf
  * @dev: device pointer of bus device.
  * @drvr: public driver information.
  * @state: operational state of the bus interface.
@@ -78,6 +91,7 @@ struct brcmf_bus {
 		struct brcmf_sdio_dev *sdio;
 		struct brcmf_usbdev *usb;
 	} bus_priv;
+	enum brcmf_bus_protocol_type proto_type;
 	struct device *dev;
 	struct brcmf_pub *drvr;
 	enum brcmf_bus_state state;
@@ -85,7 +99,6 @@ struct brcmf_bus {
 	unsigned long tx_realloc;
 	u32 chip;
 	u32 chiprev;
-	struct list_head dcmd_list;
 
 	struct brcmf_bus_ops *ops;
 };
@@ -93,6 +106,13 @@ struct brcmf_bus {
 /*
  * callback wrappers
  */
+static inline int brcmf_bus_preinit(struct brcmf_bus *bus)
+{
+	if (!bus->ops->preinit)
+		return 0;
+	return bus->ops->preinit(bus->dev);
+}
+
 static inline int brcmf_bus_init(struct brcmf_bus *bus)
 {
 	return bus->ops->init(bus->dev);
@@ -128,6 +148,23 @@ struct pktq *brcmf_bus_gettxq(struct brcmf_bus *bus)
 
 	return bus->ops->gettxq(bus->dev);
 }
+
+static inline bool brcmf_bus_ready(struct brcmf_bus *bus)
+{
+	return bus->state == BRCMF_BUS_LOAD || bus->state == BRCMF_BUS_DATA;
+}
+
+static inline void brcmf_bus_change_state(struct brcmf_bus *bus,
+					  enum brcmf_bus_state new_state)
+{
+	/* NOMEDIUM is permanent */
+	if (bus->state == BRCMF_BUS_NOMEDIUM)
+		return;
+
+	brcmf_dbg(TRACE, "%d -> %d\n", bus->state, new_state);
+	bus->state = new_state;
+}
+
 /*
  * interface functions from common layer
  */
@@ -139,7 +176,7 @@ bool brcmf_c_prec_enq(struct device *dev, struct pktq *q, struct sk_buff *pkt,
 void brcmf_rx_frame(struct device *dev, struct sk_buff *rxp);
 
 /* Indication from bus module regarding presence/insertion of dongle. */
-int brcmf_attach(uint bus_hdrlen, struct device *dev);
+int brcmf_attach(struct device *dev);
 /* Indication from bus module regarding removal/absence of dongle */
 void brcmf_detach(struct device *dev);
 /* Indication from bus module that dongle should be reset */
@@ -151,6 +188,9 @@ void brcmf_txflowblock(struct device *dev, bool state);
 void brcmf_txcomplete(struct device *dev, struct sk_buff *txp, bool success);
 
 int brcmf_bus_start(struct device *dev);
+s32 brcmf_iovar_data_set(struct device *dev, char *name, void *data,
+				u32 len);
+void brcmf_bus_add_txhdrlen(struct device *dev, uint len);
 
 #ifdef CONFIG_BRCMFMAC_SDIO
 void brcmf_sdio_exit(void);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c
deleted file mode 100644
index dd85401063cb..000000000000
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c
+++ /dev/null
@@ -1,392 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-/*******************************************************************************
- * Communicates with the dongle by using dcmd codes.
- * For certain dcmd codes, the dongle interprets string data from the host.
- ******************************************************************************/
-
-#include <linux/types.h>
-#include <linux/netdevice.h>
-
-#include <brcmu_utils.h>
-#include <brcmu_wifi.h>
-
-#include "dhd.h"
-#include "dhd_proto.h"
-#include "dhd_bus.h"
-#include "fwsignal.h"
-#include "dhd_dbg.h"
-#include "tracepoint.h"
-
-struct brcmf_proto_cdc_dcmd {
-	__le32 cmd;	/* dongle command value */
-	__le32 len;	/* lower 16: output buflen;
-			 * upper 16: input buflen (excludes header) */
-	__le32 flags;	/* flag defns given below */
-	__le32 status;	/* status code returned from the device */
-};
-
-/* Max valid buffer size that can be sent to the dongle */
-#define CDC_MAX_MSG_SIZE	(ETH_FRAME_LEN+ETH_FCS_LEN)
-
-/* CDC flag definitions */
-#define CDC_DCMD_ERROR		0x01	/* 1=cmd failed */
-#define CDC_DCMD_SET		0x02	/* 0=get, 1=set cmd */
-#define CDC_DCMD_IF_MASK	0xF000		/* I/F index */
-#define CDC_DCMD_IF_SHIFT	12
-#define CDC_DCMD_ID_MASK	0xFFFF0000	/* id an cmd pairing */
-#define CDC_DCMD_ID_SHIFT	16		/* ID Mask shift bits */
-#define CDC_DCMD_ID(flags)	\
-	(((flags) & CDC_DCMD_ID_MASK) >> CDC_DCMD_ID_SHIFT)
-
-/*
- * BDC header - Broadcom specific extension of CDC.
- * Used on data packets to convey priority across USB.
- */
-#define	BDC_HEADER_LEN		4
-#define BDC_PROTO_VER		2	/* Protocol version */
-#define BDC_FLAG_VER_MASK	0xf0	/* Protocol version mask */
-#define BDC_FLAG_VER_SHIFT	4	/* Protocol version shift */
-#define BDC_FLAG_SUM_GOOD	0x04	/* Good RX checksums */
-#define BDC_FLAG_SUM_NEEDED	0x08	/* Dongle needs to do TX checksums */
-#define BDC_PRIORITY_MASK	0x7
-#define BDC_FLAG2_IF_MASK	0x0f	/* packet rx interface in APSTA */
-#define BDC_FLAG2_IF_SHIFT	0
-
-#define BDC_GET_IF_IDX(hdr) \
-	((int)((((hdr)->flags2) & BDC_FLAG2_IF_MASK) >> BDC_FLAG2_IF_SHIFT))
-#define BDC_SET_IF_IDX(hdr, idx) \
-	((hdr)->flags2 = (((hdr)->flags2 & ~BDC_FLAG2_IF_MASK) | \
-	((idx) << BDC_FLAG2_IF_SHIFT)))
-
-/**
- * struct brcmf_proto_bdc_header - BDC header format
- *
- * @flags: flags contain protocol and checksum info.
- * @priority: 802.1d priority and USB flow control info (bit 4:7).
- * @flags2: additional flags containing dongle interface index.
- * @data_offset: start of packet data. header is following by firmware signals.
- */
-struct brcmf_proto_bdc_header {
-	u8 flags;
-	u8 priority;
-	u8 flags2;
-	u8 data_offset;
-};
-
-/*
- * maximum length of firmware signal data between
- * the BDC header and packet data in the tx path.
- */
-#define BRCMF_PROT_FW_SIGNAL_MAX_TXBYTES	12
-
-#define RETRIES 2 /* # of retries to retrieve matching dcmd response */
-#define BUS_HEADER_LEN	(16+64)		/* Must be at least SDPCM_RESERVE
-					 * (amount of header that might be added)
-					 * plus any space that might be needed
-					 * for bus alignment padding.
-					 */
-#define ROUND_UP_MARGIN	2048	/* Largest bus block size possible, for
-				 * rounding off at the end of the buffer.
-				 * Currently this is SDIO.
-				 */
-
-struct brcmf_proto {
-	u16 reqid;
-	u8 bus_header[BUS_HEADER_LEN];
-	struct brcmf_proto_cdc_dcmd msg;
-	unsigned char buf[BRCMF_DCMD_MAXLEN + ROUND_UP_MARGIN];
-};
-
-static int brcmf_proto_cdc_msg(struct brcmf_pub *drvr)
-{
-	struct brcmf_proto *prot = drvr->prot;
-	int len = le32_to_cpu(prot->msg.len) +
-			sizeof(struct brcmf_proto_cdc_dcmd);
-
-	brcmf_dbg(CDC, "Enter\n");
-
-	/* NOTE : cdc->msg.len holds the desired length of the buffer to be
-	 *        returned. Only up to CDC_MAX_MSG_SIZE of this buffer area
-	 *        is actually sent to the dongle
-	 */
-	if (len > CDC_MAX_MSG_SIZE)
-		len = CDC_MAX_MSG_SIZE;
-
-	/* Send request */
-	return brcmf_bus_txctl(drvr->bus_if, (unsigned char *)&prot->msg, len);
-}
-
-static int brcmf_proto_cdc_cmplt(struct brcmf_pub *drvr, u32 id, u32 len)
-{
-	int ret;
-	struct brcmf_proto *prot = drvr->prot;
-
-	brcmf_dbg(CDC, "Enter\n");
-	len += sizeof(struct brcmf_proto_cdc_dcmd);
-	do {
-		ret = brcmf_bus_rxctl(drvr->bus_if, (unsigned char *)&prot->msg,
-				      len);
-		if (ret < 0)
-			break;
-	} while (CDC_DCMD_ID(le32_to_cpu(prot->msg.flags)) != id);
-
-	return ret;
-}
-
-int
-brcmf_proto_cdc_query_dcmd(struct brcmf_pub *drvr, int ifidx, uint cmd,
-			       void *buf, uint len)
-{
-	struct brcmf_proto *prot = drvr->prot;
-	struct brcmf_proto_cdc_dcmd *msg = &prot->msg;
-	void *info;
-	int ret = 0, retries = 0;
-	u32 id, flags;
-
-	brcmf_dbg(CDC, "Enter, cmd %d len %d\n", cmd, len);
-
-	memset(msg, 0, sizeof(struct brcmf_proto_cdc_dcmd));
-
-	msg->cmd = cpu_to_le32(cmd);
-	msg->len = cpu_to_le32(len);
-	flags = (++prot->reqid << CDC_DCMD_ID_SHIFT);
-	flags = (flags & ~CDC_DCMD_IF_MASK) |
-		(ifidx << CDC_DCMD_IF_SHIFT);
-	msg->flags = cpu_to_le32(flags);
-
-	if (buf)
-		memcpy(prot->buf, buf, len);
-
-	ret = brcmf_proto_cdc_msg(drvr);
-	if (ret < 0) {
-		brcmf_err("brcmf_proto_cdc_msg failed w/status %d\n",
-			  ret);
-		goto done;
-	}
-
-retry:
-	/* wait for interrupt and get first fragment */
-	ret = brcmf_proto_cdc_cmplt(drvr, prot->reqid, len);
-	if (ret < 0)
-		goto done;
-
-	flags = le32_to_cpu(msg->flags);
-	id = (flags & CDC_DCMD_ID_MASK) >> CDC_DCMD_ID_SHIFT;
-
-	if ((id < prot->reqid) && (++retries < RETRIES))
-		goto retry;
-	if (id != prot->reqid) {
-		brcmf_err("%s: unexpected request id %d (expected %d)\n",
-			  brcmf_ifname(drvr, ifidx), id, prot->reqid);
-		ret = -EINVAL;
-		goto done;
-	}
-
-	/* Check info buffer */
-	info = (void *)&msg[1];
-
-	/* Copy info buffer */
-	if (buf) {
-		if (ret < (int)len)
-			len = ret;
-		memcpy(buf, info, len);
-	}
-
-	/* Check the ERROR flag */
-	if (flags & CDC_DCMD_ERROR)
-		ret = le32_to_cpu(msg->status);
-
-done:
-	return ret;
-}
-
-int brcmf_proto_cdc_set_dcmd(struct brcmf_pub *drvr, int ifidx, uint cmd,
-				 void *buf, uint len)
-{
-	struct brcmf_proto *prot = drvr->prot;
-	struct brcmf_proto_cdc_dcmd *msg = &prot->msg;
-	int ret = 0;
-	u32 flags, id;
-
-	brcmf_dbg(CDC, "Enter, cmd %d len %d\n", cmd, len);
-
-	memset(msg, 0, sizeof(struct brcmf_proto_cdc_dcmd));
-
-	msg->cmd = cpu_to_le32(cmd);
-	msg->len = cpu_to_le32(len);
-	flags = (++prot->reqid << CDC_DCMD_ID_SHIFT) | CDC_DCMD_SET;
-	flags = (flags & ~CDC_DCMD_IF_MASK) |
-		(ifidx << CDC_DCMD_IF_SHIFT);
-	msg->flags = cpu_to_le32(flags);
-
-	if (buf)
-		memcpy(prot->buf, buf, len);
-
-	ret = brcmf_proto_cdc_msg(drvr);
-	if (ret < 0)
-		goto done;
-
-	ret = brcmf_proto_cdc_cmplt(drvr, prot->reqid, len);
-	if (ret < 0)
-		goto done;
-
-	flags = le32_to_cpu(msg->flags);
-	id = (flags & CDC_DCMD_ID_MASK) >> CDC_DCMD_ID_SHIFT;
-
-	if (id != prot->reqid) {
-		brcmf_err("%s: unexpected request id %d (expected %d)\n",
-			  brcmf_ifname(drvr, ifidx), id, prot->reqid);
-		ret = -EINVAL;
-		goto done;
-	}
-
-	/* Check the ERROR flag */
-	if (flags & CDC_DCMD_ERROR)
-		ret = le32_to_cpu(msg->status);
-
-done:
-	return ret;
-}
-
-static bool pkt_sum_needed(struct sk_buff *skb)
-{
-	return skb->ip_summed == CHECKSUM_PARTIAL;
-}
-
-static void pkt_set_sum_good(struct sk_buff *skb, bool x)
-{
-	skb->ip_summed = (x ? CHECKSUM_UNNECESSARY : CHECKSUM_NONE);
-}
-
-void brcmf_proto_hdrpush(struct brcmf_pub *drvr, int ifidx, u8 offset,
-			 struct sk_buff *pktbuf)
-{
-	struct brcmf_proto_bdc_header *h;
-
-	brcmf_dbg(CDC, "Enter\n");
-
-	/* Push BDC header used to convey priority for buses that don't carry it */
-	skb_push(pktbuf, BDC_HEADER_LEN);
-
-	h = (struct brcmf_proto_bdc_header *)(pktbuf->data);
-
-	h->flags = (BDC_PROTO_VER << BDC_FLAG_VER_SHIFT);
-	if (pkt_sum_needed(pktbuf))
-		h->flags |= BDC_FLAG_SUM_NEEDED;
-
-	h->priority = (pktbuf->priority & BDC_PRIORITY_MASK);
-	h->flags2 = 0;
-	h->data_offset = offset;
-	BDC_SET_IF_IDX(h, ifidx);
-	trace_brcmf_bdchdr(pktbuf->data);
-}
-
-int brcmf_proto_hdrpull(struct brcmf_pub *drvr, bool do_fws, u8 *ifidx,
-			struct sk_buff *pktbuf)
-{
-	struct brcmf_proto_bdc_header *h;
-
-	brcmf_dbg(CDC, "Enter\n");
-
-	/* Pop BDC header used to convey priority for buses that don't carry it */
-
-	if (pktbuf->len <= BDC_HEADER_LEN) {
-		brcmf_dbg(INFO, "rx data too short (%d <= %d)\n",
-			  pktbuf->len, BDC_HEADER_LEN);
-		return -EBADE;
-	}
-
-	trace_brcmf_bdchdr(pktbuf->data);
-	h = (struct brcmf_proto_bdc_header *)(pktbuf->data);
-
-	*ifidx = BDC_GET_IF_IDX(h);
-	if (*ifidx >= BRCMF_MAX_IFS) {
-		brcmf_err("rx data ifnum out of range (%d)\n", *ifidx);
-		return -EBADE;
-	}
-	/* The ifidx is the idx to map to matching netdev/ifp. When receiving
-	 * events this is easy because it contains the bssidx which maps
-	 * 1-on-1 to the netdev/ifp. But for data frames only the ifidx is
-	 * received. bssidx 1 is used for p2p0 and no data can be received or
-	 * transmitted on it. Therefore bssidx is ifidx + 1 if ifidx > 0.
-	 */
-	if (*ifidx)
-		(*ifidx)++;
-
-	if (((h->flags & BDC_FLAG_VER_MASK) >> BDC_FLAG_VER_SHIFT) !=
-	    BDC_PROTO_VER) {
-		brcmf_err("%s: non-BDC packet received, flags 0x%x\n",
-			  brcmf_ifname(drvr, *ifidx), h->flags);
-		return -EBADE;
-	}
-
-	if (h->flags & BDC_FLAG_SUM_GOOD) {
-		brcmf_dbg(CDC, "%s: BDC rcv, good checksum, flags 0x%x\n",
-			  brcmf_ifname(drvr, *ifidx), h->flags);
-		pkt_set_sum_good(pktbuf, true);
-	}
-
-	pktbuf->priority = h->priority & BDC_PRIORITY_MASK;
-
-	skb_pull(pktbuf, BDC_HEADER_LEN);
-	if (do_fws)
-		brcmf_fws_hdrpull(drvr, *ifidx, h->data_offset << 2, pktbuf);
-	else
-		skb_pull(pktbuf, h->data_offset << 2);
-
-	if (pktbuf->len == 0)
-		return -ENODATA;
-	return 0;
-}
-
-int brcmf_proto_attach(struct brcmf_pub *drvr)
-{
-	struct brcmf_proto *cdc;
-
-	cdc = kzalloc(sizeof(struct brcmf_proto), GFP_ATOMIC);
-	if (!cdc)
-		goto fail;
-
-	/* ensure that the msg buf directly follows the cdc msg struct */
-	if ((unsigned long)(&cdc->msg + 1) != (unsigned long)cdc->buf) {
-		brcmf_err("struct brcmf_proto is not correctly defined\n");
-		goto fail;
-	}
-
-	drvr->prot = cdc;
-	drvr->hdrlen += BDC_HEADER_LEN + BRCMF_PROT_FW_SIGNAL_MAX_TXBYTES;
-	drvr->bus_if->maxctl = BRCMF_DCMD_MAXLEN +
-			sizeof(struct brcmf_proto_cdc_dcmd) + ROUND_UP_MARGIN;
-	return 0;
-
-fail:
-	kfree(cdc);
-	return -ENOMEM;
-}
-
-/* ~NOTE~ What if another thread is waiting on the semaphore?  Holding it? */
-void brcmf_proto_detach(struct brcmf_pub *drvr)
-{
-	kfree(drvr->prot);
-	drvr->prot = NULL;
-}
-
-void brcmf_proto_stop(struct brcmf_pub *drvr)
-{
-	/* Nothing to do for CDC */
-}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
index 9431af2465f3..6a8983a1fb9c 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
@@ -21,9 +21,9 @@
 #include <brcmu_utils.h>
 #include "dhd.h"
 #include "dhd_bus.h"
-#include "dhd_proto.h"
 #include "dhd_dbg.h"
 #include "fwil.h"
+#include "fwil_types.h"
 #include "tracepoint.h"
 
 #define PKTFILTER_BUF_SIZE		128
@@ -32,15 +32,6 @@
 #define BRCMF_DEFAULT_SCAN_UNASSOC_TIME	40
 #define BRCMF_DEFAULT_PACKET_FILTER	"100 0 0 0 0x01 0x00"
 
-#ifdef DEBUG
-static const char brcmf_version[] =
-	"Dongle Host Driver, version " BRCMF_VERSION_STR "\nCompiled on "
-	__DATE__ " at " __TIME__;
-#else
-static const char brcmf_version[] =
-	"Dongle Host Driver, version " BRCMF_VERSION_STR;
-#endif
-
 
 bool brcmf_c_prec_enq(struct device *dev, struct pktq *q,
 		      struct sk_buff *pkt, int prec)
@@ -257,8 +248,6 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
 	u8 buf[BRCMF_DCMD_SMLEN];
 	char *ptr;
 	s32 err;
-	struct brcmf_bus_dcmd *cmdlst;
-	struct list_head *cur, *q;
 
 	/* retrieve mac address */
 	err = brcmf_fil_iovar_data_get(ifp, "cur_etheraddr", ifp->mac_addr,
@@ -281,9 +270,14 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
 	}
 	ptr = (char *)buf;
 	strsep(&ptr, "\n");
+
 	/* Print fw version info */
 	brcmf_err("Firmware version = %s\n", buf);
 
+	/* locate firmware version number for ethtool */
+	ptr = strrchr(buf, ' ') + 1;
+	strlcpy(ifp->drvr->fwver, ptr, sizeof(ifp->drvr->fwver));
+
 	/*
 	 * Setup timeout if Beacons are lost and roam is off to report
 	 * link down
@@ -342,17 +336,8 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
 	brcmf_c_pktfilter_offload_enable(ifp, BRCMF_DEFAULT_PACKET_FILTER,
 					 0, true);
 
-	/* set bus specific command if there is any */
-	list_for_each_safe(cur, q, &ifp->drvr->bus_if->dcmd_list) {
-		cmdlst = list_entry(cur, struct brcmf_bus_dcmd, list);
-		if (cmdlst->name && cmdlst->param && cmdlst->param_len) {
-			brcmf_fil_iovar_data_set(ifp, cmdlst->name,
-						 cmdlst->param,
-						 cmdlst->param_len);
-		}
-		list_del(cur);
-		kfree(cmdlst);
-	}
+	/* do bus specific preinit here */
+	err = brcmf_bus_preinit(ifp->drvr->bus_if);
 done:
 	return err;
 }
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c
index 0f9e9057e7dd..03fe8aca4d32 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c
@@ -22,7 +22,6 @@
 #include "dhd.h"
 #include "dhd_bus.h"
 #include "dhd_dbg.h"
-#include "tracepoint.h"
 
 static struct dentry *root_folder;
 
@@ -42,6 +41,40 @@ void brcmf_debugfs_exit(void)
 	root_folder = NULL;
 }
 
+static
+ssize_t brcmf_debugfs_chipinfo_read(struct file *f, char __user *data,
+				   size_t count, loff_t *ppos)
+{
+	struct brcmf_pub *drvr = f->private_data;
+	struct brcmf_bus *bus = drvr->bus_if;
+	char buf[40];
+	int res;
+
+	/* only allow read from start */
+	if (*ppos > 0)
+		return 0;
+
+	res = scnprintf(buf, sizeof(buf), "chip: %x(%u) rev %u\n",
+			bus->chip, bus->chip, bus->chiprev);
+	return simple_read_from_buffer(data, count, ppos, buf, res);
+}
+
+static const struct file_operations brcmf_debugfs_chipinfo_ops = {
+	.owner = THIS_MODULE,
+	.open = simple_open,
+	.read = brcmf_debugfs_chipinfo_read
+};
+
+static int brcmf_debugfs_create_chipinfo(struct brcmf_pub *drvr)
+{
+	struct dentry *dentry =  drvr->dbgfs_dir;
+
+	if (!IS_ERR_OR_NULL(dentry))
+		debugfs_create_file("chipinfo", S_IRUGO, dentry, drvr,
+				    &brcmf_debugfs_chipinfo_ops);
+	return 0;
+}
+
 int brcmf_debugfs_attach(struct brcmf_pub *drvr)
 {
 	struct device *dev = drvr->bus_if->dev;
@@ -50,6 +83,7 @@ int brcmf_debugfs_attach(struct brcmf_pub *drvr)
 		return -ENODEV;
 
 	drvr->dbgfs_dir = debugfs_create_dir(dev_name(dev), root_folder);
+	brcmf_debugfs_create_chipinfo(drvr);
 	return PTR_ERR_OR_ZERO(drvr->dbgfs_dir);
 }
 
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h
index 0af1f5dc583a..ef52ed7abc69 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h
@@ -33,7 +33,7 @@
 #define BRCMF_USB_VAL	0x00002000
 #define BRCMF_SCAN_VAL	0x00004000
 #define BRCMF_CONN_VAL	0x00008000
-#define BRCMF_CDC_VAL	0x00010000
+#define BRCMF_BCDC_VAL	0x00010000
 #define BRCMF_SDIO_VAL	0x00020000
 
 /* set default print format */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
index 64e9cff241b9..d4d966beb840 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
@@ -24,13 +24,13 @@
 
 #include "dhd.h"
 #include "dhd_bus.h"
-#include "dhd_proto.h"
 #include "dhd_dbg.h"
 #include "fwil_types.h"
 #include "p2p.h"
 #include "wl_cfg80211.h"
 #include "fwil.h"
 #include "fwsignal.h"
+#include "proto.h"
 
 MODULE_AUTHOR("Broadcom Corporation");
 MODULE_DESCRIPTION("Broadcom 802.11 wireless LAN fullmac driver.");
@@ -592,28 +592,6 @@ static struct net_device_stats *brcmf_netdev_get_stats(struct net_device *ndev)
 	return &ifp->stats;
 }
 
-/*
- * Set current toe component enables in toe_ol iovar,
- * and set toe global enable iovar
- */
-static int brcmf_toe_set(struct brcmf_if *ifp, u32 toe_ol)
-{
-	s32 err;
-
-	err = brcmf_fil_iovar_int_set(ifp, "toe_ol", toe_ol);
-	if (err < 0) {
-		brcmf_err("Setting toe_ol failed, %d\n", err);
-		return err;
-	}
-
-	err = brcmf_fil_iovar_int_set(ifp, "toe", (toe_ol != 0));
-	if (err < 0)
-		brcmf_err("Setting toe failed, %d\n", err);
-
-	return err;
-
-}
-
 static void brcmf_ethtool_get_drvinfo(struct net_device *ndev,
 				    struct ethtool_drvinfo *info)
 {
@@ -621,8 +599,8 @@ static void brcmf_ethtool_get_drvinfo(struct net_device *ndev,
 	struct brcmf_pub *drvr = ifp->drvr;
 
 	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
-	snprintf(info->version, sizeof(info->version), "%lu",
-		 drvr->drv_version);
+	snprintf(info->version, sizeof(info->version), "n/a");
+	strlcpy(info->fw_version, drvr->fwver, sizeof(info->fw_version));
 	strlcpy(info->bus_info, dev_name(drvr->bus_if->dev),
 		sizeof(info->bus_info));
 }
@@ -631,124 +609,6 @@ static const struct ethtool_ops brcmf_ethtool_ops = {
 	.get_drvinfo = brcmf_ethtool_get_drvinfo,
 };
 
-static int brcmf_ethtool(struct brcmf_if *ifp, void __user *uaddr)
-{
-	struct brcmf_pub *drvr = ifp->drvr;
-	struct ethtool_drvinfo info;
-	char drvname[sizeof(info.driver)];
-	u32 cmd;
-	struct ethtool_value edata;
-	u32 toe_cmpnt, csum_dir;
-	int ret;
-
-	brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);
-
-	/* all ethtool calls start with a cmd word */
-	if (copy_from_user(&cmd, uaddr, sizeof(u32)))
-		return -EFAULT;
-
-	switch (cmd) {
-	case ETHTOOL_GDRVINFO:
-		/* Copy out any request driver name */
-		if (copy_from_user(&info, uaddr, sizeof(info)))
-			return -EFAULT;
-		strncpy(drvname, info.driver, sizeof(info.driver));
-		drvname[sizeof(info.driver) - 1] = '\0';
-
-		/* clear struct for return */
-		memset(&info, 0, sizeof(info));
-		info.cmd = cmd;
-
-		/* if requested, identify ourselves */
-		if (strcmp(drvname, "?dhd") == 0) {
-			sprintf(info.driver, "dhd");
-			strcpy(info.version, BRCMF_VERSION_STR);
-		}
-		/* report dongle driver type */
-		else
-			sprintf(info.driver, "wl");
-
-		sprintf(info.version, "%lu", drvr->drv_version);
-		if (copy_to_user(uaddr, &info, sizeof(info)))
-			return -EFAULT;
-		brcmf_dbg(TRACE, "given %*s, returning %s\n",
-			  (int)sizeof(drvname), drvname, info.driver);
-		break;
-
-		/* Get toe offload components from dongle */
-	case ETHTOOL_GRXCSUM:
-	case ETHTOOL_GTXCSUM:
-		ret = brcmf_fil_iovar_int_get(ifp, "toe_ol", &toe_cmpnt);
-		if (ret < 0)
-			return ret;
-
-		csum_dir =
-		    (cmd == ETHTOOL_GTXCSUM) ? TOE_TX_CSUM_OL : TOE_RX_CSUM_OL;
-
-		edata.cmd = cmd;
-		edata.data = (toe_cmpnt & csum_dir) ? 1 : 0;
-
-		if (copy_to_user(uaddr, &edata, sizeof(edata)))
-			return -EFAULT;
-		break;
-
-		/* Set toe offload components in dongle */
-	case ETHTOOL_SRXCSUM:
-	case ETHTOOL_STXCSUM:
-		if (copy_from_user(&edata, uaddr, sizeof(edata)))
-			return -EFAULT;
-
-		/* Read the current settings, update and write back */
-		ret = brcmf_fil_iovar_int_get(ifp, "toe_ol", &toe_cmpnt);
-		if (ret < 0)
-			return ret;
-
-		csum_dir =
-		    (cmd == ETHTOOL_STXCSUM) ? TOE_TX_CSUM_OL : TOE_RX_CSUM_OL;
-
-		if (edata.data != 0)
-			toe_cmpnt |= csum_dir;
-		else
-			toe_cmpnt &= ~csum_dir;
-
-		ret = brcmf_toe_set(ifp, toe_cmpnt);
-		if (ret < 0)
-			return ret;
-
-		/* If setting TX checksum mode, tell Linux the new mode */
-		if (cmd == ETHTOOL_STXCSUM) {
-			if (edata.data)
-				ifp->ndev->features |= NETIF_F_IP_CSUM;
-			else
-				ifp->ndev->features &= ~NETIF_F_IP_CSUM;
-		}
-
-		break;
-
-	default:
-		return -EOPNOTSUPP;
-	}
-
-	return 0;
-}
-
-static int brcmf_netdev_ioctl_entry(struct net_device *ndev, struct ifreq *ifr,
-				    int cmd)
-{
-	struct brcmf_if *ifp = netdev_priv(ndev);
-	struct brcmf_pub *drvr = ifp->drvr;
-
-	brcmf_dbg(TRACE, "Enter, idx=%d, cmd=0x%04x\n", ifp->bssidx, cmd);
-
-	if (!drvr->iflist[ifp->bssidx])
-		return -1;
-
-	if (cmd == SIOCETHTOOL)
-		return brcmf_ethtool(ifp, ifr->ifr_data);
-
-	return -EOPNOTSUPP;
-}
-
 static int brcmf_netdev_stop(struct net_device *ndev)
 {
 	struct brcmf_if *ifp = netdev_priv(ndev);
@@ -769,7 +629,6 @@ static int brcmf_netdev_open(struct net_device *ndev)
 	struct brcmf_pub *drvr = ifp->drvr;
 	struct brcmf_bus *bus_if = drvr->bus_if;
 	u32 toe_ol;
-	s32 ret = 0;
 
 	brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);
 
@@ -788,21 +647,20 @@ static int brcmf_netdev_open(struct net_device *ndev)
 	else
 		ndev->features &= ~NETIF_F_IP_CSUM;
 
-	/* Allow transmit calls */
-	netif_start_queue(ndev);
 	if (brcmf_cfg80211_up(ndev)) {
 		brcmf_err("failed to bring up cfg80211\n");
-		return -1;
+		return -EIO;
 	}
 
-	return ret;
+	/* Allow transmit calls */
+	netif_start_queue(ndev);
+	return 0;
 }
 
 static const struct net_device_ops brcmf_netdev_ops_pri = {
 	.ndo_open = brcmf_netdev_open,
 	.ndo_stop = brcmf_netdev_stop,
 	.ndo_get_stats = brcmf_netdev_get_stats,
-	.ndo_do_ioctl = brcmf_netdev_ioctl_entry,
 	.ndo_start_xmit = brcmf_netdev_start_xmit,
 	.ndo_set_mac_address = brcmf_netdev_set_mac_address,
 	.ndo_set_rx_mode = brcmf_netdev_set_multicast_list
@@ -844,7 +702,7 @@ int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked)
 
 	brcmf_dbg(INFO, "%s: Broadcom Dongle Host Driver\n", ndev->name);
 
-	ndev->destructor = free_netdev;
+	ndev->destructor = brcmf_cfg80211_free_netdev;
 	return 0;
 
 fail:
@@ -868,13 +726,6 @@ static int brcmf_net_p2p_stop(struct net_device *ndev)
 	return brcmf_cfg80211_down(ndev);
 }
 
-static int brcmf_net_p2p_do_ioctl(struct net_device *ndev,
-				  struct ifreq *ifr, int cmd)
-{
-	brcmf_dbg(TRACE, "Enter\n");
-	return 0;
-}
-
 static netdev_tx_t brcmf_net_p2p_start_xmit(struct sk_buff *skb,
 					    struct net_device *ndev)
 {
@@ -887,7 +738,6 @@ static netdev_tx_t brcmf_net_p2p_start_xmit(struct sk_buff *skb,
 static const struct net_device_ops brcmf_netdev_ops_p2p = {
 	.ndo_open = brcmf_net_p2p_open,
 	.ndo_stop = brcmf_net_p2p_stop,
-	.ndo_do_ioctl = brcmf_net_p2p_do_ioctl,
 	.ndo_start_xmit = brcmf_net_p2p_start_xmit
 };
 
@@ -1009,14 +859,12 @@ void brcmf_del_if(struct brcmf_pub *drvr, s32 bssidx)
 		}
 		/* unregister will take care of freeing it */
 		unregister_netdev(ifp->ndev);
-		if (bssidx == 0)
-			brcmf_cfg80211_detach(drvr->config);
 	} else {
 		kfree(ifp);
 	}
 }
 
-int brcmf_attach(uint bus_hdrlen, struct device *dev)
+int brcmf_attach(struct device *dev)
 {
 	struct brcmf_pub *drvr = NULL;
 	int ret = 0;
@@ -1031,7 +879,7 @@ int brcmf_attach(uint bus_hdrlen, struct device *dev)
 	mutex_init(&drvr->proto_block);
 
 	/* Link to bus module */
-	drvr->hdrlen = bus_hdrlen;
+	drvr->hdrlen = 0;
 	drvr->bus_if = dev_get_drvdata(dev);
 	drvr->bus_if->drvr = drvr;
 
@@ -1048,8 +896,6 @@ int brcmf_attach(uint bus_hdrlen, struct device *dev)
 	/* attach firmware event handler */
 	brcmf_fweh_attach(drvr);
 
-	INIT_LIST_HEAD(&drvr->bus_if->dcmd_list);
-
 	return ret;
 
 fail:
@@ -1088,7 +934,7 @@ int brcmf_bus_start(struct device *dev)
 		p2p_ifp = NULL;
 
 	/* signal bus ready */
-	bus_if->state = BRCMF_BUS_DATA;
+	brcmf_bus_change_state(bus_if, BRCMF_BUS_DATA);
 
 	/* Bus is ready, do any initialization */
 	ret = brcmf_c_preinit_dcmds(ifp);
@@ -1115,8 +961,7 @@ int brcmf_bus_start(struct device *dev)
 fail:
 	if (ret < 0) {
 		brcmf_err("failed: %d\n", ret);
-		if (drvr->config)
-			brcmf_cfg80211_detach(drvr->config);
+		brcmf_cfg80211_detach(drvr->config);
 		if (drvr->fws) {
 			brcmf_fws_del_interface(ifp);
 			brcmf_fws_deinit(drvr);
@@ -1138,14 +983,21 @@ fail:
 	return 0;
 }
 
+void brcmf_bus_add_txhdrlen(struct device *dev, uint len)
+{
+	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+	struct brcmf_pub *drvr = bus_if->drvr;
+
+	if (drvr) {
+		drvr->hdrlen += len;
+	}
+}
+
 static void brcmf_bus_detach(struct brcmf_pub *drvr)
 {
 	brcmf_dbg(TRACE, "Enter\n");
 
 	if (drvr) {
-		/* Stop the protocol module */
-		brcmf_proto_stop(drvr);
-
 		/* Stop the bus module */
 		brcmf_bus_stop(drvr->bus_if);
 	}
@@ -1177,6 +1029,8 @@ void brcmf_detach(struct device *dev)
 	/* stop firmware event handling */
 	brcmf_fweh_detach(drvr);
 
+	brcmf_bus_change_state(bus_if, BRCMF_BUS_DOWN);
+
 	/* make sure primary interface removed last */
 	for (i = BRCMF_MAX_IFS-1; i > -1; i--)
 		if (drvr->iflist[i]) {
@@ -1184,10 +1038,11 @@ void brcmf_detach(struct device *dev)
 			brcmf_del_if(drvr, i);
 		}
 
+	brcmf_cfg80211_detach(drvr->config);
+
 	brcmf_bus_detach(drvr);
 
-	if (drvr->prot)
-		brcmf_proto_detach(drvr);
+	brcmf_proto_detach(drvr);
 
 	brcmf_fws_deinit(drvr);
 
@@ -1196,6 +1051,14 @@ void brcmf_detach(struct device *dev)
 	kfree(drvr);
 }
 
+s32 brcmf_iovar_data_set(struct device *dev, char *name, void *data, u32 len)
+{
+	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+	struct brcmf_if *ifp = bus_if->drvr->iflist[0];
+
+	return brcmf_fil_iovar_data_set(ifp, name, data, len);
+}
+
 static int brcmf_get_pend_8021x_cnt(struct brcmf_if *ifp)
 {
 	return atomic_read(&ifp->pend_8021x_cnt);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_proto.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_proto.h
deleted file mode 100644
index 53c6e710f2cb..000000000000
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_proto.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef _BRCMF_PROTO_H_
-#define _BRCMF_PROTO_H_
-
-/*
- * Exported from the brcmf protocol module (brcmf_cdc)
- */
-
-/* Linkage, sets prot link and updates hdrlen in pub */
-int brcmf_proto_attach(struct brcmf_pub *drvr);
-
-/* Unlink, frees allocated protocol memory (including brcmf_proto) */
-void brcmf_proto_detach(struct brcmf_pub *drvr);
-
-/* Stop protocol: sync w/dongle state. */
-void brcmf_proto_stop(struct brcmf_pub *drvr);
-
-/* Add any protocol-specific data header.
- * Caller must reserve prot_hdrlen prepend space.
- */
-void brcmf_proto_hdrpush(struct brcmf_pub *, int ifidx, u8 offset,
-			 struct sk_buff *txp);
-
-/* Sets dongle media info (drv_version, mac address). */
-int brcmf_c_preinit_dcmds(struct brcmf_if *ifp);
-
-#endif				/* _BRCMF_PROTO_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
index b02953c4ade7..3e991897d7ca 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
@@ -32,6 +32,7 @@
 #include <linux/debugfs.h>
 #include <linux/vmalloc.h>
 #include <linux/platform_data/brcmfmac-sdio.h>
+#include <linux/moduleparam.h>
 #include <asm/unaligned.h>
 #include <defs.h>
 #include <brcmu_wifi.h>
@@ -40,6 +41,7 @@
 #include <soc.h>
 #include "sdio_host.h"
 #include "sdio_chip.h"
+#include "nvram.h"
 
 #define DCMD_RESP_TIMEOUT  2000	/* In milliseconds */
 
@@ -110,6 +112,8 @@ struct rte_console {
 #define BRCMF_TXBOUND	20	/* Default for max tx frames in
 				 one scheduling */
 
+#define BRCMF_DEFAULT_TXGLOM_SIZE	32  /* max tx frames in glom chain */
+
 #define BRCMF_TXMINMAX	1	/* Max tx frames if rx still pending */
 
 #define MEMBLOCK	2048	/* Block size used for downloading
@@ -257,9 +261,6 @@ struct rte_console {
 #define MAX_HDR_READ	(1 << 6)
 #define MAX_RX_DATASZ	2048
 
-/* Maximum milliseconds to wait for F2 to come up */
-#define BRCMF_WAIT_F2RDY	3000
-
 /* Bump up limit on waiting for HT to account for first startup;
  * if the image is doing a CRC calculation before programming the PMU
  * for HT availability, it could take a couple hundred ms more, so
@@ -360,15 +361,15 @@ struct brcmf_sdio_hdrinfo {
 	u16 len_left;
 	u16 len_nxtfrm;
 	u8 dat_offset;
+	bool lastfrm;
+	u16 tail_pad;
 };
 
 /* misc chip info needed by some of the routines */
 /* Private data for SDIO bus interaction */
 struct brcmf_sdio {
 	struct brcmf_sdio_dev *sdiodev;	/* sdio device handler */
-	struct chip_info *ci;	/* Chip info struct */
-	char *vars;		/* Variables (from CIS and/or other) */
-	uint varsz;		/* Size of variables buffer */
+	struct brcmf_chip *ci;	/* Chip info struct */
 
 	u32 ramsize;		/* Size of RAM in SOCRAM (bytes) */
 
@@ -384,7 +385,7 @@ struct brcmf_sdio {
 	u8 tx_seq;		/* Transmit sequence number (next) */
 	u8 tx_max;		/* Maximum transmit sequence allowed */
 
-	u8 hdrbuf[MAX_HDR_READ + BRCMF_SDALIGN];
+	u8 *hdrbuf;		/* buffer for handling rx frame */
 	u8 *rxhdr;		/* Header of current rx frame (in hdrbuf) */
 	u8 rx_seq;		/* Receive sequence number (expected) */
 	struct brcmf_sdio_hdrinfo cur_read;
@@ -455,6 +456,10 @@ struct brcmf_sdio {
 	bool sleeping; /* SDIO bus sleeping */
 
 	u8 tx_hdrlen;		/* sdio bus header length for tx packet */
+	bool txglom;		/* host tx glomming enable flag */
+	struct sk_buff *txglom_sgpad;	/* scatter-gather padding buffer */
+	u16 head_align;		/* buffer pointer alignment */
+	u16 sgentry_align;	/* scatter-gather buffer alignment */
 };
 
 /* clkstate */
@@ -479,6 +484,10 @@ static const uint max_roundup = 512;
 
 #define ALIGNMENT  4
 
+static int brcmf_sdio_txglomsz = BRCMF_DEFAULT_TXGLOM_SIZE;
+module_param_named(txglomsz, brcmf_sdio_txglomsz, int, 0);
+MODULE_PARM_DESC(txglomsz, "maximum tx packet chain size [SDIO]");
+
 enum brcmf_sdio_frmtype {
 	BRCMF_SDIO_FT_NORMAL,
 	BRCMF_SDIO_FT_SUPER,
@@ -499,6 +508,10 @@ enum brcmf_sdio_frmtype {
 #define BCM4334_NVRAM_NAME		"brcm/brcmfmac4334-sdio.txt"
 #define BCM4335_FIRMWARE_NAME		"brcm/brcmfmac4335-sdio.bin"
 #define BCM4335_NVRAM_NAME		"brcm/brcmfmac4335-sdio.txt"
+#define BCM43362_FIRMWARE_NAME		"brcm/brcmfmac43362-sdio.bin"
+#define BCM43362_NVRAM_NAME		"brcm/brcmfmac43362-sdio.txt"
+#define BCM4339_FIRMWARE_NAME		"brcm/brcmfmac4339-sdio.bin"
+#define BCM4339_NVRAM_NAME		"brcm/brcmfmac4339-sdio.txt"
 
 MODULE_FIRMWARE(BCM43143_FIRMWARE_NAME);
 MODULE_FIRMWARE(BCM43143_NVRAM_NAME);
@@ -514,6 +527,10 @@ MODULE_FIRMWARE(BCM4334_FIRMWARE_NAME);
 MODULE_FIRMWARE(BCM4334_NVRAM_NAME);
 MODULE_FIRMWARE(BCM4335_FIRMWARE_NAME);
 MODULE_FIRMWARE(BCM4335_NVRAM_NAME);
+MODULE_FIRMWARE(BCM43362_FIRMWARE_NAME);
+MODULE_FIRMWARE(BCM43362_NVRAM_NAME);
+MODULE_FIRMWARE(BCM4339_FIRMWARE_NAME);
+MODULE_FIRMWARE(BCM4339_NVRAM_NAME);
 
 struct brcmf_firmware_names {
 	u32 chipid;
@@ -537,11 +554,13 @@ static const struct brcmf_firmware_names brcmf_fwname_data[] = {
 	{ BCM4329_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4329) },
 	{ BCM4330_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4330) },
 	{ BCM4334_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4334) },
-	{ BCM4335_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4335) }
+	{ BCM4335_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4335) },
+	{ BCM43362_CHIP_ID, 0xFFFFFFFE, BRCMF_FIRMWARE_NVRAM(BCM43362) },
+	{ BCM4339_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4339) }
 };
 
 
-static const struct firmware *brcmf_sdbrcm_get_fw(struct brcmf_sdio *bus,
+static const struct firmware *brcmf_sdio_get_fw(struct brcmf_sdio *bus,
 						  enum brcmf_firmware_type type)
 {
 	const struct firmware *fw;
@@ -606,8 +625,8 @@ r_sdreg32(struct brcmf_sdio *bus, u32 *regvar, u32 offset)
 	u8 idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
 	int ret;
 
-	*regvar = brcmf_sdio_regrl(bus->sdiodev,
-				   bus->ci->c_inf[idx].base + offset, &ret);
+	*regvar = brcmf_sdiod_regrl(bus->sdiodev,
+				    bus->ci->c_inf[idx].base + offset, &ret);
 
 	return ret;
 }
@@ -618,15 +637,15 @@ w_sdreg32(struct brcmf_sdio *bus, u32 regval, u32 reg_offset)
 	u8 idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
 	int ret;
 
-	brcmf_sdio_regwl(bus->sdiodev,
-			 bus->ci->c_inf[idx].base + reg_offset,
-			 regval, &ret);
+	brcmf_sdiod_regwl(bus->sdiodev,
+			  bus->ci->c_inf[idx].base + reg_offset,
+			  regval, &ret);
 
 	return ret;
 }
 
 static int
-brcmf_sdbrcm_kso_control(struct brcmf_sdio *bus, bool on)
+brcmf_sdio_kso_control(struct brcmf_sdio *bus, bool on)
 {
 	u8 wr_val = 0, rd_val, cmp_val, bmask;
 	int err = 0;
@@ -636,8 +655,8 @@ brcmf_sdbrcm_kso_control(struct brcmf_sdio *bus, bool on)
 
 	wr_val = (on << SBSDIO_FUNC1_SLEEPCSR_KSO_SHIFT);
 	/* 1st KSO write goes to AOS wake up core if device is asleep  */
-	brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
-			 wr_val, &err);
+	brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
+			  wr_val, &err);
 	if (err) {
 		brcmf_err("SDIO_AOS KSO write error: %d\n", err);
 		return err;
@@ -667,15 +686,15 @@ brcmf_sdbrcm_kso_control(struct brcmf_sdio *bus, bool on)
 		 * just one write attempt may fail,
 		 * read it back until it matches written value
 		 */
-		rd_val = brcmf_sdio_regrb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
-					  &err);
+		rd_val = brcmf_sdiod_regrb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
+					   &err);
 		if (((rd_val & bmask) == cmp_val) && !err)
 			break;
 		brcmf_dbg(SDIO, "KSO wr/rd retry:%d (max: %d) ERR:%x\n",
 			  try_cnt, MAX_KSO_ATTEMPTS, err);
 		udelay(KSO_WAIT_US);
-		brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
-				 wr_val, &err);
+		brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
+				  wr_val, &err);
 	} while (try_cnt++ < MAX_KSO_ATTEMPTS);
 
 	return err;
@@ -686,7 +705,7 @@ brcmf_sdbrcm_kso_control(struct brcmf_sdio *bus, bool on)
 #define HOSTINTMASK		(I_HMB_SW_MASK | I_CHIPACTIVE)
 
 /* Turn backplane clock on or off */
-static int brcmf_sdbrcm_htclk(struct brcmf_sdio *bus, bool on, bool pendok)
+static int brcmf_sdio_htclk(struct brcmf_sdio *bus, bool on, bool pendok)
 {
 	int err;
 	u8 clkctl, clkreq, devctl;
@@ -706,16 +725,16 @@ static int brcmf_sdbrcm_htclk(struct brcmf_sdio *bus, bool on, bool pendok)
 		clkreq =
 		    bus->alp_only ? SBSDIO_ALP_AVAIL_REQ : SBSDIO_HT_AVAIL_REQ;
 
-		brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
-				 clkreq, &err);
+		brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
+				  clkreq, &err);
 		if (err) {
 			brcmf_err("HT Avail request error: %d\n", err);
 			return -EBADE;
 		}
 
 		/* Check current status */
-		clkctl = brcmf_sdio_regrb(bus->sdiodev,
-					  SBSDIO_FUNC1_CHIPCLKCSR, &err);
+		clkctl = brcmf_sdiod_regrb(bus->sdiodev,
+					   SBSDIO_FUNC1_CHIPCLKCSR, &err);
 		if (err) {
 			brcmf_err("HT Avail read error: %d\n", err);
 			return -EBADE;
@@ -724,8 +743,8 @@ static int brcmf_sdbrcm_htclk(struct brcmf_sdio *bus, bool on, bool pendok)
 		/* Go to pending and await interrupt if appropriate */
 		if (!SBSDIO_CLKAV(clkctl, bus->alp_only) && pendok) {
 			/* Allow only clock-available interrupt */
-			devctl = brcmf_sdio_regrb(bus->sdiodev,
-						  SBSDIO_DEVICE_CTL, &err);
+			devctl = brcmf_sdiod_regrb(bus->sdiodev,
+						   SBSDIO_DEVICE_CTL, &err);
 			if (err) {
 				brcmf_err("Devctl error setting CA: %d\n",
 					  err);
@@ -733,28 +752,28 @@ static int brcmf_sdbrcm_htclk(struct brcmf_sdio *bus, bool on, bool pendok)
 			}
 
 			devctl |= SBSDIO_DEVCTL_CA_INT_ONLY;
-			brcmf_sdio_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL,
-					 devctl, &err);
+			brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL,
+					  devctl, &err);
 			brcmf_dbg(SDIO, "CLKCTL: set PENDING\n");
 			bus->clkstate = CLK_PENDING;
 
 			return 0;
 		} else if (bus->clkstate == CLK_PENDING) {
 			/* Cancel CA-only interrupt filter */
-			devctl = brcmf_sdio_regrb(bus->sdiodev,
-						  SBSDIO_DEVICE_CTL, &err);
+			devctl = brcmf_sdiod_regrb(bus->sdiodev,
+						   SBSDIO_DEVICE_CTL, &err);
 			devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
-			brcmf_sdio_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL,
-					 devctl, &err);
+			brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL,
+					  devctl, &err);
 		}
 
 		/* Otherwise, wait here (polling) for HT Avail */
 		timeout = jiffies +
 			  msecs_to_jiffies(PMU_MAX_TRANSITION_DLY/1000);
 		while (!SBSDIO_CLKAV(clkctl, bus->alp_only)) {
-			clkctl = brcmf_sdio_regrb(bus->sdiodev,
-						  SBSDIO_FUNC1_CHIPCLKCSR,
-						  &err);
+			clkctl = brcmf_sdiod_regrb(bus->sdiodev,
+						   SBSDIO_FUNC1_CHIPCLKCSR,
+						   &err);
 			if (time_after(jiffies, timeout))
 				break;
 			else
@@ -787,16 +806,16 @@ static int brcmf_sdbrcm_htclk(struct brcmf_sdio *bus, bool on, bool pendok)
 
 		if (bus->clkstate == CLK_PENDING) {
 			/* Cancel CA-only interrupt filter */
-			devctl = brcmf_sdio_regrb(bus->sdiodev,
-						  SBSDIO_DEVICE_CTL, &err);
+			devctl = brcmf_sdiod_regrb(bus->sdiodev,
+						   SBSDIO_DEVICE_CTL, &err);
 			devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
-			brcmf_sdio_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL,
-					 devctl, &err);
+			brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL,
+					  devctl, &err);
 		}
 
 		bus->clkstate = CLK_SDONLY;
-		brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
-				 clkreq, &err);
+		brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
+				  clkreq, &err);
 		brcmf_dbg(SDIO, "CLKCTL: turned OFF\n");
 		if (err) {
 			brcmf_err("Failed access turning clock off: %d\n",
@@ -808,7 +827,7 @@ static int brcmf_sdbrcm_htclk(struct brcmf_sdio *bus, bool on, bool pendok)
 }
 
 /* Change idle/active SD state */
-static int brcmf_sdbrcm_sdclk(struct brcmf_sdio *bus, bool on)
+static int brcmf_sdio_sdclk(struct brcmf_sdio *bus, bool on)
 {
 	brcmf_dbg(SDIO, "Enter\n");
 
@@ -821,7 +840,7 @@ static int brcmf_sdbrcm_sdclk(struct brcmf_sdio *bus, bool on)
 }
 
 /* Transition SD and backplane clock readiness */
-static int brcmf_sdbrcm_clkctl(struct brcmf_sdio *bus, uint target, bool pendok)
+static int brcmf_sdio_clkctl(struct brcmf_sdio *bus, uint target, bool pendok)
 {
 #ifdef DEBUG
 	uint oldstate = bus->clkstate;
@@ -832,7 +851,7 @@ static int brcmf_sdbrcm_clkctl(struct brcmf_sdio *bus, uint target, bool pendok)
 	/* Early exit if we're already there */
 	if (bus->clkstate == target) {
 		if (target == CLK_AVAIL) {
-			brcmf_sdbrcm_wd_timer(bus, BRCMF_WD_POLL_MS);
+			brcmf_sdio_wd_timer(bus, BRCMF_WD_POLL_MS);
 			bus->activity = true;
 		}
 		return 0;
@@ -842,32 +861,32 @@ static int brcmf_sdbrcm_clkctl(struct brcmf_sdio *bus, uint target, bool pendok)
 	case CLK_AVAIL:
 		/* Make sure SD clock is available */
 		if (bus->clkstate == CLK_NONE)
-			brcmf_sdbrcm_sdclk(bus, true);
+			brcmf_sdio_sdclk(bus, true);
 		/* Now request HT Avail on the backplane */
-		brcmf_sdbrcm_htclk(bus, true, pendok);
-		brcmf_sdbrcm_wd_timer(bus, BRCMF_WD_POLL_MS);
+		brcmf_sdio_htclk(bus, true, pendok);
+		brcmf_sdio_wd_timer(bus, BRCMF_WD_POLL_MS);
 		bus->activity = true;
 		break;
 
 	case CLK_SDONLY:
 		/* Remove HT request, or bring up SD clock */
 		if (bus->clkstate == CLK_NONE)
-			brcmf_sdbrcm_sdclk(bus, true);
+			brcmf_sdio_sdclk(bus, true);
 		else if (bus->clkstate == CLK_AVAIL)
-			brcmf_sdbrcm_htclk(bus, false, false);
+			brcmf_sdio_htclk(bus, false, false);
 		else
 			brcmf_err("request for %d -> %d\n",
 				  bus->clkstate, target);
-		brcmf_sdbrcm_wd_timer(bus, BRCMF_WD_POLL_MS);
+		brcmf_sdio_wd_timer(bus, BRCMF_WD_POLL_MS);
 		break;
 
 	case CLK_NONE:
 		/* Make sure to remove HT request */
 		if (bus->clkstate == CLK_AVAIL)
-			brcmf_sdbrcm_htclk(bus, false, false);
+			brcmf_sdio_htclk(bus, false, false);
 		/* Now remove the SD clock */
-		brcmf_sdbrcm_sdclk(bus, false);
-		brcmf_sdbrcm_wd_timer(bus, 0);
+		brcmf_sdio_sdclk(bus, false);
+		brcmf_sdio_wd_timer(bus, 0);
 		break;
 	}
 #ifdef DEBUG
@@ -878,7 +897,7 @@ static int brcmf_sdbrcm_clkctl(struct brcmf_sdio *bus, uint target, bool pendok)
 }
 
 static int
-brcmf_sdbrcm_bus_sleep(struct brcmf_sdio *bus, bool sleep, bool pendok)
+brcmf_sdio_bus_sleep(struct brcmf_sdio *bus, bool sleep, bool pendok)
 {
 	int err = 0;
 	brcmf_dbg(TRACE, "Enter\n");
@@ -901,13 +920,13 @@ brcmf_sdbrcm_bus_sleep(struct brcmf_sdio *bus, bool sleep, bool pendok)
 			    brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) &&
 			    data_ok(bus)))
 				 return -EBUSY;
-			err = brcmf_sdbrcm_kso_control(bus, false);
+			err = brcmf_sdio_kso_control(bus, false);
 			/* disable watchdog */
 			if (!err)
-				brcmf_sdbrcm_wd_timer(bus, 0);
+				brcmf_sdio_wd_timer(bus, 0);
 		} else {
 			bus->idlecount = 0;
-			err = brcmf_sdbrcm_kso_control(bus, true);
+			err = brcmf_sdio_kso_control(bus, true);
 		}
 		if (!err) {
 			/* Change state */
@@ -925,16 +944,16 @@ end:
 	/* control clocks */
 	if (sleep) {
 		if (!bus->sr_enabled)
-			brcmf_sdbrcm_clkctl(bus, CLK_NONE, pendok);
+			brcmf_sdio_clkctl(bus, CLK_NONE, pendok);
 	} else {
-		brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, pendok);
+		brcmf_sdio_clkctl(bus, CLK_AVAIL, pendok);
 	}
 
 	return err;
 
 }
 
-static u32 brcmf_sdbrcm_hostmail(struct brcmf_sdio *bus)
+static u32 brcmf_sdio_hostmail(struct brcmf_sdio *bus)
 {
 	u32 intstatus = 0;
 	u32 hmb_data;
@@ -1010,7 +1029,7 @@ static u32 brcmf_sdbrcm_hostmail(struct brcmf_sdio *bus)
 	return intstatus;
 }
 
-static void brcmf_sdbrcm_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx)
+static void brcmf_sdio_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx)
 {
 	uint retries = 0;
 	u16 lastrbc;
@@ -1022,18 +1041,18 @@ static void brcmf_sdbrcm_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx)
 		  rtx ? ", send NAK" : "");
 
 	if (abort)
-		brcmf_sdcard_abort(bus->sdiodev, SDIO_FUNC_2);
+		brcmf_sdiod_abort(bus->sdiodev, SDIO_FUNC_2);
 
-	brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
-			 SFC_RF_TERM, &err);
+	brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
+			  SFC_RF_TERM, &err);
 	bus->sdcnt.f1regdata++;
 
 	/* Wait until the packet has been flushed (device/FIFO stable) */
 	for (lastrbc = retries = 0xffff; retries > 0; retries--) {
-		hi = brcmf_sdio_regrb(bus->sdiodev,
-				      SBSDIO_FUNC1_RFRAMEBCHI, &err);
-		lo = brcmf_sdio_regrb(bus->sdiodev,
-				      SBSDIO_FUNC1_RFRAMEBCLO, &err);
+		hi = brcmf_sdiod_regrb(bus->sdiodev,
+				       SBSDIO_FUNC1_RFRAMEBCHI, &err);
+		lo = brcmf_sdiod_regrb(bus->sdiodev,
+				       SBSDIO_FUNC1_RFRAMEBCLO, &err);
 		bus->sdcnt.f1regdata += 2;
 
 		if ((hi == 0) && (lo == 0))
@@ -1063,14 +1082,10 @@ static void brcmf_sdbrcm_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx)
 
 	/* Clear partial in any case */
 	bus->cur_read.len = 0;
-
-	/* If we can't reach the device, signal failure */
-	if (err)
-		bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
 }
 
 /* return total length of buffer chain */
-static uint brcmf_sdbrcm_glom_len(struct brcmf_sdio *bus)
+static uint brcmf_sdio_glom_len(struct brcmf_sdio *bus)
 {
 	struct sk_buff *p;
 	uint total;
@@ -1081,7 +1096,7 @@ static uint brcmf_sdbrcm_glom_len(struct brcmf_sdio *bus)
 	return total;
 }
 
-static void brcmf_sdbrcm_free_glom(struct brcmf_sdio *bus)
+static void brcmf_sdio_free_glom(struct brcmf_sdio *bus)
 {
 	struct sk_buff *cur, *next;
 
@@ -1097,10 +1112,18 @@ static void brcmf_sdbrcm_free_glom(struct brcmf_sdio *bus)
  * host and WiFi dongle which contains information needed for SDIO core and
  * firmware
  *
- * It consists of 2 parts: hw header and software header
+ * It consists of 3 parts: hardware header, hardware extension header and
+ * software header
  * hardware header (frame tag) - 4 bytes
  * Byte 0~1: Frame length
  * Byte 2~3: Checksum, bit-wise inverse of frame length
+ * hardware extension header - 8 bytes
+ * Tx glom mode only, N/A for Rx or normal Tx
+ * Byte 0~1: Packet length excluding hw frame tag
+ * Byte 2: Reserved
+ * Byte 3: Frame flags, bit 0: last frame indication
+ * Byte 4~5: Reserved
+ * Byte 6~7: Tail padding length
  * software header - 8 bytes
  * Byte 0: Rx/Tx sequence number
  * Byte 1: 4 MSB Channel number, 4 LSB arbitrary flag
@@ -1111,6 +1134,7 @@ static void brcmf_sdbrcm_free_glom(struct brcmf_sdio *bus)
  * Byte 6~7: Reserved
  */
 #define SDPCM_HWHDR_LEN			4
+#define SDPCM_HWEXT_LEN			8
 #define SDPCM_SWHDR_LEN			8
 #define SDPCM_HDRLEN			(SDPCM_HWHDR_LEN + SDPCM_SWHDR_LEN)
 /* software header */
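A minimal sketch of checking the 4-byte hardware frame tag described above (frame length followed by its bit-wise inverse as checksum), along the lines of brcmf_sdio_hdparse; the function name and the error code chosen for a short frame are illustrative only, and get_unaligned_le16 comes from <asm/unaligned.h>, which is already included here:

	/* Illustrative only: validate the hw frame tag at the start of a frame. */
	static int sdpcm_check_hwhdr(const u8 *header, u16 *lenp)
	{
		u16 len = get_unaligned_le16(header);
		u16 checksum = get_unaligned_le16(header + sizeof(u16));

		/* checksum is the bit-wise inverse of the frame length */
		if ((u16)(~(len ^ checksum)))
			return -EIO;
		if (len < SDPCM_HDRLEN)
			return -EINVAL;
		*lenp = len;
		return 0;
	}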
@@ -1147,7 +1171,7 @@ static int brcmf_sdio_hdparse(struct brcmf_sdio *bus, u8 *header,
 	u8 rx_seq, fc, tx_seq_max;
 	u32 swheader;
 
-	trace_brcmf_sdpcm_hdr(false, header);
+	trace_brcmf_sdpcm_hdr(SDPCM_RX, header);
 
 	/* hw header */
 	len = get_unaligned_le16(header);
@@ -1160,7 +1184,7 @@ static int brcmf_sdio_hdparse(struct brcmf_sdio *bus, u8 *header,
 	if ((u16)(~(len ^ checksum))) {
 		brcmf_err("HW header checksum error\n");
 		bus->sdcnt.rx_badhdr++;
-		brcmf_sdbrcm_rxfail(bus, false, false);
+		brcmf_sdio_rxfail(bus, false, false);
 		return -EIO;
 	}
 	if (len < SDPCM_HDRLEN) {
@@ -1192,7 +1216,7 @@ static int brcmf_sdio_hdparse(struct brcmf_sdio *bus, u8 *header,
 	    type != BRCMF_SDIO_FT_SUPER) {
 		brcmf_err("HW header length too long\n");
 		bus->sdcnt.rx_toolong++;
-		brcmf_sdbrcm_rxfail(bus, false, false);
+		brcmf_sdio_rxfail(bus, false, false);
 		rd->len = 0;
 		return -EPROTO;
 	}
@@ -1211,7 +1235,7 @@ static int brcmf_sdio_hdparse(struct brcmf_sdio *bus, u8 *header,
 	if (rd->dat_offset < SDPCM_HDRLEN || rd->dat_offset > rd->len) {
 		brcmf_err("seq %d: bad data offset\n", rx_seq);
 		bus->sdcnt.rx_badhdr++;
-		brcmf_sdbrcm_rxfail(bus, false, false);
+		brcmf_sdio_rxfail(bus, false, false);
 		rd->len = 0;
 		return -ENXIO;
 	}
@@ -1260,25 +1284,34 @@ static inline void brcmf_sdio_update_hwhdr(u8 *header, u16 frm_length)
 static void brcmf_sdio_hdpack(struct brcmf_sdio *bus, u8 *header,
 			      struct brcmf_sdio_hdrinfo *hd_info)
 {
-	u32 sw_header;
+	u32 hdrval;
+	u8 hdr_offset;
 
 	brcmf_sdio_update_hwhdr(header, hd_info->len);
-
-	sw_header = bus->tx_seq;
-	sw_header |= (hd_info->channel << SDPCM_CHANNEL_SHIFT) &
-		     SDPCM_CHANNEL_MASK;
-	sw_header |= (hd_info->dat_offset << SDPCM_DOFFSET_SHIFT) &
-		     SDPCM_DOFFSET_MASK;
-	*(((__le32 *)header) + 1) = cpu_to_le32(sw_header);
-	*(((__le32 *)header) + 2) = 0;
-	trace_brcmf_sdpcm_hdr(true, header);
+	hdr_offset = SDPCM_HWHDR_LEN;
+
+	if (bus->txglom) {
+		hdrval = (hd_info->len - hdr_offset) | (hd_info->lastfrm << 24);
+		*((__le32 *)(header + hdr_offset)) = cpu_to_le32(hdrval);
+		hdrval = (u16)hd_info->tail_pad << 16;
+		*(((__le32 *)(header + hdr_offset)) + 1) = cpu_to_le32(hdrval);
+		hdr_offset += SDPCM_HWEXT_LEN;
+	}
+
+	hdrval = hd_info->seq_num;
+	hdrval |= (hd_info->channel << SDPCM_CHANNEL_SHIFT) &
+		  SDPCM_CHANNEL_MASK;
+	hdrval |= (hd_info->dat_offset << SDPCM_DOFFSET_SHIFT) &
+		  SDPCM_DOFFSET_MASK;
+	*((__le32 *)(header + hdr_offset)) = cpu_to_le32(hdrval);
+	*(((__le32 *)(header + hdr_offset)) + 1) = 0;
+	trace_brcmf_sdpcm_hdr(SDPCM_TX + !!(bus->txglom), header);
 }
 
-static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
+static u8 brcmf_sdio_rxglom(struct brcmf_sdio *bus, u8 rxseq)
 {
 	u16 dlen, totlen;
 	u8 *dptr, num = 0;
-	u32 align = 0;
 	u16 sublen;
 	struct sk_buff *pfirst, *pnext;
 
@@ -1293,11 +1326,6 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
 	brcmf_dbg(SDIO, "start: glomd %p glom %p\n",
 		  bus->glomd, skb_peek(&bus->glom));
 
-	if (bus->sdiodev->pdata)
-		align = bus->sdiodev->pdata->sd_sgentry_align;
-	if (align < 4)
-		align = 4;
-
 	/* If there's a descriptor, generate the packet chain */
 	if (bus->glomd) {
 		pfirst = pnext = NULL;
@@ -1321,9 +1349,9 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
 				pnext = NULL;
 				break;
 			}
-			if (sublen % align) {
+			if (sublen % bus->sgentry_align) {
 				brcmf_err("sublen %d not multiple of %d\n",
-					  sublen, align);
+					  sublen, bus->sgentry_align);
 			}
 			totlen += sublen;
 
@@ -1336,7 +1364,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
 			}
 
 			/* Allocate/chain packet for next subframe */
-			pnext = brcmu_pkt_buf_get_skb(sublen + align);
+			pnext = brcmu_pkt_buf_get_skb(sublen + bus->sgentry_align);
 			if (pnext == NULL) {
 				brcmf_err("bcm_pkt_buf_get_skb failed, num %d len %d\n",
 					  num, sublen);
@@ -1345,7 +1373,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
 			skb_queue_tail(&bus->glom, pnext);
 
 			/* Adhere to start alignment requirements */
-			pkt_align(pnext, sublen, align);
+			pkt_align(pnext, sublen, bus->sgentry_align);
 		}
 
 		/* If all allocations succeeded, save packet chain
@@ -1360,7 +1388,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
 			}
 			pfirst = pnext = NULL;
 		} else {
-			brcmf_sdbrcm_free_glom(bus);
+			brcmf_sdio_free_glom(bus);
 			num = 0;
 		}
 
@@ -1383,16 +1411,15 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
 		}
 
 		pfirst = skb_peek(&bus->glom);
-		dlen = (u16) brcmf_sdbrcm_glom_len(bus);
+		dlen = (u16) brcmf_sdio_glom_len(bus);
 
 		/* Do an SDIO read for the superframe.  Configurable iovar to
 		 * read directly into the chained packet, or allocate a large
 		 * packet and copy into the chain.
 		 */
 		sdio_claim_host(bus->sdiodev->func[1]);
-		errcode = brcmf_sdcard_recv_chain(bus->sdiodev,
-				bus->sdiodev->sbwad,
-				SDIO_FUNC_2, F2SYNC, &bus->glom, dlen);
+		errcode = brcmf_sdiod_recv_chain(bus->sdiodev,
+						 &bus->glom, dlen);
 		sdio_release_host(bus->sdiodev->func[1]);
 		bus->sdcnt.f2rxdata++;
 
@@ -1403,12 +1430,12 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
 
 			sdio_claim_host(bus->sdiodev->func[1]);
 			if (bus->glomerr++ < 3) {
-				brcmf_sdbrcm_rxfail(bus, true, true);
+				brcmf_sdio_rxfail(bus, true, true);
 			} else {
 				bus->glomerr = 0;
-				brcmf_sdbrcm_rxfail(bus, true, false);
+				brcmf_sdio_rxfail(bus, true, false);
 				bus->sdcnt.rxglomfail++;
-				brcmf_sdbrcm_free_glom(bus);
+				brcmf_sdio_free_glom(bus);
 			}
 			sdio_release_host(bus->sdiodev->func[1]);
 			return 0;
@@ -1456,12 +1483,12 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
 			if (bus->glomerr++ < 3) {
 				/* Restore superframe header space */
 				skb_push(pfirst, sfdoff);
-				brcmf_sdbrcm_rxfail(bus, true, true);
+				brcmf_sdio_rxfail(bus, true, true);
 			} else {
 				bus->glomerr = 0;
-				brcmf_sdbrcm_rxfail(bus, true, false);
+				brcmf_sdio_rxfail(bus, true, false);
 				bus->sdcnt.rxglomfail++;
-				brcmf_sdbrcm_free_glom(bus);
+				brcmf_sdio_free_glom(bus);
 			}
 			sdio_release_host(bus->sdiodev->func[1]);
 			bus->cur_read.len = 0;
@@ -1505,8 +1532,8 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
 	return num;
 }
 
-static int brcmf_sdbrcm_dcmd_resp_wait(struct brcmf_sdio *bus, uint *condition,
-					bool *pending)
+static int brcmf_sdio_dcmd_resp_wait(struct brcmf_sdio *bus, uint *condition,
+				     bool *pending)
 {
 	DECLARE_WAITQUEUE(wait, current);
 	int timeout = msecs_to_jiffies(DCMD_RESP_TIMEOUT);
@@ -1527,7 +1554,7 @@ static int brcmf_sdbrcm_dcmd_resp_wait(struct brcmf_sdio *bus, uint *condition,
 	return timeout;
 }
 
-static int brcmf_sdbrcm_dcmd_resp_wake(struct brcmf_sdio *bus)
+static int brcmf_sdio_dcmd_resp_wake(struct brcmf_sdio *bus)
 {
 	if (waitqueue_active(&bus->dcmd_resp_wait))
 		wake_up_interruptible(&bus->dcmd_resp_wait);
@@ -1535,7 +1562,7 @@ static int brcmf_sdbrcm_dcmd_resp_wake(struct brcmf_sdio *bus)
 	return 0;
 }
 static void
-brcmf_sdbrcm_read_control(struct brcmf_sdio *bus, u8 *hdr, uint len, uint doff)
+brcmf_sdio_read_control(struct brcmf_sdio *bus, u8 *hdr, uint len, uint doff)
 {
 	uint rdlen, pad;
 	u8 *buf = NULL, *rbuf;
@@ -1549,9 +1576,9 @@ brcmf_sdbrcm_read_control(struct brcmf_sdio *bus, u8 *hdr, uint len, uint doff)
 		goto done;
 
 	rbuf = bus->rxbuf;
-	pad = ((unsigned long)rbuf % BRCMF_SDALIGN);
+	pad = ((unsigned long)rbuf % bus->head_align);
 	if (pad)
-		rbuf += (BRCMF_SDALIGN - pad);
+		rbuf += (bus->head_align - pad);
 
 	/* Copy the already-read portion over */
 	memcpy(buf, hdr, BRCMF_FIRSTREAD);
@@ -1565,19 +1592,15 @@ brcmf_sdbrcm_read_control(struct brcmf_sdio *bus, u8 *hdr, uint len, uint doff)
 		if ((pad <= bus->roundup) && (pad < bus->blocksize) &&
 		    ((len + pad) < bus->sdiodev->bus_if->maxctl))
 			rdlen += pad;
-	} else if (rdlen % BRCMF_SDALIGN) {
-		rdlen += BRCMF_SDALIGN - (rdlen % BRCMF_SDALIGN);
+	} else if (rdlen % bus->head_align) {
+		rdlen += bus->head_align - (rdlen % bus->head_align);
 	}
 
-	/* Satisfy length-alignment requirements */
-	if (rdlen & (ALIGNMENT - 1))
-		rdlen = roundup(rdlen, ALIGNMENT);
-
 	/* Drop if the read is too big or it exceeds our maximum */
 	if ((rdlen + BRCMF_FIRSTREAD) > bus->sdiodev->bus_if->maxctl) {
 		brcmf_err("%d-byte control read exceeds %d-byte buffer\n",
 			  rdlen, bus->sdiodev->bus_if->maxctl);
-		brcmf_sdbrcm_rxfail(bus, false, false);
+		brcmf_sdio_rxfail(bus, false, false);
 		goto done;
 	}
 
@@ -1585,15 +1608,12 @@ brcmf_sdbrcm_read_control(struct brcmf_sdio *bus, u8 *hdr, uint len, uint doff)
 		brcmf_err("%d-byte ctl frame (%d-byte ctl data) exceeds %d-byte limit\n",
 			  len, len - doff, bus->sdiodev->bus_if->maxctl);
 		bus->sdcnt.rx_toolong++;
-		brcmf_sdbrcm_rxfail(bus, false, false);
+		brcmf_sdio_rxfail(bus, false, false);
 		goto done;
 	}
 
 	/* Read remain of frame body */
-	sdret = brcmf_sdcard_recv_buf(bus->sdiodev,
-				bus->sdiodev->sbwad,
-				SDIO_FUNC_2,
-				F2SYNC, rbuf, rdlen);
+	sdret = brcmf_sdiod_recv_buf(bus->sdiodev, rbuf, rdlen);
 	bus->sdcnt.f2rxdata++;
 
 	/* Control frame failures need retransmission */
@@ -1601,7 +1621,7 @@ brcmf_sdbrcm_read_control(struct brcmf_sdio *bus, u8 *hdr, uint len, uint doff)
 		brcmf_err("read %d control bytes failed: %d\n",
 			  rdlen, sdret);
 		bus->sdcnt.rxc_errors++;
-		brcmf_sdbrcm_rxfail(bus, true, true);
+		brcmf_sdio_rxfail(bus, true, true);
 		goto done;
 	} else
 		memcpy(buf + BRCMF_FIRSTREAD, rbuf, rdlen);
@@ -1626,19 +1646,19 @@ gotpkt:
 
 done:
 	/* Awake any waiters */
-	brcmf_sdbrcm_dcmd_resp_wake(bus);
+	brcmf_sdio_dcmd_resp_wake(bus);
 }
 
 /* Pad read to blocksize for efficiency */
-static void brcmf_pad(struct brcmf_sdio *bus, u16 *pad, u16 *rdlen)
+static void brcmf_sdio_pad(struct brcmf_sdio *bus, u16 *pad, u16 *rdlen)
 {
 	if (bus->roundup && bus->blocksize && *rdlen > bus->blocksize) {
 		*pad = bus->blocksize - (*rdlen % bus->blocksize);
 		if (*pad <= bus->roundup && *pad < bus->blocksize &&
 		    *rdlen + *pad + BRCMF_FIRSTREAD < MAX_RX_DATASZ)
 			*rdlen += *pad;
-	} else if (*rdlen % BRCMF_SDALIGN) {
-		*rdlen += BRCMF_SDALIGN - (*rdlen % BRCMF_SDALIGN);
+	} else if (*rdlen % bus->head_align) {
+		*rdlen += bus->head_align - (*rdlen % bus->head_align);
 	}
 }
 
@@ -1658,8 +1678,7 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
 	bus->rxpending = true;
 
 	for (rd->seq_num = bus->rx_seq, rxleft = maxframes;
-	     !bus->rxskip && rxleft &&
-	     bus->sdiodev->bus_if->state != BRCMF_BUS_DOWN;
+	     !bus->rxskip && rxleft && brcmf_bus_ready(bus->sdiodev->bus_if);
 	     rd->seq_num++, rxleft--) {
 
 		/* Handle glomming separately */
@@ -1667,7 +1686,7 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
 			u8 cnt;
 			brcmf_dbg(GLOM, "calling rxglom: glomd %p, glom %p\n",
 				  bus->glomd, skb_peek(&bus->glom));
-			cnt = brcmf_sdbrcm_rxglom(bus, rd->seq_num);
+			cnt = brcmf_sdio_rxglom(bus, rd->seq_num);
 			brcmf_dbg(GLOM, "rxglom returned %d\n", cnt);
 			rd->seq_num += cnt - 1;
 			rxleft = (rxleft > cnt) ? (rxleft - cnt) : 1;
@@ -1678,17 +1697,14 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
 		/* read header first for unknown frame length */
 		sdio_claim_host(bus->sdiodev->func[1]);
 		if (!rd->len) {
-			ret = brcmf_sdcard_recv_buf(bus->sdiodev,
-						      bus->sdiodev->sbwad,
-						      SDIO_FUNC_2, F2SYNC,
-						      bus->rxhdr,
-						      BRCMF_FIRSTREAD);
+			ret = brcmf_sdiod_recv_buf(bus->sdiodev,
+						   bus->rxhdr, BRCMF_FIRSTREAD);
 			bus->sdcnt.f2rxhdrs++;
 			if (ret < 0) {
 				brcmf_err("RXHEADER FAILED: %d\n",
 					  ret);
 				bus->sdcnt.rx_hdrfail++;
-				brcmf_sdbrcm_rxfail(bus, true, true);
+				brcmf_sdio_rxfail(bus, true, true);
 				sdio_release_host(bus->sdiodev->func[1]);
 				continue;
 			}
@@ -1707,9 +1723,9 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
 			}
 
 			if (rd->channel == SDPCM_CONTROL_CHANNEL) {
-				brcmf_sdbrcm_read_control(bus, bus->rxhdr,
-							  rd->len,
-							  rd->dat_offset);
+				brcmf_sdio_read_control(bus, bus->rxhdr,
+							rd->len,
+							rd->dat_offset);
 				/* prepare the descriptor for the next read */
 				rd->len = rd->len_nxtfrm << 4;
 				rd->len_nxtfrm = 0;
@@ -1723,23 +1739,22 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
 			head_read = BRCMF_FIRSTREAD;
 		}
 
-		brcmf_pad(bus, &pad, &rd->len_left);
+		brcmf_sdio_pad(bus, &pad, &rd->len_left);
 
 		pkt = brcmu_pkt_buf_get_skb(rd->len_left + head_read +
-					    BRCMF_SDALIGN);
+					    bus->head_align);
 		if (!pkt) {
 			/* Give up on data, request rtx of events */
 			brcmf_err("brcmu_pkt_buf_get_skb failed\n");
-			brcmf_sdbrcm_rxfail(bus, false,
+			brcmf_sdio_rxfail(bus, false,
 					    RETRYCHAN(rd->channel));
 			sdio_release_host(bus->sdiodev->func[1]);
 			continue;
 		}
 		skb_pull(pkt, head_read);
-		pkt_align(pkt, rd->len_left, BRCMF_SDALIGN);
+		pkt_align(pkt, rd->len_left, bus->head_align);
 
-		ret = brcmf_sdcard_recv_pkt(bus->sdiodev, bus->sdiodev->sbwad,
-					      SDIO_FUNC_2, F2SYNC, pkt);
+		ret = brcmf_sdiod_recv_pkt(bus->sdiodev, pkt);
 		bus->sdcnt.f2rxdata++;
 		sdio_release_host(bus->sdiodev->func[1]);
 
@@ -1748,7 +1763,7 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
 				  rd->len, rd->channel, ret);
 			brcmu_pkt_buf_free_skb(pkt);
 			sdio_claim_host(bus->sdiodev->func[1]);
-			brcmf_sdbrcm_rxfail(bus, true,
+			brcmf_sdio_rxfail(bus, true,
 					    RETRYCHAN(rd->channel));
 			sdio_release_host(bus->sdiodev->func[1]);
 			continue;
@@ -1773,7 +1788,7 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
 					  rd->len,
 					  roundup(rd_new.len, 16) >> 4);
 				rd->len = 0;
-				brcmf_sdbrcm_rxfail(bus, true, true);
+				brcmf_sdio_rxfail(bus, true, true);
 				sdio_release_host(bus->sdiodev->func[1]);
 				brcmu_pkt_buf_free_skb(pkt);
 				continue;
@@ -1795,7 +1810,7 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
 				/* Force retry w/normal header read */
 				rd->len = 0;
 				sdio_claim_host(bus->sdiodev->func[1]);
-				brcmf_sdbrcm_rxfail(bus, false, true);
+				brcmf_sdio_rxfail(bus, false, true);
 				sdio_release_host(bus->sdiodev->func[1]);
 				brcmu_pkt_buf_free_skb(pkt);
 				continue;
@@ -1820,7 +1835,7 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
 				brcmf_err("%s: glom superframe w/o "
 					  "descriptor!\n", __func__);
 				sdio_claim_host(bus->sdiodev->func[1]);
-				brcmf_sdbrcm_rxfail(bus, false, false);
+				brcmf_sdio_rxfail(bus, false, false);
 				sdio_release_host(bus->sdiodev->func[1]);
 			}
 			/* prepare the descriptor for the next read */
@@ -1864,13 +1879,36 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
 }
 
 static void
-brcmf_sdbrcm_wait_event_wakeup(struct brcmf_sdio *bus)
+brcmf_sdio_wait_event_wakeup(struct brcmf_sdio *bus)
 {
 	if (waitqueue_active(&bus->ctrl_wait))
 		wake_up_interruptible(&bus->ctrl_wait);
 	return;
 }
 
+static int brcmf_sdio_txpkt_hdalign(struct brcmf_sdio *bus, struct sk_buff *pkt)
+{
+	u16 head_pad;
+	u8 *dat_buf;
+
+	dat_buf = (u8 *)(pkt->data);
+
+	/* Check head padding */
+	head_pad = ((unsigned long)dat_buf % bus->head_align);
+	if (head_pad) {
+		if (skb_headroom(pkt) < head_pad) {
+			bus->sdiodev->bus_if->tx_realloc++;
+			head_pad = 0;
+			if (skb_cow(pkt, head_pad))
+				return -ENOMEM;
+		}
+		skb_push(pkt, head_pad);
+		dat_buf = (u8 *)(pkt->data);
+		memset(dat_buf, 0, head_pad + bus->tx_hdrlen);
+	}
+	return head_pad;
+}
+
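Editor's note: a minimal sketch of the head-padding arithmetic used by brcmf_sdio_txpkt_hdalign() above, reduced to plain user-space C. The pointer value and alignment are made-up example inputs and head_pad_for() is a hypothetical helper, not part of the driver; skb_push(pkt, head_pad) in the function above moves the data pointer back by exactly this many bytes, which lands it on a head_align boundary.

#include <stdint.h>
#include <stdio.h>

/* bytes to push in front of the payload so that the new start,
 * data - head_pad, is head_align aligned (mirrors the modulo above)
 */
static unsigned int head_pad_for(uintptr_t data, unsigned int head_align)
{
	return (unsigned int)(data % head_align);
}

int main(void)
{
	uintptr_t data = 0x1006;	/* hypothetical payload address */
	unsigned int pad = head_pad_for(data, 8);

	printf("pad=%u new_start=0x%lx\n", pad, (unsigned long)(data - pad));
	return 0;			/* prints: pad=6 new_start=0x1000 */
}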
 /**
  * struct brcmf_skbuff_cb reserves first two bytes in sk_buff::cb for
  * bus layer usage.
@@ -1880,32 +1918,40 @@ brcmf_sdbrcm_wait_event_wakeup(struct brcmf_sdio *bus)
 /* bit mask of data length chopped from the previous packet */
 #define ALIGN_SKB_CHOP_LEN_MASK	0x7fff
 
-static int brcmf_sdio_txpkt_prep_sg(struct brcmf_sdio_dev *sdiodev,
+static int brcmf_sdio_txpkt_prep_sg(struct brcmf_sdio *bus,
 				    struct sk_buff_head *pktq,
-				    struct sk_buff *pkt, uint chan)
+				    struct sk_buff *pkt, u16 total_len)
 {
+	struct brcmf_sdio_dev *sdiodev;
 	struct sk_buff *pkt_pad;
-	u16 tail_pad, tail_chop, sg_align;
+	u16 tail_pad, tail_chop, chain_pad;
 	unsigned int blksize;
-	u8 *dat_buf;
-	int ntail;
+	bool lastfrm;
+	int ntail, ret;
 
+	sdiodev = bus->sdiodev;
 	blksize = sdiodev->func[SDIO_FUNC_2]->cur_blksize;
-	sg_align = 4;
-	if (sdiodev->pdata && sdiodev->pdata->sd_sgentry_align > 4)
-		sg_align = sdiodev->pdata->sd_sgentry_align;
 	/* sg entry alignment should be a divisor of block size */
-	WARN_ON(blksize % sg_align);
+	WARN_ON(blksize % bus->sgentry_align);
 
 	/* Check tail padding */
-	pkt_pad = NULL;
-	tail_chop = pkt->len % sg_align;
-	tail_pad = sg_align - tail_chop;
-	tail_pad += blksize - (pkt->len + tail_pad) % blksize;
+	lastfrm = skb_queue_is_last(pktq, pkt);
+	tail_pad = 0;
+	tail_chop = pkt->len % bus->sgentry_align;
+	if (tail_chop)
+		tail_pad = bus->sgentry_align - tail_chop;
+	chain_pad = (total_len + tail_pad) % blksize;
+	if (lastfrm && chain_pad)
+		tail_pad += blksize - chain_pad;
 	if (skb_tailroom(pkt) < tail_pad && pkt->len > blksize) {
-		pkt_pad = brcmu_pkt_buf_get_skb(tail_pad + tail_chop);
+		pkt_pad = bus->txglom_sgpad;
+		if (pkt_pad == NULL)
+			pkt_pad = brcmu_pkt_buf_get_skb(tail_pad + tail_chop);
 		if (pkt_pad == NULL)
 			return -ENOMEM;
+		ret = brcmf_sdio_txpkt_hdalign(bus, pkt_pad);
+		if (unlikely(ret < 0))
+			return ret;
 		memcpy(pkt_pad->data,
 		       pkt->data + pkt->len - tail_chop,
 		       tail_chop);
@@ -1920,14 +1966,10 @@ static int brcmf_sdio_txpkt_prep_sg(struct brcmf_sdio_dev *sdiodev,
 				return -ENOMEM;
 		if (skb_linearize(pkt))
 			return -ENOMEM;
-		dat_buf = (u8 *)(pkt->data);
 		__skb_put(pkt, tail_pad);
 	}
 
-	if (pkt_pad)
-		return pkt->len + tail_chop;
-	else
-		return pkt->len - tail_pad;
+	return tail_pad;
 }
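Editor's note: the tail-padding rule in brcmf_sdio_txpkt_prep_sg() above combines two requirements: every scatter-gather entry must be a multiple of sgentry_align, and only the last frame of a glom chain rounds the whole chain up to a block multiple. A standalone sketch with made-up sizes follows; tail_pad_for() is illustrative only, not a driver helper.

#include <stdbool.h>
#include <stdio.h>

static unsigned int tail_pad_for(unsigned int pkt_len, unsigned int total_len,
				 unsigned int sgentry_align,
				 unsigned int blksize, bool lastfrm)
{
	unsigned int tail_chop = pkt_len % sgentry_align;
	unsigned int tail_pad = tail_chop ? sgentry_align - tail_chop : 0;
	unsigned int chain_pad = (total_len + tail_pad) % blksize;

	/* only the final frame pads the chain out to a full SDIO block */
	if (lastfrm && chain_pad)
		tail_pad += blksize - chain_pad;
	return tail_pad;
}

int main(void)
{
	/* last 1500-byte frame of a 3000-byte chain, 8-byte sg entries,
	 * 512-byte blocks: 4 bytes of sg padding + 68 bytes of block
	 * padding = 72, giving a 3072-byte (6 block) transfer
	 */
	printf("%u\n", tail_pad_for(1500, 3000, 8, 512, true));
	return 0;
}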
 
 /**
@@ -1946,58 +1988,66 @@ static int
 brcmf_sdio_txpkt_prep(struct brcmf_sdio *bus, struct sk_buff_head *pktq,
 		      uint chan)
 {
-	u16 head_pad, head_align;
+	u16 head_pad, total_len;
 	struct sk_buff *pkt_next;
-	u8 *dat_buf;
-	int err;
+	u8 txseq;
+	int ret;
 	struct brcmf_sdio_hdrinfo hd_info = {0};
 
-	/* SDIO ADMA requires at least 32 bit alignment */
-	head_align = 4;
-	if (bus->sdiodev->pdata && bus->sdiodev->pdata->sd_head_align > 4)
-		head_align = bus->sdiodev->pdata->sd_head_align;
+	txseq = bus->tx_seq;
+	total_len = 0;
+	skb_queue_walk(pktq, pkt_next) {
+		/* alignment packet inserted in previous
+		 * loop cycle can be skipped as it is
+		 * already properly aligned and does not
+		 * need an sdpcm header.
+		 */
+		if (*(u32 *)(pkt_next->cb) & ALIGN_SKB_FLAG)
+			continue;
 
-	pkt_next = pktq->next;
-	dat_buf = (u8 *)(pkt_next->data);
+		/* align packet data pointer */
+		ret = brcmf_sdio_txpkt_hdalign(bus, pkt_next);
+		if (ret < 0)
+			return ret;
+		head_pad = (u16)ret;
+		if (head_pad)
+			memset(pkt_next->data, 0, head_pad + bus->tx_hdrlen);
 
-	/* Check head padding */
-	head_pad = ((unsigned long)dat_buf % head_align);
-	if (head_pad) {
-		if (skb_headroom(pkt_next) < head_pad) {
-			bus->sdiodev->bus_if->tx_realloc++;
-			head_pad = 0;
-			if (skb_cow(pkt_next, head_pad))
-				return -ENOMEM;
-		}
-		skb_push(pkt_next, head_pad);
-		dat_buf = (u8 *)(pkt_next->data);
-		memset(dat_buf, 0, head_pad + bus->tx_hdrlen);
-	}
+		total_len += pkt_next->len;
 
-	if (bus->sdiodev->sg_support && pktq->qlen > 1) {
-		err = brcmf_sdio_txpkt_prep_sg(bus->sdiodev, pktq,
-					       pkt_next, chan);
-		if (err < 0)
-			return err;
-		hd_info.len = (u16)err;
-	} else {
 		hd_info.len = pkt_next->len;
-	}
-
-	hd_info.channel = chan;
-	hd_info.dat_offset = head_pad + bus->tx_hdrlen;
-
-	/* Now fill the header */
-	brcmf_sdio_hdpack(bus, dat_buf, &hd_info);
-
-	if (BRCMF_BYTES_ON() &&
-	    ((BRCMF_CTL_ON() && chan == SDPCM_CONTROL_CHANNEL) ||
-	     (BRCMF_DATA_ON() && chan != SDPCM_CONTROL_CHANNEL)))
-		brcmf_dbg_hex_dump(true, pkt_next, hd_info.len, "Tx Frame:\n");
-	else if (BRCMF_HDRS_ON())
-		brcmf_dbg_hex_dump(true, pkt_next, head_pad + bus->tx_hdrlen,
-				   "Tx Header:\n");
+		hd_info.lastfrm = skb_queue_is_last(pktq, pkt_next);
+		if (bus->txglom && pktq->qlen > 1) {
+			ret = brcmf_sdio_txpkt_prep_sg(bus, pktq,
+						       pkt_next, total_len);
+			if (ret < 0)
+				return ret;
+			hd_info.tail_pad = (u16)ret;
+			total_len += (u16)ret;
+		}
 
+		hd_info.channel = chan;
+		hd_info.dat_offset = head_pad + bus->tx_hdrlen;
+		hd_info.seq_num = txseq++;
+
+		/* Now fill the header */
+		brcmf_sdio_hdpack(bus, pkt_next->data, &hd_info);
+
+		if (BRCMF_BYTES_ON() &&
+		    ((BRCMF_CTL_ON() && chan == SDPCM_CONTROL_CHANNEL) ||
+		     (BRCMF_DATA_ON() && chan != SDPCM_CONTROL_CHANNEL)))
+			brcmf_dbg_hex_dump(true, pkt_next, hd_info.len,
+					   "Tx Frame:\n");
+		else if (BRCMF_HDRS_ON())
+			brcmf_dbg_hex_dump(true, pkt_next,
+					   head_pad + bus->tx_hdrlen,
+					   "Tx Header:\n");
+	}
+	/* Hardware length tag of the first packet should be total
+	 * length of the chain (including padding)
+	 */
+	if (bus->txglom)
+		brcmf_sdio_update_hwhdr(pktq->next->data, total_len);
 	return 0;
 }
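Editor's note: brcmf_sdio_update_hwhdr(), called above to stamp the chain length into the first packet, is added elsewhere in this patch and is not visible in this hunk. As a hedged illustration only, the sketch below assumes the usual SDPCM hardware header layout of a little-endian 16-bit frame length followed by its bitwise complement as a check value; sketch_update_hwhdr() and the byte values are illustrative, not the driver's code.

#include <stdint.h>
#include <stdio.h>

/* store v at p in little-endian byte order */
static void put_le16(uint8_t *p, uint16_t v)
{
	p[0] = (uint8_t)(v & 0xff);
	p[1] = (uint8_t)(v >> 8);
}

static void sketch_update_hwhdr(uint8_t *header, uint16_t frm_length)
{
	put_le16(header, frm_length);			/* total chain length */
	put_le16(header + 2, (uint16_t)~frm_length);	/* check value */
}

int main(void)
{
	uint8_t hdr[4] = { 0 };

	sketch_update_hwhdr(hdr, 3072);			/* 6 x 512-byte blocks */
	printf("%02x %02x %02x %02x\n", hdr[0], hdr[1], hdr[2], hdr[3]);
	return 0;					/* prints: 00 0c ff f3 */
}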
 
@@ -2015,6 +2065,7 @@ brcmf_sdio_txpkt_postp(struct brcmf_sdio *bus, struct sk_buff_head *pktq)
 {
 	u8 *hdr;
 	u32 dat_offset;
+	u16 tail_pad;
 	u32 dummy_flags, chop_len;
 	struct sk_buff *pkt_next, *tmp, *pkt_prev;
 
@@ -2024,42 +2075,41 @@ brcmf_sdio_txpkt_postp(struct brcmf_sdio *bus, struct sk_buff_head *pktq)
 			chop_len = dummy_flags & ALIGN_SKB_CHOP_LEN_MASK;
 			if (chop_len) {
 				pkt_prev = pkt_next->prev;
-				memcpy(pkt_prev->data + pkt_prev->len,
-				       pkt_next->data, chop_len);
 				skb_put(pkt_prev, chop_len);
 			}
 			__skb_unlink(pkt_next, pktq);
 			brcmu_pkt_buf_free_skb(pkt_next);
 		} else {
-			hdr = pkt_next->data + SDPCM_HWHDR_LEN;
+			hdr = pkt_next->data + bus->tx_hdrlen - SDPCM_SWHDR_LEN;
 			dat_offset = le32_to_cpu(*(__le32 *)hdr);
 			dat_offset = (dat_offset & SDPCM_DOFFSET_MASK) >>
 				     SDPCM_DOFFSET_SHIFT;
 			skb_pull(pkt_next, dat_offset);
+			if (bus->txglom) {
+				tail_pad = le16_to_cpu(*(__le16 *)(hdr - 2));
+				skb_trim(pkt_next, pkt_next->len - tail_pad);
+			}
 		}
 	}
 }
 
 /* Writes a HW/SW header into the packet and sends it. */
 /* Assumes: (a) header space already there, (b) caller holds lock */
-static int brcmf_sdbrcm_txpkt(struct brcmf_sdio *bus, struct sk_buff *pkt,
-			      uint chan)
+static int brcmf_sdio_txpkt(struct brcmf_sdio *bus, struct sk_buff_head *pktq,
+			    uint chan)
 {
 	int ret;
 	int i;
-	struct sk_buff_head localq;
+	struct sk_buff *pkt_next, *tmp;
 
 	brcmf_dbg(TRACE, "Enter\n");
 
-	__skb_queue_head_init(&localq);
-	__skb_queue_tail(&localq, pkt);
-	ret = brcmf_sdio_txpkt_prep(bus, &localq, chan);
+	ret = brcmf_sdio_txpkt_prep(bus, pktq, chan);
 	if (ret)
 		goto done;
 
 	sdio_claim_host(bus->sdiodev->func[1]);
-	ret = brcmf_sdcard_send_pkt(bus->sdiodev, bus->sdiodev->sbwad,
-				    SDIO_FUNC_2, F2SYNC, &localq);
+	ret = brcmf_sdiod_send_pkt(bus->sdiodev, pktq);
 	bus->sdcnt.f2txdata++;
 
 	if (ret < 0) {
@@ -2068,57 +2118,71 @@ static int brcmf_sdbrcm_txpkt(struct brcmf_sdio *bus, struct sk_buff *pkt,
 			  ret);
 		bus->sdcnt.tx_sderrs++;
 
-		brcmf_sdcard_abort(bus->sdiodev, SDIO_FUNC_2);
-		brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
-				 SFC_WF_TERM, NULL);
+		brcmf_sdiod_abort(bus->sdiodev, SDIO_FUNC_2);
+		brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
+				  SFC_WF_TERM, NULL);
 		bus->sdcnt.f1regdata++;
 
 		for (i = 0; i < 3; i++) {
 			u8 hi, lo;
-			hi = brcmf_sdio_regrb(bus->sdiodev,
-					      SBSDIO_FUNC1_WFRAMEBCHI, NULL);
-			lo = brcmf_sdio_regrb(bus->sdiodev,
-					      SBSDIO_FUNC1_WFRAMEBCLO, NULL);
+			hi = brcmf_sdiod_regrb(bus->sdiodev,
+					       SBSDIO_FUNC1_WFRAMEBCHI, NULL);
+			lo = brcmf_sdiod_regrb(bus->sdiodev,
+					       SBSDIO_FUNC1_WFRAMEBCLO, NULL);
 			bus->sdcnt.f1regdata += 2;
 			if ((hi == 0) && (lo == 0))
 				break;
 		}
-
 	}
 	sdio_release_host(bus->sdiodev->func[1]);
-	if (ret == 0)
-		bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQ_WRAP;
 
 done:
-	brcmf_sdio_txpkt_postp(bus, &localq);
-	__skb_dequeue_tail(&localq);
-	brcmf_txcomplete(bus->sdiodev->dev, pkt, ret == 0);
+	brcmf_sdio_txpkt_postp(bus, pktq);
+	if (ret == 0)
+		bus->tx_seq = (bus->tx_seq + pktq->qlen) % SDPCM_SEQ_WRAP;
+	skb_queue_walk_safe(pktq, pkt_next, tmp) {
+		__skb_unlink(pkt_next, pktq);
+		brcmf_txcomplete(bus->sdiodev->dev, pkt_next, ret == 0);
+	}
 	return ret;
 }
 
-static uint brcmf_sdbrcm_sendfromq(struct brcmf_sdio *bus, uint maxframes)
+static uint brcmf_sdio_sendfromq(struct brcmf_sdio *bus, uint maxframes)
 {
 	struct sk_buff *pkt;
+	struct sk_buff_head pktq;
 	u32 intstatus = 0;
-	int ret = 0, prec_out;
+	int ret = 0, prec_out, i;
 	uint cnt = 0;
-	u8 tx_prec_map;
+	u8 tx_prec_map, pkt_num;
 
 	brcmf_dbg(TRACE, "Enter\n");
 
 	tx_prec_map = ~bus->flowcontrol;
 
 	/* Send frames until the limit or some other event */
-	for (cnt = 0; (cnt < maxframes) && data_ok(bus); cnt++) {
+	for (cnt = 0; (cnt < maxframes) && data_ok(bus);) {
+		pkt_num = 1;
+		__skb_queue_head_init(&pktq);
+		if (bus->txglom)
+			pkt_num = min_t(u8, bus->tx_max - bus->tx_seq,
+					brcmf_sdio_txglomsz);
+		pkt_num = min_t(u32, pkt_num,
+				brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol));
 		spin_lock_bh(&bus->txqlock);
-		pkt = brcmu_pktq_mdeq(&bus->txq, tx_prec_map, &prec_out);
-		if (pkt == NULL) {
-			spin_unlock_bh(&bus->txqlock);
-			break;
+		for (i = 0; i < pkt_num; i++) {
+			pkt = brcmu_pktq_mdeq(&bus->txq, tx_prec_map,
+					      &prec_out);
+			if (pkt == NULL)
+				break;
+			__skb_queue_tail(&pktq, pkt);
 		}
 		spin_unlock_bh(&bus->txqlock);
+		if (i == 0)
+			break;
 
-		ret = brcmf_sdbrcm_txpkt(bus, pkt, SDPCM_DATA_CHANNEL);
+		ret = brcmf_sdio_txpkt(bus, &pktq, SDPCM_DATA_CHANNEL);
+		cnt += i;
 
 		/* In poll mode, need to check for other events */
 		if (!bus->intr && cnt) {
@@ -2146,7 +2210,7 @@ static uint brcmf_sdbrcm_sendfromq(struct brcmf_sdio *bus, uint maxframes)
 	return cnt;
 }
 
-static void brcmf_sdbrcm_bus_stop(struct device *dev)
+static void brcmf_sdio_bus_stop(struct device *dev)
 {
 	u32 local_hostintmask;
 	u8 saveclk;
@@ -2163,62 +2227,57 @@ static void brcmf_sdbrcm_bus_stop(struct device *dev)
 		bus->watchdog_tsk = NULL;
 	}
 
-	sdio_claim_host(bus->sdiodev->func[1]);
-
-	/* Enable clock for device interrupts */
-	brcmf_sdbrcm_bus_sleep(bus, false, false);
+	if (bus_if->state == BRCMF_BUS_DOWN) {
+		sdio_claim_host(sdiodev->func[1]);
+
+		/* Enable clock for device interrupts */
+		brcmf_sdio_bus_sleep(bus, false, false);
+
+		/* Disable and clear interrupts at the chip level also */
+		w_sdreg32(bus, 0, offsetof(struct sdpcmd_regs, hostintmask));
+		local_hostintmask = bus->hostintmask;
+		bus->hostintmask = 0;
+
+		/* Force backplane clocks to assure F2 interrupt propagates */
+		saveclk = brcmf_sdiod_regrb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
+					    &err);
+		if (!err)
+			brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
+					  (saveclk | SBSDIO_FORCE_HT), &err);
+		if (err)
+			brcmf_err("Failed to force clock for F2: err %d\n",
+				  err);
 
-	/* Disable and clear interrupts at the chip level also */
-	w_sdreg32(bus, 0, offsetof(struct sdpcmd_regs, hostintmask));
-	local_hostintmask = bus->hostintmask;
-	bus->hostintmask = 0;
+		/* Turn off the bus (F2), free any pending packets */
+		brcmf_dbg(INTR, "disable SDIO interrupts\n");
+		sdio_disable_func(sdiodev->func[SDIO_FUNC_2]);
 
-	/* Change our idea of bus state */
-	bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
+		/* Clear any pending interrupts now that F2 is disabled */
+		w_sdreg32(bus, local_hostintmask,
+			  offsetof(struct sdpcmd_regs, intstatus));
 
-	/* Force clocks on backplane to be sure F2 interrupt propagates */
-	saveclk = brcmf_sdio_regrb(bus->sdiodev,
-				   SBSDIO_FUNC1_CHIPCLKCSR, &err);
-	if (!err) {
-		brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
-				 (saveclk | SBSDIO_FORCE_HT), &err);
+		sdio_release_host(sdiodev->func[1]);
 	}
-	if (err)
-		brcmf_err("Failed to force clock for F2: err %d\n", err);
-
-	/* Turn off the bus (F2), free any pending packets */
-	brcmf_dbg(INTR, "disable SDIO interrupts\n");
-	brcmf_sdio_regwb(bus->sdiodev, SDIO_CCCR_IOEx, SDIO_FUNC_ENABLE_1,
-			 NULL);
-
-	/* Clear any pending interrupts now that F2 is disabled */
-	w_sdreg32(bus, local_hostintmask,
-		  offsetof(struct sdpcmd_regs, intstatus));
-
-	/* Turn off the backplane clock (only) */
-	brcmf_sdbrcm_clkctl(bus, CLK_SDONLY, false);
-	sdio_release_host(bus->sdiodev->func[1]);
-
 	/* Clear the data packet queues */
 	brcmu_pktq_flush(&bus->txq, true, NULL, NULL);
 
 	/* Clear any held glomming stuff */
 	if (bus->glomd)
 		brcmu_pkt_buf_free_skb(bus->glomd);
-	brcmf_sdbrcm_free_glom(bus);
+	brcmf_sdio_free_glom(bus);
 
 	/* Clear rx control and wake any waiters */
 	spin_lock_bh(&bus->rxctl_lock);
 	bus->rxlen = 0;
 	spin_unlock_bh(&bus->rxctl_lock);
-	brcmf_sdbrcm_dcmd_resp_wake(bus);
+	brcmf_sdio_dcmd_resp_wake(bus);
 
 	/* Reset some F2 state stuff */
 	bus->rxskip = false;
 	bus->tx_seq = bus->rx_seq = 0;
 }
 
-static inline void brcmf_sdbrcm_clrintr(struct brcmf_sdio *bus)
+static inline void brcmf_sdio_clrintr(struct brcmf_sdio *bus)
 {
 	unsigned long flags;
 
@@ -2243,7 +2302,7 @@ static int brcmf_sdio_intr_rstatus(struct brcmf_sdio *bus)
 	addr = bus->ci->c_inf[idx].base +
 	       offsetof(struct sdpcmd_regs, intstatus);
 
-	ret = brcmf_sdio_regrw_helper(bus->sdiodev, addr, &val, false);
+	val = brcmf_sdiod_regrl(bus->sdiodev, addr, &ret);
 	bus->sdcnt.f1regdata++;
 	if (ret != 0)
 		val = 0;
@@ -2253,7 +2312,7 @@ static int brcmf_sdio_intr_rstatus(struct brcmf_sdio *bus)
 
 	/* Clear interrupts */
 	if (val) {
-		ret = brcmf_sdio_regrw_helper(bus->sdiodev, addr, &val, true);
+		brcmf_sdiod_regwl(bus->sdiodev, addr, val, &ret);
 		bus->sdcnt.f1regdata++;
 	}
 
@@ -2267,7 +2326,7 @@ static int brcmf_sdio_intr_rstatus(struct brcmf_sdio *bus)
 	return ret;
 }
 
-static void brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
+static void brcmf_sdio_dpc(struct brcmf_sdio *bus)
 {
 	u32 newstatus = 0;
 	unsigned long intstatus;
@@ -2286,48 +2345,29 @@ static void brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
 
 #ifdef DEBUG
 		/* Check for inconsistent device control */
-		devctl = brcmf_sdio_regrb(bus->sdiodev,
-					  SBSDIO_DEVICE_CTL, &err);
-		if (err) {
-			brcmf_err("error reading DEVCTL: %d\n", err);
-			bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
-		}
+		devctl = brcmf_sdiod_regrb(bus->sdiodev,
+					   SBSDIO_DEVICE_CTL, &err);
 #endif				/* DEBUG */
 
 		/* Read CSR, if clock on switch to AVAIL, else ignore */
-		clkctl = brcmf_sdio_regrb(bus->sdiodev,
-					  SBSDIO_FUNC1_CHIPCLKCSR, &err);
-		if (err) {
-			brcmf_err("error reading CSR: %d\n",
-				  err);
-			bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
-		}
+		clkctl = brcmf_sdiod_regrb(bus->sdiodev,
+					   SBSDIO_FUNC1_CHIPCLKCSR, &err);
 
 		brcmf_dbg(SDIO, "DPC: PENDING, devctl 0x%02x clkctl 0x%02x\n",
 			  devctl, clkctl);
 
 		if (SBSDIO_HTAV(clkctl)) {
-			devctl = brcmf_sdio_regrb(bus->sdiodev,
-						  SBSDIO_DEVICE_CTL, &err);
-			if (err) {
-				brcmf_err("error reading DEVCTL: %d\n",
-					  err);
-				bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
-			}
+			devctl = brcmf_sdiod_regrb(bus->sdiodev,
+						   SBSDIO_DEVICE_CTL, &err);
 			devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
-			brcmf_sdio_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL,
-					 devctl, &err);
-			if (err) {
-				brcmf_err("error writing DEVCTL: %d\n",
-					  err);
-				bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
-			}
+			brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL,
+					  devctl, &err);
 			bus->clkstate = CLK_AVAIL;
 		}
 	}
 
 	/* Make sure backplane clock is on */
-	brcmf_sdbrcm_bus_sleep(bus, false, true);
+	brcmf_sdio_bus_sleep(bus, false, true);
 
 	/* Pending interrupt indicates new device status */
 	if (atomic_read(&bus->ipend) > 0) {
@@ -2358,7 +2398,7 @@ static void brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
 	/* Handle host mailbox indication */
 	if (intstatus & I_HMB_HOST_INT) {
 		intstatus &= ~I_HMB_HOST_INT;
-		intstatus |= brcmf_sdbrcm_hostmail(bus);
+		intstatus |= brcmf_sdio_hostmail(bus);
 	}
 
 	sdio_release_host(bus->sdiodev->func[1]);
@@ -2403,16 +2443,15 @@ static void brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
 			set_bit(n, (unsigned long *)&bus->intstatus.counter);
 	}
 
-	brcmf_sdbrcm_clrintr(bus);
+	brcmf_sdio_clrintr(bus);
 
 	if (data_ok(bus) && bus->ctrl_frame_stat &&
 		(bus->clkstate == CLK_AVAIL)) {
 		int i;
 
 		sdio_claim_host(bus->sdiodev->func[1]);
-		err = brcmf_sdcard_send_buf(bus->sdiodev, bus->sdiodev->sbwad,
-			SDIO_FUNC_2, F2SYNC, bus->ctrl_frame_buf,
-			(u32) bus->ctrl_frame_len);
+		err = brcmf_sdiod_send_buf(bus->sdiodev, bus->ctrl_frame_buf,
+					   (u32)bus->ctrl_frame_len);
 
 		if (err < 0) {
 			/* On failure, abort the command and
@@ -2421,20 +2460,20 @@ static void brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
 				  err);
 			bus->sdcnt.tx_sderrs++;
 
-			brcmf_sdcard_abort(bus->sdiodev, SDIO_FUNC_2);
+			brcmf_sdiod_abort(bus->sdiodev, SDIO_FUNC_2);
 
-			brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
-					 SFC_WF_TERM, &err);
+			brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
+					  SFC_WF_TERM, &err);
 			bus->sdcnt.f1regdata++;
 
 			for (i = 0; i < 3; i++) {
 				u8 hi, lo;
-				hi = brcmf_sdio_regrb(bus->sdiodev,
-						      SBSDIO_FUNC1_WFRAMEBCHI,
-						      &err);
-				lo = brcmf_sdio_regrb(bus->sdiodev,
-						      SBSDIO_FUNC1_WFRAMEBCLO,
-						      &err);
+				hi = brcmf_sdiod_regrb(bus->sdiodev,
+						       SBSDIO_FUNC1_WFRAMEBCHI,
+						       &err);
+				lo = brcmf_sdiod_regrb(bus->sdiodev,
+						       SBSDIO_FUNC1_WFRAMEBCLO,
+						       &err);
 				bus->sdcnt.f1regdata += 2;
 				if ((hi == 0) && (lo == 0))
 					break;
@@ -2445,7 +2484,7 @@ static void brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
 		}
 		sdio_release_host(bus->sdiodev->func[1]);
 		bus->ctrl_frame_stat = false;
-		brcmf_sdbrcm_wait_event_wakeup(bus);
+		brcmf_sdio_wait_event_wakeup(bus);
 	}
 	/* Send queued frames (limit 1 if rx may still be pending) */
 	else if ((bus->clkstate == CLK_AVAIL) && !atomic_read(&bus->fcstate) &&
@@ -2453,13 +2492,12 @@ static void brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
 		 && data_ok(bus)) {
 		framecnt = bus->rxpending ? min(txlimit, bus->txminmax) :
 					    txlimit;
-		framecnt = brcmf_sdbrcm_sendfromq(bus, framecnt);
+		framecnt = brcmf_sdio_sendfromq(bus, framecnt);
 		txlimit -= framecnt;
 	}
 
-	if ((bus->sdiodev->bus_if->state == BRCMF_BUS_DOWN) || (err != 0)) {
+	if (!brcmf_bus_ready(bus->sdiodev->bus_if) || (err != 0)) {
 		brcmf_err("failed backplane access over SDIO, halting operation\n");
-		bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
 		atomic_set(&bus->intstatus, 0);
 	} else if (atomic_read(&bus->intstatus) ||
 		   atomic_read(&bus->ipend) > 0 ||
@@ -2475,12 +2513,12 @@ static void brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
 		bus->activity = false;
 		brcmf_dbg(SDIO, "idle state\n");
 		sdio_claim_host(bus->sdiodev->func[1]);
-		brcmf_sdbrcm_bus_sleep(bus, true, false);
+		brcmf_sdio_bus_sleep(bus, true, false);
 		sdio_release_host(bus->sdiodev->func[1]);
 	}
 }
 
-static struct pktq *brcmf_sdbrcm_bus_gettxq(struct device *dev)
+static struct pktq *brcmf_sdio_bus_gettxq(struct device *dev)
 {
 	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
 	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
@@ -2489,7 +2527,7 @@ static struct pktq *brcmf_sdbrcm_bus_gettxq(struct device *dev)
 	return &bus->txq;
 }
 
-static int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *pkt)
+static int brcmf_sdio_bus_txdata(struct device *dev, struct sk_buff *pkt)
 {
 	int ret = -EBADE;
 	uint datalen, prec;
@@ -2545,7 +2583,7 @@ static int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *pkt)
 #ifdef DEBUG
 #define CONSOLE_LINE_MAX	192
 
-static int brcmf_sdbrcm_readconsole(struct brcmf_sdio *bus)
+static int brcmf_sdio_readconsole(struct brcmf_sdio *bus)
 {
 	struct brcmf_console *c = &bus->console;
 	u8 line[CONSOLE_LINE_MAX], ch;
@@ -2558,8 +2596,8 @@ static int brcmf_sdbrcm_readconsole(struct brcmf_sdio *bus)
 
 	/* Read console log struct */
 	addr = bus->console_addr + offsetof(struct rte_console, log_le);
-	rv = brcmf_sdio_ramrw(bus->sdiodev, false, addr, (u8 *)&c->log_le,
-			      sizeof(c->log_le));
+	rv = brcmf_sdiod_ramrw(bus->sdiodev, false, addr, (u8 *)&c->log_le,
+			       sizeof(c->log_le));
 	if (rv < 0)
 		return rv;
 
@@ -2584,7 +2622,7 @@ static int brcmf_sdbrcm_readconsole(struct brcmf_sdio *bus)
 
 	/* Read the console buffer */
 	addr = le32_to_cpu(c->log_le.buf);
-	rv = brcmf_sdio_ramrw(bus->sdiodev, false, addr, c->buf, c->bufsize);
+	rv = brcmf_sdiod_ramrw(bus->sdiodev, false, addr, c->buf, c->bufsize);
 	if (rv < 0)
 		return rv;
 
@@ -2622,14 +2660,13 @@ break2:
 }
 #endif				/* DEBUG */
 
-static int brcmf_tx_frame(struct brcmf_sdio *bus, u8 *frame, u16 len)
+static int brcmf_sdio_tx_frame(struct brcmf_sdio *bus, u8 *frame, u16 len)
 {
 	int i;
 	int ret;
 
 	bus->ctrl_frame_stat = false;
-	ret = brcmf_sdcard_send_buf(bus->sdiodev, bus->sdiodev->sbwad,
-				    SDIO_FUNC_2, F2SYNC, frame, len);
+	ret = brcmf_sdiod_send_buf(bus->sdiodev, frame, len);
 
 	if (ret < 0) {
 		/* On failure, abort the command and terminate the frame */
@@ -2637,18 +2674,18 @@ static int brcmf_tx_frame(struct brcmf_sdio *bus, u8 *frame, u16 len)
 			  ret);
 		bus->sdcnt.tx_sderrs++;
 
-		brcmf_sdcard_abort(bus->sdiodev, SDIO_FUNC_2);
+		brcmf_sdiod_abort(bus->sdiodev, SDIO_FUNC_2);
 
-		brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
-				 SFC_WF_TERM, NULL);
+		brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
+				  SFC_WF_TERM, NULL);
 		bus->sdcnt.f1regdata++;
 
 		for (i = 0; i < 3; i++) {
 			u8 hi, lo;
-			hi = brcmf_sdio_regrb(bus->sdiodev,
-					      SBSDIO_FUNC1_WFRAMEBCHI, NULL);
-			lo = brcmf_sdio_regrb(bus->sdiodev,
-					      SBSDIO_FUNC1_WFRAMEBCLO, NULL);
+			hi = brcmf_sdiod_regrb(bus->sdiodev,
+					       SBSDIO_FUNC1_WFRAMEBCHI, NULL);
+			lo = brcmf_sdiod_regrb(bus->sdiodev,
+					       SBSDIO_FUNC1_WFRAMEBCLO, NULL);
 			bus->sdcnt.f1regdata += 2;
 			if (hi == 0 && lo == 0)
 				break;
@@ -2662,10 +2699,10 @@ static int brcmf_tx_frame(struct brcmf_sdio *bus, u8 *frame, u16 len)
 }
 
 static int
-brcmf_sdbrcm_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
+brcmf_sdio_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
 {
 	u8 *frame;
-	u16 len;
+	u16 len, pad;
 	uint retries = 0;
 	u8 doff = 0;
 	int ret = -1;
@@ -2681,41 +2718,45 @@ brcmf_sdbrcm_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
 	len = (msglen += bus->tx_hdrlen);
 
 	/* Add alignment padding (optional for ctl frames) */
-	doff = ((unsigned long)frame % BRCMF_SDALIGN);
+	doff = ((unsigned long)frame % bus->head_align);
 	if (doff) {
 		frame -= doff;
 		len += doff;
 		msglen += doff;
 		memset(frame, 0, doff + bus->tx_hdrlen);
 	}
-	/* precondition: doff < BRCMF_SDALIGN */
+	/* precondition: doff < bus->head_align */
 	doff += bus->tx_hdrlen;
 
 	/* Round send length to next SDIO block */
+	pad = 0;
 	if (bus->roundup && bus->blocksize && (len > bus->blocksize)) {
-		u16 pad = bus->blocksize - (len % bus->blocksize);
-		if ((pad <= bus->roundup) && (pad < bus->blocksize))
-			len += pad;
-	} else if (len % BRCMF_SDALIGN) {
-		len += BRCMF_SDALIGN - (len % BRCMF_SDALIGN);
+		pad = bus->blocksize - (len % bus->blocksize);
+		if ((pad > bus->roundup) || (pad >= bus->blocksize))
+			pad = 0;
+	} else if (len % bus->head_align) {
+		pad = bus->head_align - (len % bus->head_align);
 	}
-
-	/* Satisfy length-alignment requirements */
-	if (len & (ALIGNMENT - 1))
-		len = roundup(len, ALIGNMENT);
+	len += pad;
 
 	/* precondition: IS_ALIGNED((unsigned long)frame, 2) */
 
 	/* Make sure backplane clock is on */
 	sdio_claim_host(bus->sdiodev->func[1]);
-	brcmf_sdbrcm_bus_sleep(bus, false, false);
+	brcmf_sdio_bus_sleep(bus, false, false);
 	sdio_release_host(bus->sdiodev->func[1]);
 
 	hd_info.len = (u16)msglen;
 	hd_info.channel = SDPCM_CONTROL_CHANNEL;
 	hd_info.dat_offset = doff;
+	hd_info.seq_num = bus->tx_seq;
+	hd_info.lastfrm = true;
+	hd_info.tail_pad = pad;
 	brcmf_sdio_hdpack(bus, frame, &hd_info);
 
+	if (bus->txglom)
+		brcmf_sdio_update_hwhdr(frame, len);
+
 	if (!data_ok(bus)) {
 		brcmf_dbg(INFO, "No bus credit bus->tx_max %d, bus->tx_seq %d\n",
 			  bus->tx_max, bus->tx_seq);
@@ -2746,7 +2787,7 @@ brcmf_sdbrcm_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
 
 		do {
 			sdio_claim_host(bus->sdiodev->func[1]);
-			ret = brcmf_tx_frame(bus, frame, len);
+			ret = brcmf_sdio_tx_frame(bus, frame, len);
 			sdio_release_host(bus->sdiodev->func[1]);
 		} while (ret < 0 && retries++ < TXRETRIES);
 	}
@@ -2756,7 +2797,7 @@ brcmf_sdbrcm_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
 		bus->activity = false;
 		sdio_claim_host(bus->sdiodev->func[1]);
 		brcmf_dbg(INFO, "idle\n");
-		brcmf_sdbrcm_clkctl(bus, CLK_NONE, true);
+		brcmf_sdio_clkctl(bus, CLK_NONE, true);
 		sdio_release_host(bus->sdiodev->func[1]);
 	}
 
@@ -2790,8 +2831,8 @@ static int brcmf_sdio_readshared(struct brcmf_sdio *bus,
 	 * address of sdpcm_shared structure
 	 */
 	sdio_claim_host(bus->sdiodev->func[1]);
-	brcmf_sdbrcm_bus_sleep(bus, false, false);
-	rv = brcmf_sdio_ramrw(bus->sdiodev, false, shaddr, (u8 *)&addr_le, 4);
+	brcmf_sdio_bus_sleep(bus, false, false);
+	rv = brcmf_sdiod_ramrw(bus->sdiodev, false, shaddr, (u8 *)&addr_le, 4);
 	sdio_release_host(bus->sdiodev->func[1]);
 	if (rv < 0)
 		return rv;
@@ -2811,8 +2852,8 @@ static int brcmf_sdio_readshared(struct brcmf_sdio *bus,
 	}
 
 	/* Read hndrte_shared structure */
-	rv = brcmf_sdio_ramrw(bus->sdiodev, false, addr, (u8 *)&sh_le,
-			      sizeof(struct sdpcm_shared_le));
+	rv = brcmf_sdiod_ramrw(bus->sdiodev, false, addr, (u8 *)&sh_le,
+			       sizeof(struct sdpcm_shared_le));
 	if (rv < 0)
 		return rv;
 
@@ -2848,22 +2889,22 @@ static int brcmf_sdio_dump_console(struct brcmf_sdio *bus,
 
 	/* obtain console information from device memory */
 	addr = sh->console_addr + offsetof(struct rte_console, log_le);
-	rv = brcmf_sdio_ramrw(bus->sdiodev, false, addr,
-			      (u8 *)&sh_val, sizeof(u32));
+	rv = brcmf_sdiod_ramrw(bus->sdiodev, false, addr,
+			       (u8 *)&sh_val, sizeof(u32));
 	if (rv < 0)
 		return rv;
 	console_ptr = le32_to_cpu(sh_val);
 
 	addr = sh->console_addr + offsetof(struct rte_console, log_le.buf_size);
-	rv = brcmf_sdio_ramrw(bus->sdiodev, false, addr,
-			      (u8 *)&sh_val, sizeof(u32));
+	rv = brcmf_sdiod_ramrw(bus->sdiodev, false, addr,
+			       (u8 *)&sh_val, sizeof(u32));
 	if (rv < 0)
 		return rv;
 	console_size = le32_to_cpu(sh_val);
 
 	addr = sh->console_addr + offsetof(struct rte_console, log_le.idx);
-	rv = brcmf_sdio_ramrw(bus->sdiodev, false, addr,
-			      (u8 *)&sh_val, sizeof(u32));
+	rv = brcmf_sdiod_ramrw(bus->sdiodev, false, addr,
+			       (u8 *)&sh_val, sizeof(u32));
 	if (rv < 0)
 		return rv;
 	console_index = le32_to_cpu(sh_val);
@@ -2877,8 +2918,8 @@ static int brcmf_sdio_dump_console(struct brcmf_sdio *bus,
 
 	/* obtain the console data from device */
 	conbuf[console_size] = '\0';
-	rv = brcmf_sdio_ramrw(bus->sdiodev, false, console_ptr, (u8 *)conbuf,
-			      console_size);
+	rv = brcmf_sdiod_ramrw(bus->sdiodev, false, console_ptr, (u8 *)conbuf,
+			       console_size);
 	if (rv < 0)
 		goto done;
 
@@ -2915,8 +2956,8 @@ static int brcmf_sdio_trap_info(struct brcmf_sdio *bus, struct sdpcm_shared *sh,
 		return 0;
 	}
 
-	error = brcmf_sdio_ramrw(bus->sdiodev, false, sh->trap_addr, (u8 *)&tr,
-				 sizeof(struct brcmf_trap_info));
+	error = brcmf_sdiod_ramrw(bus->sdiodev, false, sh->trap_addr, (u8 *)&tr,
+				  sizeof(struct brcmf_trap_info));
 	if (error < 0)
 		return error;
 
@@ -2959,14 +3000,14 @@ static int brcmf_sdio_assert_info(struct brcmf_sdio *bus,
 
 	sdio_claim_host(bus->sdiodev->func[1]);
 	if (sh->assert_file_addr != 0) {
-		error = brcmf_sdio_ramrw(bus->sdiodev, false,
-					 sh->assert_file_addr, (u8 *)file, 80);
+		error = brcmf_sdiod_ramrw(bus->sdiodev, false,
+					  sh->assert_file_addr, (u8 *)file, 80);
 		if (error < 0)
 			return error;
 	}
 	if (sh->assert_exp_addr != 0) {
-		error = brcmf_sdio_ramrw(bus->sdiodev, false,
-					 sh->assert_exp_addr, (u8 *)expr, 80);
+		error = brcmf_sdiod_ramrw(bus->sdiodev, false,
+					  sh->assert_exp_addr, (u8 *)expr, 80);
 		if (error < 0)
 			return error;
 	}
@@ -2978,7 +3019,7 @@ static int brcmf_sdio_assert_info(struct brcmf_sdio *bus,
 	return simple_read_from_buffer(data, count, &pos, buf, res);
 }
 
-static int brcmf_sdbrcm_checkdied(struct brcmf_sdio *bus)
+static int brcmf_sdio_checkdied(struct brcmf_sdio *bus)
 {
 	int error;
 	struct sdpcm_shared sh;
@@ -2999,8 +3040,8 @@ static int brcmf_sdbrcm_checkdied(struct brcmf_sdio *bus)
 	return 0;
 }
 
-static int brcmf_sdbrcm_died_dump(struct brcmf_sdio *bus, char __user *data,
-				  size_t count, loff_t *ppos)
+static int brcmf_sdio_died_dump(struct brcmf_sdio *bus, char __user *data,
+				size_t count, loff_t *ppos)
 {
 	int error = 0;
 	struct sdpcm_shared sh;
@@ -3041,7 +3082,7 @@ static ssize_t brcmf_sdio_forensic_read(struct file *f, char __user *data,
 	struct brcmf_sdio *bus = f->private_data;
 	int res;
 
-	res = brcmf_sdbrcm_died_dump(bus, data, count, ppos);
+	res = brcmf_sdio_died_dump(bus, data, count, ppos);
 	if (res > 0)
 		*ppos += res;
 	return (ssize_t)res;
@@ -3066,7 +3107,7 @@ static void brcmf_sdio_debugfs_create(struct brcmf_sdio *bus)
 	brcmf_debugfs_create_sdio_count(drvr, &bus->sdcnt);
 }
 #else
-static int brcmf_sdbrcm_checkdied(struct brcmf_sdio *bus)
+static int brcmf_sdio_checkdied(struct brcmf_sdio *bus)
 {
 	return 0;
 }
@@ -3077,7 +3118,7 @@ static void brcmf_sdio_debugfs_create(struct brcmf_sdio *bus)
 #endif /* DEBUG */
 
 static int
-brcmf_sdbrcm_bus_rxctl(struct device *dev, unsigned char *msg, uint msglen)
+brcmf_sdio_bus_rxctl(struct device *dev, unsigned char *msg, uint msglen)
 {
 	int timeleft;
 	uint rxlen = 0;
@@ -3090,7 +3131,7 @@ brcmf_sdbrcm_bus_rxctl(struct device *dev, unsigned char *msg, uint msglen)
 	brcmf_dbg(TRACE, "Enter\n");
 
 	/* Wait until control frame is available */
-	timeleft = brcmf_sdbrcm_dcmd_resp_wait(bus, &bus->rxlen, &pending);
+	timeleft = brcmf_sdio_dcmd_resp_wait(bus, &bus->rxlen, &pending);
 
 	spin_lock_bh(&bus->rxctl_lock);
 	rxlen = bus->rxlen;
@@ -3107,13 +3148,13 @@ brcmf_sdbrcm_bus_rxctl(struct device *dev, unsigned char *msg, uint msglen)
 			  rxlen, msglen);
 	} else if (timeleft == 0) {
 		brcmf_err("resumed on timeout\n");
-		brcmf_sdbrcm_checkdied(bus);
+		brcmf_sdio_checkdied(bus);
 	} else if (pending) {
 		brcmf_dbg(CTL, "cancelled\n");
 		return -ERESTARTSYS;
 	} else {
 		brcmf_dbg(CTL, "resumed for unknown reason?\n");
-		brcmf_sdbrcm_checkdied(bus);
+		brcmf_sdio_checkdied(bus);
 	}
 
 	if (rxlen)
@@ -3124,46 +3165,69 @@ brcmf_sdbrcm_bus_rxctl(struct device *dev, unsigned char *msg, uint msglen)
 	return rxlen ? (int)rxlen : -ETIMEDOUT;
 }
 
-static bool brcmf_sdbrcm_download_state(struct brcmf_sdio *bus, bool enter)
+#ifdef DEBUG
+static bool
+brcmf_sdio_verifymemory(struct brcmf_sdio_dev *sdiodev, u32 ram_addr,
+			u8 *ram_data, uint ram_sz)
 {
-	struct chip_info *ci = bus->ci;
-
-	/* To enter download state, disable ARM and reset SOCRAM.
-	 * To exit download state, simply reset ARM (default is RAM boot).
-	 */
-	if (enter) {
-		bus->alp_only = true;
+	char *ram_cmp;
+	int err;
+	bool ret = true;
+	int address;
+	int offset;
+	int len;
 
-		brcmf_sdio_chip_enter_download(bus->sdiodev, ci);
-	} else {
-		if (!brcmf_sdio_chip_exit_download(bus->sdiodev, ci, bus->vars,
-						   bus->varsz))
-			return false;
+	/* read back and verify */
+	brcmf_dbg(INFO, "Compare RAM dl & ul at 0x%08x; size=%d\n", ram_addr,
+		  ram_sz);
+	ram_cmp = kmalloc(MEMBLOCK, GFP_KERNEL);
+	/* no memory for the compare buffer: skip verification, report success */
+	if (!ram_cmp)
+		return true;
 
-		/* Allow HT Clock now that the ARM is running. */
-		bus->alp_only = false;
-
-		bus->sdiodev->bus_if->state = BRCMF_BUS_LOAD;
+	address = ram_addr;
+	offset = 0;
+	while (offset < ram_sz) {
+		len = ((offset + MEMBLOCK) < ram_sz) ? MEMBLOCK :
+		      ram_sz - offset;
+		err = brcmf_sdiod_ramrw(sdiodev, false, address, ram_cmp, len);
+		if (err) {
+			brcmf_err("error %d on reading %d membytes at 0x%08x\n",
+				  err, len, address);
+			ret = false;
+			break;
+		} else if (memcmp(ram_cmp, &ram_data[offset], len)) {
+			brcmf_err("Downloaded RAM image is corrupted, block offset is %d, len is %d\n",
+				  offset, len);
+			ret = false;
+			break;
+		}
+		offset += len;
+		address += len;
 	}
 
+	kfree(ram_cmp);
+
+	return ret;
+}
+#else	/* DEBUG */
+static bool
+brcmf_sdio_verifymemory(struct brcmf_sdio_dev *sdiodev, u32 ram_addr,
+			u8 *ram_data, uint ram_sz)
+{
 	return true;
 }
+#endif	/* DEBUG */
 
-static int brcmf_sdbrcm_download_code_file(struct brcmf_sdio *bus)
+static int brcmf_sdio_download_code_file(struct brcmf_sdio *bus,
+					 const struct firmware *fw)
 {
-	const struct firmware *fw;
 	int err;
 	int offset;
 	int address;
 	int len;
 
-	fw = brcmf_sdbrcm_get_fw(bus, BRCMF_FIRMWARE_BIN);
-	if (fw == NULL)
-		return -ENOENT;
-
-	if (brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_ARM_CR4) !=
-	    BRCMF_MAX_CORENUM)
-		memcpy(&bus->ci->rst_vec, fw->data, sizeof(bus->ci->rst_vec));
+	brcmf_dbg(TRACE, "Enter\n");
 
 	err = 0;
 	offset = 0;
@@ -3171,148 +3235,113 @@ static int brcmf_sdbrcm_download_code_file(struct brcmf_sdio *bus)
 	while (offset < fw->size) {
 		len = ((offset + MEMBLOCK) < fw->size) ? MEMBLOCK :
 		      fw->size - offset;
-		err = brcmf_sdio_ramrw(bus->sdiodev, true, address,
-				       (u8 *)&fw->data[offset], len);
+		err = brcmf_sdiod_ramrw(bus->sdiodev, true, address,
+					(u8 *)&fw->data[offset], len);
 		if (err) {
 			brcmf_err("error %d on writing %d membytes at 0x%08x\n",
 				  err, len, address);
-			goto failure;
+			return err;
 		}
 		offset += len;
 		address += len;
 	}
-
-failure:
-	release_firmware(fw);
+	if (!err)
+		if (!brcmf_sdio_verifymemory(bus->sdiodev, bus->ci->rambase,
+					     (u8 *)fw->data, fw->size))
+			err = -EIO;
 
 	return err;
 }
 
-/*
- * ProcessVars:Takes a buffer of "<var>=<value>\n" lines read from a file
- * and ending in a NUL.
- * Removes carriage returns, empty lines, comment lines, and converts
- * newlines to NULs.
- * Shortens buffer as needed and pads with NULs.  End of buffer is marked
- * by two NULs.
-*/
-
-static int brcmf_process_nvram_vars(struct brcmf_sdio *bus,
-				    const struct firmware *nv)
-{
-	char *varbuf;
-	char *dp;
-	bool findNewline;
-	int column;
-	int ret = 0;
-	uint buf_len, n, len;
-
-	len = nv->size;
-	varbuf = vmalloc(len);
-	if (!varbuf)
-		return -ENOMEM;
-
-	memcpy(varbuf, nv->data, len);
-	dp = varbuf;
-
-	findNewline = false;
-	column = 0;
-
-	for (n = 0; n < len; n++) {
-		if (varbuf[n] == 0)
-			break;
-		if (varbuf[n] == '\r')
-			continue;
-		if (findNewline && varbuf[n] != '\n')
-			continue;
-		findNewline = false;
-		if (varbuf[n] == '#') {
-			findNewline = true;
-			continue;
-		}
-		if (varbuf[n] == '\n') {
-			if (column == 0)
-				continue;
-			*dp++ = 0;
-			column = 0;
-			continue;
-		}
-		*dp++ = varbuf[n];
-		column++;
-	}
-	buf_len = dp - varbuf;
-	while (dp < varbuf + n)
-		*dp++ = 0;
-
-	kfree(bus->vars);
-	/* roundup needed for download to device */
-	bus->varsz = roundup(buf_len + 1, 4);
-	bus->vars = kmalloc(bus->varsz, GFP_KERNEL);
-	if (bus->vars == NULL) {
-		bus->varsz = 0;
-		ret = -ENOMEM;
-		goto err;
-	}
+static int brcmf_sdio_download_nvram(struct brcmf_sdio *bus,
+				     const struct firmware *nv)
+{
+	void *vars;
+	u32 varsz;
+	int address;
+	int err;
 
-	/* copy the processed variables and add null termination */
-	memcpy(bus->vars, varbuf, buf_len);
-	bus->vars[buf_len] = 0;
-err:
-	vfree(varbuf);
-	return ret;
-}
+	brcmf_dbg(TRACE, "Enter\n");
 
-static int brcmf_sdbrcm_download_nvram(struct brcmf_sdio *bus)
-{
-	const struct firmware *nv;
-	int ret;
+	vars = brcmf_nvram_strip(nv, &varsz);
 
-	nv = brcmf_sdbrcm_get_fw(bus, BRCMF_FIRMWARE_NVRAM);
-	if (nv == NULL)
-		return -ENOENT;
+	if (vars == NULL)
+		return -EINVAL;
 
-	ret = brcmf_process_nvram_vars(bus, nv);
+	address = bus->ci->ramsize - varsz + bus->ci->rambase;
+	err = brcmf_sdiod_ramrw(bus->sdiodev, true, address, vars, varsz);
+	if (err)
+		brcmf_err("error %d on writing %d nvram bytes at 0x%08x\n",
+			  err, varsz, address);
+	else if (!brcmf_sdio_verifymemory(bus->sdiodev, address, vars, varsz))
+		err = -EIO;
 
-	release_firmware(nv);
+	brcmf_nvram_free(vars);
 
-	return ret;
+	return err;
 }
 
-static int _brcmf_sdbrcm_download_firmware(struct brcmf_sdio *bus)
+static int brcmf_sdio_download_firmware(struct brcmf_sdio *bus)
 {
-	int bcmerror = -1;
+	int bcmerror = -EFAULT;
+	const struct firmware *fw;
+	u32 rstvec;
+
+	sdio_claim_host(bus->sdiodev->func[1]);
+	brcmf_sdio_clkctl(bus, CLK_AVAIL, false);
 
 	/* Keep arm in reset */
-	if (!brcmf_sdbrcm_download_state(bus, true)) {
-		brcmf_err("error placing ARM core in reset\n");
+	brcmf_sdio_chip_enter_download(bus->sdiodev, bus->ci);
+
+	fw = brcmf_sdio_get_fw(bus, BRCMF_FIRMWARE_BIN);
+	if (fw == NULL) {
+		bcmerror = -ENOENT;
 		goto err;
 	}
 
-	if (brcmf_sdbrcm_download_code_file(bus)) {
+	rstvec = get_unaligned_le32(fw->data);
+	brcmf_dbg(SDIO, "firmware rstvec: %x\n", rstvec);
+
+	bcmerror = brcmf_sdio_download_code_file(bus, fw);
+	release_firmware(fw);
+	if (bcmerror) {
 		brcmf_err("dongle image file download failed\n");
 		goto err;
 	}
 
-	if (brcmf_sdbrcm_download_nvram(bus)) {
+	fw = brcmf_sdio_get_fw(bus, BRCMF_FIRMWARE_NVRAM);
+	if (fw == NULL) {
+		bcmerror = -ENOENT;
+		goto err;
+	}
+
+	bcmerror = brcmf_sdio_download_nvram(bus, fw);
+	release_firmware(fw);
+	if (bcmerror) {
 		brcmf_err("dongle nvram file download failed\n");
 		goto err;
 	}
 
 	/* Take arm out of reset */
-	if (!brcmf_sdbrcm_download_state(bus, false)) {
+	if (!brcmf_sdio_chip_exit_download(bus->sdiodev, bus->ci, rstvec)) {
 		brcmf_err("error getting out of ARM core reset\n");
 		goto err;
 	}
 
+	/* Allow HT Clock now that the ARM is running. */
+	brcmf_bus_change_state(bus->sdiodev->bus_if, BRCMF_BUS_LOAD);
 	bcmerror = 0;
 
 err:
+	brcmf_sdio_clkctl(bus, CLK_SDONLY, false);
+	sdio_release_host(bus->sdiodev->func[1]);
 	return bcmerror;
 }
 
-static bool brcmf_sdbrcm_sr_capable(struct brcmf_sdio *bus)
+static bool brcmf_sdio_sr_capable(struct brcmf_sdio *bus)
 {
-	u32 addr, reg;
+	u32 addr, reg, pmu_cc3_mask = ~0;
+	int err;
 
 	brcmf_dbg(TRACE, "Enter\n");
 
@@ -3320,49 +3349,61 @@ static bool brcmf_sdbrcm_sr_capable(struct brcmf_sdio *bus)
 	if (bus->ci->pmurev < 17)
 		return false;
 
-	/* read PMU chipcontrol register 3*/
-	addr = CORE_CC_REG(bus->ci->c_inf[0].base, chipcontrol_addr);
-	brcmf_sdio_regwl(bus->sdiodev, addr, 3, NULL);
-	addr = CORE_CC_REG(bus->ci->c_inf[0].base, chipcontrol_data);
-	reg = brcmf_sdio_regrl(bus->sdiodev, addr, NULL);
+	switch (bus->ci->chip) {
+	case BCM43241_CHIP_ID:
+	case BCM4335_CHIP_ID:
+	case BCM4339_CHIP_ID:
+		/* read PMU chipcontrol register 3 */
+		addr = CORE_CC_REG(bus->ci->c_inf[0].base, chipcontrol_addr);
+		brcmf_sdiod_regwl(bus->sdiodev, addr, 3, NULL);
+		addr = CORE_CC_REG(bus->ci->c_inf[0].base, chipcontrol_data);
+		reg = brcmf_sdiod_regrl(bus->sdiodev, addr, NULL);
+		return (reg & pmu_cc3_mask) != 0;
+	default:
+		addr = CORE_CC_REG(bus->ci->c_inf[0].base, pmucapabilities_ext);
+		reg = brcmf_sdiod_regrl(bus->sdiodev, addr, &err);
+		if ((reg & PCAPEXT_SR_SUPPORTED_MASK) == 0)
+			return false;
 
-	return (bool)reg;
+		addr = CORE_CC_REG(bus->ci->c_inf[0].base, retention_ctl);
+		reg = brcmf_sdiod_regrl(bus->sdiodev, addr, NULL);
+		return (reg & (PMU_RCTL_MACPHY_DISABLE_MASK |
+			       PMU_RCTL_LOGIC_DISABLE_MASK)) == 0;
+	}
 }
 
-static void brcmf_sdbrcm_sr_init(struct brcmf_sdio *bus)
+static void brcmf_sdio_sr_init(struct brcmf_sdio *bus)
 {
 	int err = 0;
 	u8 val;
 
 	brcmf_dbg(TRACE, "Enter\n");
 
-	val = brcmf_sdio_regrb(bus->sdiodev, SBSDIO_FUNC1_WAKEUPCTRL,
-			       &err);
+	val = brcmf_sdiod_regrb(bus->sdiodev, SBSDIO_FUNC1_WAKEUPCTRL, &err);
 	if (err) {
 		brcmf_err("error reading SBSDIO_FUNC1_WAKEUPCTRL\n");
 		return;
 	}
 
 	val |= 1 << SBSDIO_FUNC1_WCTRL_HTWAIT_SHIFT;
-	brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_WAKEUPCTRL,
-			 val, &err);
+	brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_WAKEUPCTRL, val, &err);
 	if (err) {
 		brcmf_err("error writing SBSDIO_FUNC1_WAKEUPCTRL\n");
 		return;
 	}
 
 	/* Add CMD14 Support */
-	brcmf_sdio_regwb(bus->sdiodev, SDIO_CCCR_BRCM_CARDCAP,
-			 (SDIO_CCCR_BRCM_CARDCAP_CMD14_SUPPORT |
-			  SDIO_CCCR_BRCM_CARDCAP_CMD14_EXT),
-			 &err);
+	brcmf_sdiod_regwb(bus->sdiodev, SDIO_CCCR_BRCM_CARDCAP,
+			  (SDIO_CCCR_BRCM_CARDCAP_CMD14_SUPPORT |
+			   SDIO_CCCR_BRCM_CARDCAP_CMD14_EXT),
+			  &err);
 	if (err) {
 		brcmf_err("error writing SDIO_CCCR_BRCM_CARDCAP\n");
 		return;
 	}
 
-	brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
-			 SBSDIO_FORCE_HT, &err);
+	brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
+			  SBSDIO_FORCE_HT, &err);
 	if (err) {
 		brcmf_err("error writing SBSDIO_FUNC1_CHIPCLKCSR\n");
 		return;
@@ -3374,7 +3415,7 @@ static void brcmf_sdbrcm_sr_init(struct brcmf_sdio *bus)
 }
 
 /* enable KSO bit */
-static int brcmf_sdbrcm_kso_init(struct brcmf_sdio *bus)
+static int brcmf_sdio_kso_init(struct brcmf_sdio *bus)
 {
 	u8 val;
 	int err = 0;
@@ -3385,8 +3426,7 @@ static int brcmf_sdbrcm_kso_init(struct brcmf_sdio *bus)
 	if (bus->ci->c_inf[1].rev < 12)
 		return 0;
 
-	val = brcmf_sdio_regrb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
-			       &err);
+	val = brcmf_sdiod_regrb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR, &err);
 	if (err) {
 		brcmf_err("error reading SBSDIO_FUNC1_SLEEPCSR\n");
 		return err;
@@ -3395,8 +3435,8 @@ static int brcmf_sdbrcm_kso_init(struct brcmf_sdio *bus)
 	if (!(val & SBSDIO_FUNC1_SLEEPCSR_KSO_MASK)) {
 		val |= (SBSDIO_FUNC1_SLEEPCSR_KSO_EN <<
 			SBSDIO_FUNC1_SLEEPCSR_KSO_SHIFT);
-		brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
-				 val, &err);
+		brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
+				  val, &err);
 		if (err) {
 			brcmf_err("error writing SBSDIO_FUNC1_SLEEPCSR\n");
 			return err;
@@ -3407,31 +3447,70 @@ static int brcmf_sdbrcm_kso_init(struct brcmf_sdio *bus)
 }
 
 
-static bool
-brcmf_sdbrcm_download_firmware(struct brcmf_sdio *bus)
+static int brcmf_sdio_bus_preinit(struct device *dev)
 {
-	bool ret;
-
-	sdio_claim_host(bus->sdiodev->func[1]);
-
-	brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
+	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
+	struct brcmf_sdio *bus = sdiodev->bus;
+	uint pad_size;
+	u32 value;
+	u8 idx;
+	int err;
 
-	ret = _brcmf_sdbrcm_download_firmware(bus) == 0;
+	/* the commands below use the terms tx and rx from
+	 * a device perspective, ie. bus:txglom affects the
+	 * bus transfers from device to host.
+	 */
+	idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
+	if (bus->ci->c_inf[idx].rev < 12) {
+		/* for sdio core rev < 12, disable txgloming */
+		value = 0;
+		err = brcmf_iovar_data_set(dev, "bus:txglom", &value,
+					   sizeof(u32));
+	} else {
+		/* otherwise, set txglomalign */
+		value = 4;
+		if (sdiodev->pdata)
+			value = sdiodev->pdata->sd_sgentry_align;
+		/* SDIO ADMA requires at least 32 bit alignment */
+		value = max_t(u32, value, 4);
+		err = brcmf_iovar_data_set(dev, "bus:txglomalign", &value,
+					   sizeof(u32));
+	}
 
-	brcmf_sdbrcm_clkctl(bus, CLK_SDONLY, false);
+	if (err < 0)
+		goto done;
 
-	sdio_release_host(bus->sdiodev->func[1]);
+	bus->tx_hdrlen = SDPCM_HWHDR_LEN + SDPCM_SWHDR_LEN;
+	if (sdiodev->sg_support) {
+		bus->txglom = false;
+		value = 1;
+		pad_size = bus->sdiodev->func[2]->cur_blksize << 1;
+		bus->txglom_sgpad = brcmu_pkt_buf_get_skb(pad_size);
+		if (!bus->txglom_sgpad)
+			brcmf_err("allocating txglom padding skb failed, reduced performance\n");
+
+		err = brcmf_iovar_data_set(bus->sdiodev->dev, "bus:rxglom",
+					   &value, sizeof(u32));
+		if (err < 0) {
+			/* bus:rxglom is allowed to fail */
+			err = 0;
+		} else {
+			bus->txglom = true;
+			bus->tx_hdrlen += SDPCM_HWEXT_LEN;
+		}
+	}
+	brcmf_bus_add_txhdrlen(bus->sdiodev->dev, bus->tx_hdrlen);
 
-	return ret;
+done:
+	return err;
 }
 
-static int brcmf_sdbrcm_bus_init(struct device *dev)
+static int brcmf_sdio_bus_init(struct device *dev)
 {
 	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
 	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
 	struct brcmf_sdio *bus = sdiodev->bus;
-	unsigned long timeout;
-	u8 ready, enable;
 	int err, ret = 0;
 	u8 saveclk;
 
@@ -3439,8 +3518,11 @@ static int brcmf_sdbrcm_bus_init(struct device *dev)
 
 	/* try to download image and nvram to the dongle */
 	if (bus_if->state == BRCMF_BUS_DOWN) {
-		if (!(brcmf_sdbrcm_download_firmware(bus)))
-			return -1;
+		bus->alp_only = true;
+		err = brcmf_sdio_download_firmware(bus);
+		if (err)
+			return err;
+		bus->alp_only = false;
 	}
 
 	if (!bus->sdiodev->bus_if->drvr)
@@ -3448,21 +3530,21 @@ static int brcmf_sdbrcm_bus_init(struct device *dev)
 
 	/* Start the watchdog timer */
 	bus->sdcnt.tickcnt = 0;
-	brcmf_sdbrcm_wd_timer(bus, BRCMF_WD_POLL_MS);
+	brcmf_sdio_wd_timer(bus, BRCMF_WD_POLL_MS);
 
 	sdio_claim_host(bus->sdiodev->func[1]);
 
 	/* Make sure backplane clock is on, needed to generate F2 interrupt */
-	brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
+	brcmf_sdio_clkctl(bus, CLK_AVAIL, false);
 	if (bus->clkstate != CLK_AVAIL)
 		goto exit;
 
 	/* Force clocks on backplane to be sure F2 interrupt propagates */
-	saveclk = brcmf_sdio_regrb(bus->sdiodev,
-				   SBSDIO_FUNC1_CHIPCLKCSR, &err);
+	saveclk = brcmf_sdiod_regrb(bus->sdiodev,
+				    SBSDIO_FUNC1_CHIPCLKCSR, &err);
 	if (!err) {
-		brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
-				 (saveclk | SBSDIO_FORCE_HT), &err);
+		brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
+				  (saveclk | SBSDIO_FORCE_HT), &err);
 	}
 	if (err) {
 		brcmf_err("Failed to force clock for F2: err %d\n", err);
@@ -3472,56 +3554,42 @@ static int brcmf_sdbrcm_bus_init(struct device *dev)
 	/* Enable function 2 (frame transfers) */
 	w_sdreg32(bus, SDPCM_PROT_VERSION << SMB_DATA_VERSION_SHIFT,
 		  offsetof(struct sdpcmd_regs, tosbmailboxdata));
-	enable = (SDIO_FUNC_ENABLE_1 | SDIO_FUNC_ENABLE_2);
-
-	brcmf_sdio_regwb(bus->sdiodev, SDIO_CCCR_IOEx, enable, NULL);
+	err = sdio_enable_func(bus->sdiodev->func[SDIO_FUNC_2]);
 
-	timeout = jiffies + msecs_to_jiffies(BRCMF_WAIT_F2RDY);
-	ready = 0;
-	while (enable != ready) {
-		ready = brcmf_sdio_regrb(bus->sdiodev,
-					 SDIO_CCCR_IORx, NULL);
-		if (time_after(jiffies, timeout))
-			break;
-		else if (time_after(jiffies, timeout - BRCMF_WAIT_F2RDY + 50))
-			/* prevent busy waiting if it takes too long */
-			msleep_interruptible(20);
-	}
 
-	brcmf_dbg(INFO, "enable 0x%02x, ready 0x%02x\n", enable, ready);
+	brcmf_dbg(INFO, "enable F2: err=%d\n", err);
 
 	/* If F2 successfully enabled, set core and enable interrupts */
-	if (ready == enable) {
+	if (!err) {
 		/* Set up the interrupt mask and enable interrupts */
 		bus->hostintmask = HOSTINTMASK;
 		w_sdreg32(bus, bus->hostintmask,
 			  offsetof(struct sdpcmd_regs, hostintmask));
 
-		brcmf_sdio_regwb(bus->sdiodev, SBSDIO_WATERMARK, 8, &err);
+		brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_WATERMARK, 8, &err);
 	} else {
 		/* Disable F2 again */
-		enable = SDIO_FUNC_ENABLE_1;
-		brcmf_sdio_regwb(bus->sdiodev, SDIO_CCCR_IOEx, enable, NULL);
+		sdio_disable_func(bus->sdiodev->func[SDIO_FUNC_2]);
 		ret = -ENODEV;
 	}
 
-	if (brcmf_sdbrcm_sr_capable(bus)) {
-		brcmf_sdbrcm_sr_init(bus);
+	if (brcmf_sdio_sr_capable(bus)) {
+		brcmf_sdio_sr_init(bus);
 	} else {
 		/* Restore previous clock setting */
-		brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
-				 saveclk, &err);
+		brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
+				  saveclk, &err);
 	}
 
 	if (ret == 0) {
-		ret = brcmf_sdio_intr_register(bus->sdiodev);
+		ret = brcmf_sdiod_intr_register(bus->sdiodev);
 		if (ret != 0)
 			brcmf_err("intr register failed:%d\n", ret);
 	}
 
 	/* If we didn't come up, turn off backplane clock */
-	if (bus_if->state != BRCMF_BUS_DATA)
-		brcmf_sdbrcm_clkctl(bus, CLK_NONE, false);
+	if (ret != 0)
+		brcmf_sdio_clkctl(bus, CLK_NONE, false);
 
 exit:
 	sdio_release_host(bus->sdiodev->func[1]);
@@ -3529,10 +3597,8 @@ exit:
 	return ret;
 }
 
-void brcmf_sdbrcm_isr(void *arg)
+void brcmf_sdio_isr(struct brcmf_sdio *bus)
 {
-	struct brcmf_sdio *bus = (struct brcmf_sdio *) arg;
-
 	brcmf_dbg(TRACE, "Enter\n");
 
 	if (!bus) {
@@ -3540,7 +3606,7 @@ void brcmf_sdbrcm_isr(void *arg)
 		return;
 	}
 
-	if (bus->sdiodev->bus_if->state == BRCMF_BUS_DOWN) {
+	if (!brcmf_bus_ready(bus->sdiodev->bus_if)) {
 		brcmf_err("bus is down. we have nothing to do\n");
 		return;
 	}
@@ -3551,7 +3617,6 @@ void brcmf_sdbrcm_isr(void *arg)
 	else
 		if (brcmf_sdio_intr_rstatus(bus)) {
 			brcmf_err("failed backplane access\n");
-			bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
 		}
 
 	/* Disable additional interrupts (is this needed now)? */
@@ -3562,7 +3627,7 @@ void brcmf_sdbrcm_isr(void *arg)
 	queue_work(bus->brcmf_wq, &bus->datawork);
 }
 
-static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
+static bool brcmf_sdio_bus_watchdog(struct brcmf_sdio *bus)
 {
 #ifdef DEBUG
 	struct brcmf_bus *bus_if = dev_get_drvdata(bus->sdiodev->dev);
@@ -3586,9 +3651,9 @@ static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
 				u8 devpend;
 
 				sdio_claim_host(bus->sdiodev->func[1]);
-				devpend = brcmf_sdio_regrb(bus->sdiodev,
-							   SDIO_CCCR_INTx,
-							   NULL);
+				devpend = brcmf_sdiod_regrb(bus->sdiodev,
+							    SDIO_CCCR_INTx,
+							    NULL);
 				sdio_release_host(bus->sdiodev->func[1]);
 				intstatus =
 				    devpend & (INTR_STATUS_FUNC1 |
@@ -3618,8 +3683,8 @@ static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
 			bus->console.count -= bus->console_interval;
 			sdio_claim_host(bus->sdiodev->func[1]);
 			/* Make sure backplane clock is on */
-			brcmf_sdbrcm_bus_sleep(bus, false, false);
-			if (brcmf_sdbrcm_readconsole(bus) < 0)
+			brcmf_sdio_bus_sleep(bus, false, false);
+			if (brcmf_sdio_readconsole(bus) < 0)
 				/* stop on error */
 				bus->console_interval = 0;
 			sdio_release_host(bus->sdiodev->func[1]);
@@ -3633,11 +3698,11 @@ static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
 			bus->idlecount = 0;
 			if (bus->activity) {
 				bus->activity = false;
-				brcmf_sdbrcm_wd_timer(bus, BRCMF_WD_POLL_MS);
+				brcmf_sdio_wd_timer(bus, BRCMF_WD_POLL_MS);
 			} else {
 				brcmf_dbg(SDIO, "idle\n");
 				sdio_claim_host(bus->sdiodev->func[1]);
-				brcmf_sdbrcm_bus_sleep(bus, true, false);
+				brcmf_sdio_bus_sleep(bus, true, false);
 				sdio_release_host(bus->sdiodev->func[1]);
 			}
 		}
@@ -3652,38 +3717,13 @@ static void brcmf_sdio_dataworker(struct work_struct *work)
 					      datawork);
 
 	while (atomic_read(&bus->dpc_tskcnt)) {
-		brcmf_sdbrcm_dpc(bus);
+		brcmf_sdio_dpc(bus);
 		atomic_dec(&bus->dpc_tskcnt);
 	}
 }
 
-static void brcmf_sdbrcm_release_malloc(struct brcmf_sdio *bus)
-{
-	brcmf_dbg(TRACE, "Enter\n");
-
-	kfree(bus->rxbuf);
-	bus->rxctl = bus->rxbuf = NULL;
-	bus->rxlen = 0;
-}
-
-static bool brcmf_sdbrcm_probe_malloc(struct brcmf_sdio *bus)
-{
-	brcmf_dbg(TRACE, "Enter\n");
-
-	if (bus->sdiodev->bus_if->maxctl) {
-		bus->rxblen =
-		    roundup((bus->sdiodev->bus_if->maxctl + SDPCM_HDRLEN),
-			    ALIGNMENT) + BRCMF_SDALIGN;
-		bus->rxbuf = kmalloc(bus->rxblen, GFP_ATOMIC);
-		if (!(bus->rxbuf))
-			return false;
-	}
-
-	return true;
-}
-
 static bool
-brcmf_sdbrcm_probe_attach(struct brcmf_sdio *bus, u32 regsva)
+brcmf_sdio_probe_attach(struct brcmf_sdio *bus)
 {
 	u8 clkctl = 0;
 	int err = 0;
@@ -3691,23 +3731,21 @@ brcmf_sdbrcm_probe_attach(struct brcmf_sdio *bus, u32 regsva)
 	u32 reg_val;
 	u32 drivestrength;
 
-	bus->alp_only = true;
-
 	sdio_claim_host(bus->sdiodev->func[1]);
 
 	pr_debug("F1 signature read @0x18000000=0x%4x\n",
-		 brcmf_sdio_regrl(bus->sdiodev, SI_ENUM_BASE, NULL));
+		 brcmf_sdiod_regrl(bus->sdiodev, SI_ENUM_BASE, NULL));
 
 	/*
 	 * Force PLL off until brcmf_sdio_chip_attach()
 	 * programs PLL control regs
 	 */
 
-	brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
-			 BRCMF_INIT_CLKCTL1, &err);
+	brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
+			  BRCMF_INIT_CLKCTL1, &err);
 	if (!err)
-		clkctl = brcmf_sdio_regrb(bus->sdiodev,
-					  SBSDIO_FUNC1_CHIPCLKCSR, &err);
+		clkctl = brcmf_sdiod_regrb(bus->sdiodev,
+					   SBSDIO_FUNC1_CHIPCLKCSR, &err);
 
 	if (err || ((clkctl & ~SBSDIO_AVBITS) != BRCMF_INIT_CLKCTL1)) {
 		brcmf_err("ChipClkCSR access: err %d wrote 0x%02x read 0x%02x\n",
@@ -3715,12 +3753,17 @@ brcmf_sdbrcm_probe_attach(struct brcmf_sdio *bus, u32 regsva)
 		goto fail;
 	}
 
-	if (brcmf_sdio_chip_attach(bus->sdiodev, &bus->ci, regsva)) {
+	/* SDIO register access works so moving
+	 * state from UNKNOWN to DOWN.
+	 */
+	brcmf_bus_change_state(bus->sdiodev->bus_if, BRCMF_BUS_DOWN);
+
+	if (brcmf_sdio_chip_attach(bus->sdiodev, &bus->ci)) {
 		brcmf_err("brcmf_sdio_chip_attach failed!\n");
 		goto fail;
 	}
 
-	if (brcmf_sdbrcm_kso_init(bus)) {
+	if (brcmf_sdio_kso_init(bus)) {
 		brcmf_err("error enabling KSO\n");
 		goto fail;
 	}
@@ -3739,33 +3782,33 @@ brcmf_sdbrcm_probe_attach(struct brcmf_sdio *bus, u32 regsva)
 	}
 
 	/* Set card control so an SDIO card reset does a WLAN backplane reset */
-	reg_val = brcmf_sdio_regrb(bus->sdiodev,
-				   SDIO_CCCR_BRCM_CARDCTRL, &err);
+	reg_val = brcmf_sdiod_regrb(bus->sdiodev,
+				    SDIO_CCCR_BRCM_CARDCTRL, &err);
 	if (err)
 		goto fail;
 
 	reg_val |= SDIO_CCCR_BRCM_CARDCTRL_WLANRESET;
 
-	brcmf_sdio_regwb(bus->sdiodev,
-			 SDIO_CCCR_BRCM_CARDCTRL, reg_val, &err);
+	brcmf_sdiod_regwb(bus->sdiodev,
+			  SDIO_CCCR_BRCM_CARDCTRL, reg_val, &err);
 	if (err)
 		goto fail;
 
 	/* set PMUControl so a backplane reset does PMU state reload */
 	reg_addr = CORE_CC_REG(bus->ci->c_inf[0].base,
 			       pmucontrol);
-	reg_val = brcmf_sdio_regrl(bus->sdiodev,
-				   reg_addr,
-				   &err);
+	reg_val = brcmf_sdiod_regrl(bus->sdiodev,
+				    reg_addr,
+				    &err);
 	if (err)
 		goto fail;
 
 	reg_val |= (BCMA_CC_PMU_CTL_RES_RELOAD << BCMA_CC_PMU_CTL_RES_SHIFT);
 
-	brcmf_sdio_regwl(bus->sdiodev,
-			 reg_addr,
-			 reg_val,
-			 &err);
+	brcmf_sdiod_regwl(bus->sdiodev,
+			  reg_addr,
+			  reg_val,
+			  &err);
 	if (err)
 		goto fail;
 
@@ -3774,9 +3817,13 @@ brcmf_sdbrcm_probe_attach(struct brcmf_sdio *bus, u32 regsva)
 
 	brcmu_pktq_init(&bus->txq, (PRIOMASK + 1), TXQLEN);
 
+	/* allocate header buffer */
+	bus->hdrbuf = kzalloc(MAX_HDR_READ + bus->head_align, GFP_KERNEL);
+	if (!bus->hdrbuf)
+		return false;
 	/* Locate an appropriately-aligned portion of hdrbuf */
 	bus->rxhdr = (u8 *) roundup((unsigned long)&bus->hdrbuf[0],
-				    BRCMF_SDALIGN);
+				    bus->head_align);
 
 	/* Set the poll and/or interrupt flags */
 	bus->intr = true;
@@ -3791,42 +3838,8 @@ fail:
 	return false;
 }
 
-static bool brcmf_sdbrcm_probe_init(struct brcmf_sdio *bus)
-{
-	brcmf_dbg(TRACE, "Enter\n");
-
-	sdio_claim_host(bus->sdiodev->func[1]);
-
-	/* Disable F2 to clear any intermediate frame state on the dongle */
-	brcmf_sdio_regwb(bus->sdiodev, SDIO_CCCR_IOEx,
-			 SDIO_FUNC_ENABLE_1, NULL);
-
-	bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
-	bus->rxflow = false;
-
-	/* Done with backplane-dependent accesses, can drop clock... */
-	brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, 0, NULL);
-
-	sdio_release_host(bus->sdiodev->func[1]);
-
-	/* ...and initialize clock/power states */
-	bus->clkstate = CLK_SDONLY;
-	bus->idletime = BRCMF_IDLE_INTERVAL;
-	bus->idleclock = BRCMF_IDLE_ACTIVE;
-
-	/* Query the F2 block size, set roundup accordingly */
-	bus->blocksize = bus->sdiodev->func[2]->cur_blksize;
-	bus->roundup = min(max_roundup, bus->blocksize);
-
-	/* SR state */
-	bus->sleeping = false;
-	bus->sr_enabled = false;
-
-	return true;
-}
-
 static int
-brcmf_sdbrcm_watchdog_thread(void *data)
+brcmf_sdio_watchdog_thread(void *data)
 {
 	struct brcmf_sdio *bus = (struct brcmf_sdio *)data;
 
@@ -3836,7 +3849,7 @@ brcmf_sdbrcm_watchdog_thread(void *data)
 		if (kthread_should_stop())
 			break;
 		if (!wait_for_completion_interruptible(&bus->watchdog_wait)) {
-			brcmf_sdbrcm_bus_watchdog(bus);
+			brcmf_sdio_bus_watchdog(bus);
 			/* Count the tick for reference */
 			bus->sdcnt.tickcnt++;
 		} else
@@ -3846,7 +3859,7 @@ brcmf_sdbrcm_watchdog_thread(void *data)
 }
 
 static void
-brcmf_sdbrcm_watchdog(unsigned long data)
+brcmf_sdio_watchdog(unsigned long data)
 {
 	struct brcmf_sdio *bus = (struct brcmf_sdio *)data;
 
@@ -3859,73 +3872,23 @@ brcmf_sdbrcm_watchdog(unsigned long data)
 	}
 }
 
-static void brcmf_sdbrcm_release_dongle(struct brcmf_sdio *bus)
-{
-	brcmf_dbg(TRACE, "Enter\n");
-
-	if (bus->ci) {
-		sdio_claim_host(bus->sdiodev->func[1]);
-		brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
-		brcmf_sdbrcm_clkctl(bus, CLK_NONE, false);
-		sdio_release_host(bus->sdiodev->func[1]);
-		brcmf_sdio_chip_detach(&bus->ci);
-		if (bus->vars && bus->varsz)
-			kfree(bus->vars);
-		bus->vars = NULL;
-	}
-
-	brcmf_dbg(TRACE, "Disconnected\n");
-}
-
-/* Detach and free everything */
-static void brcmf_sdbrcm_release(struct brcmf_sdio *bus)
-{
-	brcmf_dbg(TRACE, "Enter\n");
-
-	if (bus) {
-		/* De-register interrupt handler */
-		brcmf_sdio_intr_unregister(bus->sdiodev);
-
-		cancel_work_sync(&bus->datawork);
-		if (bus->brcmf_wq)
-			destroy_workqueue(bus->brcmf_wq);
-
-		if (bus->sdiodev->bus_if->drvr) {
-			brcmf_detach(bus->sdiodev->dev);
-			brcmf_sdbrcm_release_dongle(bus);
-		}
-
-		brcmf_sdbrcm_release_malloc(bus);
-
-		kfree(bus);
-	}
-
-	brcmf_dbg(TRACE, "Disconnected\n");
-}
-
 static struct brcmf_bus_ops brcmf_sdio_bus_ops = {
-	.stop = brcmf_sdbrcm_bus_stop,
-	.init = brcmf_sdbrcm_bus_init,
-	.txdata = brcmf_sdbrcm_bus_txdata,
-	.txctl = brcmf_sdbrcm_bus_txctl,
-	.rxctl = brcmf_sdbrcm_bus_rxctl,
-	.gettxq = brcmf_sdbrcm_bus_gettxq,
+	.stop = brcmf_sdio_bus_stop,
+	.preinit = brcmf_sdio_bus_preinit,
+	.init = brcmf_sdio_bus_init,
+	.txdata = brcmf_sdio_bus_txdata,
+	.txctl = brcmf_sdio_bus_txctl,
+	.rxctl = brcmf_sdio_bus_rxctl,
+	.gettxq = brcmf_sdio_bus_gettxq,
 };
 
-void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev)
+struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
 {
 	int ret;
 	struct brcmf_sdio *bus;
-	struct brcmf_bus_dcmd *dlst;
-	u32 dngl_txglom;
-	u32 txglomalign = 0;
-	u8 idx;
 
 	brcmf_dbg(TRACE, "Enter\n");
 
-	/* We make an assumption about address window mappings:
-	 * regsva == SI_ENUM_BASE*/
-
 	/* Allocate private bus interface state */
 	bus = kzalloc(sizeof(struct brcmf_sdio), GFP_ATOMIC);
 	if (!bus)
@@ -3939,6 +3902,18 @@ void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev)
 	bus->txminmax = BRCMF_TXMINMAX;
 	bus->tx_seq = SDPCM_SEQ_WRAP - 1;
 
+	/* platform-specific configuration:
+	 *   alignments must be at least 4 bytes for ADMA
+	 */
+	bus->head_align = ALIGNMENT;
+	bus->sgentry_align = ALIGNMENT;
+	if (sdiodev->pdata) {
+		if (sdiodev->pdata->sd_head_align > ALIGNMENT)
+			bus->head_align = sdiodev->pdata->sd_head_align;
+		if (sdiodev->pdata->sd_sgentry_align > ALIGNMENT)
+			bus->sgentry_align = sdiodev->pdata->sd_sgentry_align;
+	}
+
 	INIT_WORK(&bus->datawork, brcmf_sdio_dataworker);
 	bus->brcmf_wq = create_singlethread_workqueue("brcmf_wq");
 	if (bus->brcmf_wq == NULL) {
@@ -3947,8 +3922,8 @@ void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev)
 	}
 
 	/* attempt to attach to the dongle */
-	if (!(brcmf_sdbrcm_probe_attach(bus, regsva))) {
-		brcmf_err("brcmf_sdbrcm_probe_attach failed\n");
+	if (!(brcmf_sdio_probe_attach(bus))) {
+		brcmf_err("brcmf_sdio_probe_attach failed\n");
 		goto fail;
 	}
 
@@ -3960,11 +3935,11 @@ void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev)
 	/* Set up the watchdog timer */
 	init_timer(&bus->timer);
 	bus->timer.data = (unsigned long)bus;
-	bus->timer.function = brcmf_sdbrcm_watchdog;
+	bus->timer.function = brcmf_sdio_watchdog;
 
 	/* Initialize watchdog thread */
 	init_completion(&bus->watchdog_wait);
-	bus->watchdog_tsk = kthread_run(brcmf_sdbrcm_watchdog_thread,
+	bus->watchdog_tsk = kthread_run(brcmf_sdio_watchdog_thread,
 					bus, "brcmf_watchdog");
 	if (IS_ERR(bus->watchdog_tsk)) {
 		pr_warn("brcmf_watchdog thread failed to start\n");
@@ -3983,50 +3958,52 @@ void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev)
 	bus->tx_hdrlen = SDPCM_HWHDR_LEN + SDPCM_SWHDR_LEN;
 
 	/* Attach to the common layer, reserve hdr space */
-	ret = brcmf_attach(bus->tx_hdrlen, bus->sdiodev->dev);
+	ret = brcmf_attach(bus->sdiodev->dev);
 	if (ret != 0) {
 		brcmf_err("brcmf_attach failed\n");
 		goto fail;
 	}
 
 	/* Allocate buffers */
-	if (!(brcmf_sdbrcm_probe_malloc(bus))) {
-		brcmf_err("brcmf_sdbrcm_probe_malloc failed\n");
-		goto fail;
+	if (bus->sdiodev->bus_if->maxctl) {
+		bus->rxblen =
+		    roundup((bus->sdiodev->bus_if->maxctl + SDPCM_HDRLEN),
+			    ALIGNMENT) + bus->head_align;
+		bus->rxbuf = kmalloc(bus->rxblen, GFP_ATOMIC);
+		if (!(bus->rxbuf)) {
+			brcmf_err("rxbuf allocation failed\n");
+			goto fail;
+		}
 	}
 
-	if (!(brcmf_sdbrcm_probe_init(bus))) {
-		brcmf_err("brcmf_sdbrcm_probe_init failed\n");
-		goto fail;
-	}
+	sdio_claim_host(bus->sdiodev->func[1]);
+
+	/* Disable F2 to clear any intermediate frame state on the dongle */
+	sdio_disable_func(bus->sdiodev->func[SDIO_FUNC_2]);
+
+	bus->rxflow = false;
+
+	/* Done with backplane-dependent accesses, can drop clock... */
+	brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, 0, NULL);
+
+	sdio_release_host(bus->sdiodev->func[1]);
+
+	/* ...and initialize clock/power states */
+	bus->clkstate = CLK_SDONLY;
+	bus->idletime = BRCMF_IDLE_INTERVAL;
+	bus->idleclock = BRCMF_IDLE_ACTIVE;
+
+	/* Query the F2 block size, set roundup accordingly */
+	bus->blocksize = bus->sdiodev->func[2]->cur_blksize;
+	bus->roundup = min(max_roundup, bus->blocksize);
+
+	/* SR state */
+	bus->sleeping = false;
+	bus->sr_enabled = false;
 
 	brcmf_sdio_debugfs_create(bus);
 	brcmf_dbg(INFO, "completed!!\n");
 
-	/* sdio bus core specific dcmd */
-	idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
-	dlst = kzalloc(sizeof(struct brcmf_bus_dcmd), GFP_KERNEL);
-	if (dlst) {
-		if (bus->ci->c_inf[idx].rev < 12) {
-			/* for sdio core rev < 12, disable txgloming */
-			dngl_txglom = 0;
-			dlst->name = "bus:txglom";
-			dlst->param = (char *)&dngl_txglom;
-			dlst->param_len = sizeof(u32);
-		} else {
-			/* otherwise, set txglomalign */
-			if (sdiodev->pdata)
-				txglomalign = sdiodev->pdata->sd_sgentry_align;
-			/* SDIO ADMA requires at least 32 bit alignment */
-			if (txglomalign < 4)
-				txglomalign = 4;
-			dlst->name = "bus:txglomalign";
-			dlst->param = (char *)&txglomalign;
-			dlst->param_len = sizeof(u32);
-		}
-		list_add(&dlst->list, &bus->sdiodev->bus_if->dcmd_list);
-	}
-
 	/* if firmware path present try to download and bring up bus */
 	ret = brcmf_bus_start(bus->sdiodev->dev);
 	if (ret != 0) {
@@ -4037,24 +4014,55 @@ void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev)
 	return bus;
 
 fail:
-	brcmf_sdbrcm_release(bus);
+	brcmf_sdio_remove(bus);
 	return NULL;
 }
 
-void brcmf_sdbrcm_disconnect(void *ptr)
+/* Detach and free everything */
+void brcmf_sdio_remove(struct brcmf_sdio *bus)
 {
-	struct brcmf_sdio *bus = (struct brcmf_sdio *)ptr;
-
 	brcmf_dbg(TRACE, "Enter\n");
 
-	if (bus)
-		brcmf_sdbrcm_release(bus);
+	if (bus) {
+		/* De-register interrupt handler */
+		brcmf_sdiod_intr_unregister(bus->sdiodev);
+
+		cancel_work_sync(&bus->datawork);
+		if (bus->brcmf_wq)
+			destroy_workqueue(bus->brcmf_wq);
+
+		if (bus->sdiodev->bus_if->drvr) {
+			brcmf_detach(bus->sdiodev->dev);
+		}
+
+		if (bus->ci) {
+			if (bus->sdiodev->bus_if->state == BRCMF_BUS_DOWN) {
+				sdio_claim_host(bus->sdiodev->func[1]);
+				brcmf_sdio_clkctl(bus, CLK_AVAIL, false);
+				/* Leave the device in state where it is
+				 * 'quiet'. This is done by putting it in
+				 * download_state which essentially resets
+				 * all necessary cores.
+				 */
+				msleep(20);
+				brcmf_sdio_chip_enter_download(bus->sdiodev,
+							       bus->ci);
+				brcmf_sdio_clkctl(bus, CLK_NONE, false);
+				sdio_release_host(bus->sdiodev->func[1]);
+			}
+			brcmf_sdio_chip_detach(&bus->ci);
+		}
+
+		brcmu_pkt_buf_free_skb(bus->txglom_sgpad);
+		kfree(bus->rxbuf);
+		kfree(bus->hdrbuf);
+		kfree(bus);
+	}
 
 	brcmf_dbg(TRACE, "Disconnected\n");
 }
 
-void
-brcmf_sdbrcm_wd_timer(struct brcmf_sdio *bus, uint wdtick)
+void brcmf_sdio_wd_timer(struct brcmf_sdio *bus, uint wdtick)
 {
 	/* Totally stop the timer */
 	if (!wdtick && bus->wd_timer_valid) {
@@ -4065,7 +4073,7 @@ brcmf_sdbrcm_wd_timer(struct brcmf_sdio *bus, uint wdtick)
 	}
 
 	/* don't start the wd until fw is loaded */
-	if (bus->sdiodev->bus_if->state == BRCMF_BUS_DOWN)
+	if (bus->sdiodev->bus_if->state != BRCMF_BUS_DATA)
 		return;
 
 	if (wdtick) {
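
The hdrbuf handling above over-allocates by bus->head_align bytes and then rounds the pointer up so the receive header starts on the platform's required boundary. A minimal user-space sketch of that alignment trick, with illustrative sizes standing in for MAX_HDR_READ and bus->head_align:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Round v up to the next multiple of align; works for any positive
 * alignment, mirroring the kernel's roundup(). */
static uintptr_t round_up_to(uintptr_t v, uintptr_t align)
{
	return ((v + align - 1) / align) * align;
}

int main(void)
{
	const size_t max_hdr_read = 16;	/* stand-in for MAX_HDR_READ */
	const size_t head_align = 8;	/* stand-in for bus->head_align */
	uint8_t *hdrbuf, *rxhdr;

	/* Over-allocate by the alignment so an aligned pointer always fits. */
	hdrbuf = calloc(1, max_hdr_read + head_align);
	if (!hdrbuf)
		return 1;

	rxhdr = (uint8_t *)round_up_to((uintptr_t)hdrbuf, head_align);
	printf("hdrbuf=%p rxhdr=%p (aligned to %zu bytes)\n",
	       (void *)hdrbuf, (void *)rxhdr, head_align);
	free(hdrbuf);
	return 0;
}
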
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fweh.h b/drivers/net/wireless/brcm80211/brcmfmac/fweh.h
index 14bc24dc5bae..51b53a73d074 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fweh.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fweh.h
@@ -122,6 +122,52 @@ enum brcmf_fweh_event_code {
 #define BRCMF_EVENT_MSG_FLUSHTXQ	0x02
 #define BRCMF_EVENT_MSG_GROUP		0x04
 
+/* status field values in struct brcmf_event_msg */
+#define BRCMF_E_STATUS_SUCCESS			0
+#define BRCMF_E_STATUS_FAIL			1
+#define BRCMF_E_STATUS_TIMEOUT			2
+#define BRCMF_E_STATUS_NO_NETWORKS		3
+#define BRCMF_E_STATUS_ABORT			4
+#define BRCMF_E_STATUS_NO_ACK			5
+#define BRCMF_E_STATUS_UNSOLICITED		6
+#define BRCMF_E_STATUS_ATTEMPT			7
+#define BRCMF_E_STATUS_PARTIAL			8
+#define BRCMF_E_STATUS_NEWSCAN			9
+#define BRCMF_E_STATUS_NEWASSOC			10
+#define BRCMF_E_STATUS_11HQUIET			11
+#define BRCMF_E_STATUS_SUPPRESS			12
+#define BRCMF_E_STATUS_NOCHANS			13
+#define BRCMF_E_STATUS_CS_ABORT			15
+#define BRCMF_E_STATUS_ERROR			16
+
+/* reason field values in struct brcmf_event_msg */
+#define BRCMF_E_REASON_INITIAL_ASSOC		0
+#define BRCMF_E_REASON_LOW_RSSI			1
+#define BRCMF_E_REASON_DEAUTH			2
+#define BRCMF_E_REASON_DISASSOC			3
+#define BRCMF_E_REASON_BCNS_LOST		4
+#define BRCMF_E_REASON_MINTXRATE		9
+#define BRCMF_E_REASON_TXFAIL			10
+
+#define BRCMF_E_REASON_LINK_BSSCFG_DIS		4
+#define BRCMF_E_REASON_FAST_ROAM_FAILED		5
+#define BRCMF_E_REASON_DIRECTED_ROAM		6
+#define BRCMF_E_REASON_TSPEC_REJECTED		7
+#define BRCMF_E_REASON_BETTER_AP		8
+
+/* action field values for brcmf_ifevent */
+#define BRCMF_E_IF_ADD				1
+#define BRCMF_E_IF_DEL				2
+#define BRCMF_E_IF_CHANGE			3
+
+/* flag field values for brcmf_ifevent */
+#define BRCMF_E_IF_FLAG_NOIF			1
+
+/* role field values for brcmf_ifevent */
+#define BRCMF_E_IF_ROLE_STA			0
+#define BRCMF_E_IF_ROLE_AP			1
+#define BRCMF_E_IF_ROLE_WDS			2
+
 /**
  * definitions for event packet validation.
  */
@@ -160,6 +206,14 @@ struct brcmf_event_msg {
 	u8 bsscfgidx;
 };
 
+struct brcmf_if_event {
+	u8 ifidx;
+	u8 action;
+	u8 flags;
+	u8 bssidx;
+	u8 role;
+};
+
 typedef int (*brcmf_fweh_handler_t)(struct brcmf_if *ifp,
 				    const struct brcmf_event_msg *evtmsg,
 				    void *data);
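
The brcmf_if_event structure and the BRCMF_E_IF_* action codes added above describe interface add/delete/change notifications coming from firmware. A sketch of how such an event might be decoded; the handler and its output are illustrative only, not the driver's actual event path:

#include <stdio.h>

/* Mirrors the new defines and struct added to fweh.h above. */
#define BRCMF_E_IF_ADD		1
#define BRCMF_E_IF_DEL		2
#define BRCMF_E_IF_CHANGE	3

struct brcmf_if_event {
	unsigned char ifidx;
	unsigned char action;
	unsigned char flags;
	unsigned char bssidx;
	unsigned char role;
};

/* Illustrative dispatcher, not driver code. */
static void handle_if_event(const struct brcmf_if_event *e)
{
	switch (e->action) {
	case BRCMF_E_IF_ADD:
		printf("add interface idx=%u bssidx=%u role=%u\n",
		       e->ifidx, e->bssidx, e->role);
		break;
	case BRCMF_E_IF_DEL:
		printf("delete interface idx=%u\n", e->ifidx);
		break;
	case BRCMF_E_IF_CHANGE:
		printf("change interface idx=%u\n", e->ifidx);
		break;
	default:
		printf("unknown action %u\n", e->action);
	}
}

int main(void)
{
	struct brcmf_if_event ev = { .ifidx = 1, .action = BRCMF_E_IF_ADD,
				     .flags = 0, .bssidx = 0, .role = 0 };
	handle_if_event(&ev);
	return 0;
}
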
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwil.c b/drivers/net/wireless/brcm80211/brcmfmac/fwil.c
index 04f395930d86..22adbe311d20 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fwil.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fwil.c
@@ -27,6 +27,7 @@
 #include "dhd_dbg.h"
 #include "tracepoint.h"
 #include "fwil.h"
+#include "proto.h"
 
 
 #define MAX_HEX_DUMP_LEN	64
@@ -46,11 +47,9 @@ brcmf_fil_cmd_data(struct brcmf_if *ifp, u32 cmd, void *data, u32 len, bool set)
 	if (data != NULL)
 		len = min_t(uint, len, BRCMF_DCMD_MAXLEN);
 	if (set)
-		err = brcmf_proto_cdc_set_dcmd(drvr, ifp->ifidx, cmd, data,
-					       len);
+		err = brcmf_proto_set_dcmd(drvr, ifp->ifidx, cmd, data, len);
 	else
-		err = brcmf_proto_cdc_query_dcmd(drvr, ifp->ifidx, cmd, data,
-						 len);
+		err = brcmf_proto_query_dcmd(drvr, ifp->ifidx, cmd, data, len);
 
 	if (err >= 0)
 		err = 0;
@@ -69,7 +68,7 @@ brcmf_fil_cmd_data_set(struct brcmf_if *ifp, u32 cmd, void *data, u32 len)
 
 	brcmf_dbg(FIL, "cmd=%d, len=%d\n", cmd, len);
 	brcmf_dbg_hex_dump(BRCMF_FIL_ON(), data,
-			   min_t(uint, len, MAX_HEX_DUMP_LEN), "data");
+			   min_t(uint, len, MAX_HEX_DUMP_LEN), "data\n");
 
 	err = brcmf_fil_cmd_data(ifp, cmd, data, len, true);
 	mutex_unlock(&ifp->drvr->proto_block);
@@ -87,7 +86,7 @@ brcmf_fil_cmd_data_get(struct brcmf_if *ifp, u32 cmd, void *data, u32 len)
 
 	brcmf_dbg(FIL, "cmd=%d, len=%d\n", cmd, len);
 	brcmf_dbg_hex_dump(BRCMF_FIL_ON(), data,
-			   min_t(uint, len, MAX_HEX_DUMP_LEN), "data");
+			   min_t(uint, len, MAX_HEX_DUMP_LEN), "data\n");
 
 	mutex_unlock(&ifp->drvr->proto_block);
 
@@ -156,7 +155,7 @@ brcmf_fil_iovar_data_set(struct brcmf_if *ifp, char *name, void *data,
 
 	brcmf_dbg(FIL, "name=%s, len=%d\n", name, len);
 	brcmf_dbg_hex_dump(BRCMF_FIL_ON(), data,
-			   min_t(uint, len, MAX_HEX_DUMP_LEN), "data");
+			   min_t(uint, len, MAX_HEX_DUMP_LEN), "data\n");
 
 	buflen = brcmf_create_iovar(name, data, len, drvr->proto_buf,
 				    sizeof(drvr->proto_buf));
@@ -196,7 +195,7 @@ brcmf_fil_iovar_data_get(struct brcmf_if *ifp, char *name, void *data,
 
 	brcmf_dbg(FIL, "name=%s, len=%d\n", name, len);
 	brcmf_dbg_hex_dump(BRCMF_FIL_ON(), data,
-			   min_t(uint, len, MAX_HEX_DUMP_LEN), "data");
+			   min_t(uint, len, MAX_HEX_DUMP_LEN), "data\n");
 
 	mutex_unlock(&drvr->proto_block);
 	return err;
@@ -279,7 +278,7 @@ brcmf_fil_bsscfg_data_set(struct brcmf_if *ifp, char *name,
 
 	brcmf_dbg(FIL, "bssidx=%d, name=%s, len=%d\n", ifp->bssidx, name, len);
 	brcmf_dbg_hex_dump(BRCMF_FIL_ON(), data,
-			   min_t(uint, len, MAX_HEX_DUMP_LEN), "data");
+			   min_t(uint, len, MAX_HEX_DUMP_LEN), "data\n");
 
 	buflen = brcmf_create_bsscfg(ifp->bssidx, name, data, len,
 				     drvr->proto_buf, sizeof(drvr->proto_buf));
@@ -318,7 +317,7 @@ brcmf_fil_bsscfg_data_get(struct brcmf_if *ifp, char *name,
 	}
 	brcmf_dbg(FIL, "bssidx=%d, name=%s, len=%d\n", ifp->bssidx, name, len);
 	brcmf_dbg_hex_dump(BRCMF_FIL_ON(), data,
-			   min_t(uint, len, MAX_HEX_DUMP_LEN), "data");
+			   min_t(uint, len, MAX_HEX_DUMP_LEN), "data\n");
 
 	mutex_unlock(&drvr->proto_block);
 	return err;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwil.h b/drivers/net/wireless/brcm80211/brcmfmac/fwil.h
index 16eb8202fb1e..77eae86e55c2 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fwil.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fwil.h
@@ -17,6 +17,67 @@
 #ifndef _fwil_h_
 #define _fwil_h_
 
+/*******************************************************************************
+ * Dongle command codes that are interpreted by firmware
+ ******************************************************************************/
+#define BRCMF_C_GET_VERSION			1
+#define BRCMF_C_UP				2
+#define BRCMF_C_DOWN				3
+#define BRCMF_C_SET_PROMISC			10
+#define BRCMF_C_GET_RATE			12
+#define BRCMF_C_GET_INFRA			19
+#define BRCMF_C_SET_INFRA			20
+#define BRCMF_C_GET_AUTH			21
+#define BRCMF_C_SET_AUTH			22
+#define BRCMF_C_GET_BSSID			23
+#define BRCMF_C_GET_SSID			25
+#define BRCMF_C_SET_SSID			26
+#define BRCMF_C_TERMINATED			28
+#define BRCMF_C_GET_CHANNEL			29
+#define BRCMF_C_SET_CHANNEL			30
+#define BRCMF_C_GET_SRL				31
+#define BRCMF_C_SET_SRL				32
+#define BRCMF_C_GET_LRL				33
+#define BRCMF_C_SET_LRL				34
+#define BRCMF_C_GET_RADIO			37
+#define BRCMF_C_SET_RADIO			38
+#define BRCMF_C_GET_PHYTYPE			39
+#define BRCMF_C_SET_KEY				45
+#define BRCMF_C_SET_PASSIVE_SCAN		49
+#define BRCMF_C_SCAN				50
+#define BRCMF_C_SCAN_RESULTS			51
+#define BRCMF_C_DISASSOC			52
+#define BRCMF_C_REASSOC				53
+#define BRCMF_C_SET_ROAM_TRIGGER		55
+#define BRCMF_C_SET_ROAM_DELTA			57
+#define BRCMF_C_GET_BCNPRD			75
+#define BRCMF_C_SET_BCNPRD			76
+#define BRCMF_C_GET_DTIMPRD			77
+#define BRCMF_C_SET_DTIMPRD			78
+#define BRCMF_C_SET_COUNTRY			84
+#define BRCMF_C_GET_PM				85
+#define BRCMF_C_SET_PM				86
+#define BRCMF_C_GET_CURR_RATESET		114
+#define BRCMF_C_GET_AP				117
+#define BRCMF_C_SET_AP				118
+#define BRCMF_C_GET_RSSI			127
+#define BRCMF_C_GET_WSEC			133
+#define BRCMF_C_SET_WSEC			134
+#define BRCMF_C_GET_PHY_NOISE			135
+#define BRCMF_C_GET_BSS_INFO			136
+#define BRCMF_C_GET_BANDLIST			140
+#define BRCMF_C_SET_SCB_TIMEOUT			158
+#define BRCMF_C_GET_PHYLIST			180
+#define BRCMF_C_SET_SCAN_CHANNEL_TIME		185
+#define BRCMF_C_SET_SCAN_UNASSOC_TIME		187
+#define BRCMF_C_SCB_DEAUTHENTICATE_FOR_REASON	201
+#define BRCMF_C_GET_VALID_CHANNELS		217
+#define BRCMF_C_GET_KEY_PRIMARY			235
+#define BRCMF_C_SET_KEY_PRIMARY			236
+#define BRCMF_C_SET_SCAN_PASSIVE_TIME		258
+#define BRCMF_C_GET_VAR				262
+#define BRCMF_C_SET_VAR				263
+
 s32 brcmf_fil_cmd_data_set(struct brcmf_if *ifp, u32 cmd, void *data, u32 len);
 s32 brcmf_fil_cmd_data_get(struct brcmf_if *ifp, u32 cmd, void *data, u32 len);
 s32 brcmf_fil_cmd_int_set(struct brcmf_if *ifp, u32 cmd, u32 data);
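
The BRCMF_C_* values above are raw dongle command codes passed through brcmf_fil_cmd_data_set()/_get(). A small stand-alone lookup table, useful only as a debugging aid and not part of the driver, shows a few of them in use:

#include <stddef.h>
#include <stdio.h>

struct cmd_name { unsigned int code; const char *name; };

static const struct cmd_name cmd_names[] = {
	{ 2,   "BRCMF_C_UP" },
	{ 3,   "BRCMF_C_DOWN" },
	{ 26,  "BRCMF_C_SET_SSID" },
	{ 50,  "BRCMF_C_SCAN" },
	{ 262, "BRCMF_C_GET_VAR" },
	{ 263, "BRCMF_C_SET_VAR" },
};

static const char *cmd_to_name(unsigned int code)
{
	size_t i;

	for (i = 0; i < sizeof(cmd_names) / sizeof(cmd_names[0]); i++)
		if (cmd_names[i].code == code)
			return cmd_names[i].name;
	return "unknown";
}

int main(void)
{
	printf("cmd 26 is %s\n", cmd_to_name(26));
	return 0;
}
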
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h b/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h
index ecabb04f33c3..af17a5bc8b83 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h
@@ -29,6 +29,24 @@
 #define BRCMF_ARP_OL_HOST_AUTO_REPLY	0x00000004
 #define BRCMF_ARP_OL_PEER_AUTO_REPLY	0x00000008
 
+#define	BRCMF_BSS_INFO_VERSION	109 /* curr ver of brcmf_bss_info_le struct */
+#define BRCMF_BSS_RSSI_ON_CHANNEL	0x0002
+
+#define BRCMF_STA_ASSOC			0x10		/* Associated */
+
+/* size of brcmf_scan_params not including variable length array */
+#define BRCMF_SCAN_PARAMS_FIXED_SIZE	64
+
+/* masks for channel and ssid count */
+#define BRCMF_SCAN_PARAMS_COUNT_MASK	0x0000ffff
+#define BRCMF_SCAN_PARAMS_NSSID_SHIFT	16
+
+/* primary (ie tx) key */
+#define BRCMF_PRIMARY_KEY		(1 << 1)
+#define DOT11_BSSTYPE_ANY		2
+#define BRCMF_ESCAN_REQ_VERSION		1
+
+#define BRCMF_MAXRATES_IN_SET		16	/* max # of rates in rateset */
 
 enum brcmf_fil_p2p_if_types {
 	BRCMF_FIL_P2P_IF_CLIENT,
@@ -90,4 +108,290 @@ enum brcmf_tdls_manual_ep_ops {
 	BRCMF_TDLS_MANUAL_EP_DISCOVERY = 6
 };
 
+/* Pattern matching filter. Specifies an offset within received packets to
+ * start matching, the pattern to match, the size of the pattern, and a bitmask
+ * that indicates which bits within the pattern should be matched.
+ */
+struct brcmf_pkt_filter_pattern_le {
+	/*
+	 * Offset within received packet to start pattern matching.
+	 * Offset '0' is the first byte of the ethernet header.
+	 */
+	__le32 offset;
+	/* Size of the pattern. Bitmask must be the same size. */
+	__le32 size_bytes;
+	/*
+	 * Variable length mask and pattern data. mask starts at offset 0.
+	 * Pattern immediately follows mask.
+	 */
+	u8 mask_and_pattern[1];
+};
+
+/* IOVAR "pkt_filter_add" parameter. Used to install packet filters. */
+struct brcmf_pkt_filter_le {
+	__le32 id;		/* Unique filter id, specified by app. */
+	__le32 type;		/* Filter type (WL_PKT_FILTER_TYPE_xxx). */
+	__le32 negate_match;	/* Negate the result of filter matches */
+	union {			/* Filter definitions */
+		struct brcmf_pkt_filter_pattern_le pattern; /* Filter pattern */
+	} u;
+};
+
+/* IOVAR "pkt_filter_enable" parameter. */
+struct brcmf_pkt_filter_enable_le {
+	__le32 id;		/* Unique filter id */
+	__le32 enable;		/* Enable/disable bool */
+};
+
+/* BSS info structure
+ * Applications MUST CHECK ie_offset field and length field to access IEs and
+ * next bss_info structure in a vector (in struct brcmf_scan_results)
+ */
+struct brcmf_bss_info_le {
+	__le32 version;		/* version field */
+	__le32 length;		/* byte length of data in this record,
+				 * starting at version and including IEs
+				 */
+	u8 BSSID[ETH_ALEN];
+	__le16 beacon_period;	/* units are Kusec */
+	__le16 capability;	/* Capability information */
+	u8 SSID_len;
+	u8 SSID[32];
+	struct {
+		__le32 count;   /* # rates in this set */
+		u8 rates[16]; /* rates in 500kbps units w/hi bit set if basic */
+	} rateset;		/* supported rates */
+	__le16 chanspec;	/* chanspec for bss */
+	__le16 atim_window;	/* units are Kusec */
+	u8 dtim_period;	/* DTIM period */
+	__le16 RSSI;		/* receive signal strength (in dBm) */
+	s8 phy_noise;		/* noise (in dBm) */
+
+	u8 n_cap;		/* BSS is 802.11N Capable */
+	/* 802.11N BSS Capabilities (based on HT_CAP_*): */
+	__le32 nbss_cap;
+	u8 ctl_ch;		/* 802.11N BSS control channel number */
+	__le32 reserved32[1];	/* Reserved for expansion of BSS properties */
+	u8 flags;		/* flags */
+	u8 reserved[3];	/* Reserved for expansion of BSS properties */
+	u8 basic_mcs[MCSSET_LEN];	/* 802.11N BSS required MCS set */
+
+	__le16 ie_offset;	/* offset at which IEs start, from beginning */
+	__le32 ie_length;	/* byte length of Information Elements */
+	__le16 SNR;		/* average SNR during frame reception */
+	/* Add new fields here */
+	/* variable length Information Elements */
+};
+
+struct brcm_rateset_le {
+	/* # rates in this set */
+	__le32 count;
+	/* rates in 500kbps units w/hi bit set if basic */
+	u8 rates[BRCMF_MAXRATES_IN_SET];
+};
+
+struct brcmf_ssid {
+	u32 SSID_len;
+	unsigned char SSID[32];
+};
+
+struct brcmf_ssid_le {
+	__le32 SSID_len;
+	unsigned char SSID[32];
+};
+
+struct brcmf_scan_params_le {
+	struct brcmf_ssid_le ssid_le;	/* default: {0, ""} */
+	u8 bssid[ETH_ALEN];	/* default: bcast */
+	s8 bss_type;		/* default: any,
+				 * DOT11_BSSTYPE_ANY/INFRASTRUCTURE/INDEPENDENT
+				 */
+	u8 scan_type;	/* flags, 0 use default */
+	__le32 nprobes;	  /* -1 use default, number of probes per channel */
+	__le32 active_time;	/* -1 use default, dwell time per channel for
+				 * active scanning
+				 */
+	__le32 passive_time;	/* -1 use default, dwell time per channel
+				 * for passive scanning
+				 */
+	__le32 home_time;	/* -1 use default, dwell time for the
+				 * home channel between channel scans
+				 */
+	__le32 channel_num;	/* count of channels and ssids that follow
+				 *
+				 * low half is count of channels in
+				 * channel_list, 0 means default (use all
+				 * available channels)
+				 *
+				 * high half is entries in struct brcmf_ssid
+				 * array that follows channel_list, aligned for
+				 * s32 (4 bytes) meaning an odd channel count
+				 * implies a 2-byte pad between end of
+				 * channel_list and first ssid
+				 *
+				 * if ssid count is zero, single ssid in the
+				 * fixed parameter portion is assumed, otherwise
+				 * ssid in the fixed portion is ignored
+				 */
+	__le16 channel_list[1];	/* list of chanspecs */
+};
+
+struct brcmf_scan_results {
+	u32 buflen;
+	u32 version;
+	u32 count;
+	struct brcmf_bss_info_le bss_info_le[];
+};
+
+struct brcmf_escan_params_le {
+	__le32 version;
+	__le16 action;
+	__le16 sync_id;
+	struct brcmf_scan_params_le params_le;
+};
+
+struct brcmf_escan_result_le {
+	__le32 buflen;
+	__le32 version;
+	__le16 sync_id;
+	__le16 bss_count;
+	struct brcmf_bss_info_le bss_info_le;
+};
+
+#define WL_ESCAN_RESULTS_FIXED_SIZE (sizeof(struct brcmf_escan_result_le) - \
+	sizeof(struct brcmf_bss_info_le))
+
+/* used for association with a specific BSSID and chanspec list */
+struct brcmf_assoc_params_le {
+	/* 00:00:00:00:00:00: broadcast scan */
+	u8 bssid[ETH_ALEN];
+	/* 0: all available channels, otherwise count of chanspecs in
+	 * chanspec_list */
+	__le32 chanspec_num;
+	/* list of chanspecs */
+	__le16 chanspec_list[1];
+};
+
+/* used for join with or without a specific bssid and channel list */
+struct brcmf_join_params {
+	struct brcmf_ssid_le ssid_le;
+	struct brcmf_assoc_params_le params_le;
+};
+
+/* scan params for extended join */
+struct brcmf_join_scan_params_le {
+	u8 scan_type;		/* 0 use default, active or passive scan */
+	__le32 nprobes;		/* -1 use default, nr of probes per channel */
+	__le32 active_time;	/* -1 use default, dwell time per channel for
+				 * active scanning
+				 */
+	__le32 passive_time;	/* -1 use default, dwell time per channel
+				 * for passive scanning
+				 */
+	__le32 home_time;	/* -1 use default, dwell time for the home
+				 * channel between channel scans
+				 */
+};
+
+/* extended join params */
+struct brcmf_ext_join_params_le {
+	struct brcmf_ssid_le ssid_le;	/* {0, ""}: wildcard scan */
+	struct brcmf_join_scan_params_le scan_le;
+	struct brcmf_assoc_params_le assoc_le;
+};
+
+struct brcmf_wsec_key {
+	u32 index;		/* key index */
+	u32 len;		/* key length */
+	u8 data[WLAN_MAX_KEY_LEN];	/* key data */
+	u32 pad_1[18];
+	u32 algo;	/* CRYPTO_ALGO_AES_CCM, CRYPTO_ALGO_WEP128, etc */
+	u32 flags;	/* misc flags */
+	u32 pad_2[3];
+	u32 iv_initialized;	/* has IV been initialized already? */
+	u32 pad_3;
+	/* Rx IV */
+	struct {
+		u32 hi;	/* upper 32 bits of IV */
+		u16 lo;	/* lower 16 bits of IV */
+	} rxiv;
+	u32 pad_4[2];
+	u8 ea[ETH_ALEN];	/* per station */
+};
+
+/*
+ * dongle requires same struct as above but with fields in little endian order
+ */
+struct brcmf_wsec_key_le {
+	__le32 index;		/* key index */
+	__le32 len;		/* key length */
+	u8 data[WLAN_MAX_KEY_LEN];	/* key data */
+	__le32 pad_1[18];
+	__le32 algo;	/* CRYPTO_ALGO_AES_CCM, CRYPTO_ALGO_WEP128, etc */
+	__le32 flags;	/* misc flags */
+	__le32 pad_2[3];
+	__le32 iv_initialized;	/* has IV been initialized already? */
+	__le32 pad_3;
+	/* Rx IV */
+	struct {
+		__le32 hi;	/* upper 32 bits of IV */
+		__le16 lo;	/* lower 16 bits of IV */
+	} rxiv;
+	__le32 pad_4[2];
+	u8 ea[ETH_ALEN];	/* per station */
+};
+
+/* Used to get specific STA parameters */
+struct brcmf_scb_val_le {
+	__le32 val;
+	u8 ea[ETH_ALEN];
+};
+
+/* channel encoding */
+struct brcmf_channel_info_le {
+	__le32 hw_channel;
+	__le32 target_channel;
+	__le32 scan_channel;
+};
+
+struct brcmf_sta_info_le {
+	__le16	ver;		/* version of this struct */
+	__le16	len;		/* length in bytes of this structure */
+	__le16	cap;		/* sta's advertised capabilities */
+	__le32	flags;		/* flags defined below */
+	__le32	idle;		/* time since data pkt rx'd from sta */
+	u8	ea[ETH_ALEN];		/* Station address */
+	__le32	count;			/* # rates in this set */
+	u8	rates[BRCMF_MAXRATES_IN_SET];	/* rates in 500kbps units */
+						/* w/hi bit set if basic */
+	__le32	in;		/* seconds elapsed since associated */
+	__le32	listen_interval_inms; /* Min Listen interval in ms for STA */
+	__le32	tx_pkts;	/* # of packets transmitted */
+	__le32	tx_failures;	/* # of packets failed */
+	__le32	rx_ucast_pkts;	/* # of unicast packets received */
+	__le32	rx_mcast_pkts;	/* # of multicast packets received */
+	__le32	tx_rate;	/* Rate of last successful tx frame */
+	__le32	rx_rate;	/* Rate of last successful rx frame */
+	__le32	rx_decrypt_succeeds;	/* # of packets decrypted successfully */
+	__le32	rx_decrypt_failures;	/* # of packets that failed decryption */
+};
+
+struct brcmf_chanspec_list {
+	__le32	count;		/* # of entries */
+	__le32	element[1];	/* variable length uint32 list */
+};
+
+/*
+ * WLC_E_PROBRESP_MSG
+ * WLC_E_P2P_PROBREQ_MSG
+ * WLC_E_ACTION_FRAME_RX
+ */
+struct brcmf_rx_mgmt_data {
+	__be16	version;
+	__be16	chanspec;
+	__be32	rssi;
+	__be32	mactime;
+	__be32	rate;
+};
+
 #endif /* FWIL_TYPES_H_ */
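
The channel_num field of struct brcmf_scan_params_le packs two counts into one 32-bit word, as its comment describes: channels in the low half, SSIDs in the high half. A host-endian sketch of that packing (the driver converts to little endian before handing the structure to firmware):

#include <stdint.h>
#include <stdio.h>

/* Mirrors the masks defined in fwil_types.h above. */
#define BRCMF_SCAN_PARAMS_COUNT_MASK	0x0000ffff
#define BRCMF_SCAN_PARAMS_NSSID_SHIFT	16

static uint32_t pack_channel_num(uint16_t n_channels, uint16_t n_ssids)
{
	return (n_channels & BRCMF_SCAN_PARAMS_COUNT_MASK) |
	       ((uint32_t)n_ssids << BRCMF_SCAN_PARAMS_NSSID_SHIFT);
}

int main(void)
{
	uint32_t channel_num = pack_channel_num(11, 2);

	printf("channel_num = 0x%08x (channels=%u ssids=%u)\n",
	       channel_num,
	       channel_num & BRCMF_SCAN_PARAMS_COUNT_MASK,
	       channel_num >> BRCMF_SCAN_PARAMS_NSSID_SHIFT);
	return 0;
}
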
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c
index d0cd0bf95c5a..c3e7d76dbf35 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c
@@ -27,7 +27,6 @@
 #include <brcmu_utils.h>
 #include <brcmu_wifi.h>
 #include "dhd.h"
-#include "dhd_proto.h"
 #include "dhd_dbg.h"
 #include "dhd_bus.h"
 #include "fwil.h"
@@ -36,6 +35,7 @@
 #include "fwsignal.h"
 #include "p2p.h"
 #include "wl_cfg80211.h"
+#include "proto.h"
 
 /**
  * DOC: Firmware Signalling
@@ -105,6 +105,7 @@ static struct {
 };
 #undef BRCMF_FWS_TLV_DEF
 
+
 static const char *brcmf_fws_get_tlv_name(enum brcmf_fws_tlv_type id)
 {
 	int i;
@@ -123,6 +124,12 @@ static const char *brcmf_fws_get_tlv_name(enum brcmf_fws_tlv_type id)
 #endif /* DEBUG */
 
 /*
+ * The PKTTAG tlv has additional bytes when the firmware-signalling
+ * mode has the REUSESEQ flag set.
+ */
+#define BRCMF_FWS_TYPE_SEQ_LEN				2
+
+/*
  * flags used to enable tlv signalling from firmware.
  */
 #define BRCMF_FWS_FLAGS_RSSI_SIGNALS			0x0001
@@ -147,8 +154,15 @@ static const char *brcmf_fws_get_tlv_name(enum brcmf_fws_tlv_type id)
 #define BRCMF_FWS_HTOD_FLAG_PKTFROMHOST			0x01
 #define BRCMF_FWS_HTOD_FLAG_PKT_REQUESTED		0x02
 
-#define BRCMF_FWS_RET_OK_NOSCHEDULE	0
-#define BRCMF_FWS_RET_OK_SCHEDULE	1
+#define BRCMF_FWS_RET_OK_NOSCHEDULE			0
+#define BRCMF_FWS_RET_OK_SCHEDULE			1
+
+#define BRCMF_FWS_MODE_REUSESEQ_SHIFT			3	/* seq reuse */
+#define BRCMF_FWS_MODE_SET_REUSESEQ(x, val)	((x) = \
+		((x) & ~(1 << BRCMF_FWS_MODE_REUSESEQ_SHIFT)) | \
+		(((val) & 1) << BRCMF_FWS_MODE_REUSESEQ_SHIFT))
+#define BRCMF_FWS_MODE_GET_REUSESEQ(x)	\
+		(((x) >> BRCMF_FWS_MODE_REUSESEQ_SHIFT) & 1)
 
 /**
  * enum brcmf_fws_skb_state - indicates processing state of skb.
@@ -171,6 +185,7 @@ enum brcmf_fws_skb_state {
  * @bus_flags: 2 bytes reserved for bus specific parameters
  * @if_flags: holds interface index and packet related flags.
  * @htod: host to device packet identifier (used in PKTTAG tlv).
+ * @htod_seq: original 16-bit sequence number of a suppressed packet.
  * @state: transmit state of the packet.
  * @mac: descriptor related to destination for this packet.
  *
@@ -181,6 +196,7 @@ struct brcmf_skbuff_cb {
 	u16 bus_flags;
 	u16 if_flags;
 	u32 htod;
+	u16 htod_seq;
 	enum brcmf_fws_skb_state state;
 	struct brcmf_fws_mac_descriptor *mac;
 };
@@ -257,6 +273,22 @@ struct brcmf_skbuff_cb {
 			BRCMF_SKB_HTOD_TAG_ ## field ## _MASK, \
 			BRCMF_SKB_HTOD_TAG_ ## field ## _SHIFT)
 
+#define BRCMF_SKB_HTOD_SEQ_FROMFW_MASK			0x2000
+#define BRCMF_SKB_HTOD_SEQ_FROMFW_SHIFT			13
+#define BRCMF_SKB_HTOD_SEQ_FROMDRV_MASK			0x1000
+#define BRCMF_SKB_HTOD_SEQ_FROMDRV_SHIFT		12
+#define BRCMF_SKB_HTOD_SEQ_NR_MASK			0x0fff
+#define BRCMF_SKB_HTOD_SEQ_NR_SHIFT			0
+
+#define brcmf_skb_htod_seq_set_field(skb, field, value) \
+	brcmu_maskset16(&(brcmf_skbcb(skb)->htod_seq), \
+			BRCMF_SKB_HTOD_SEQ_ ## field ## _MASK, \
+			BRCMF_SKB_HTOD_SEQ_ ## field ## _SHIFT, (value))
+#define brcmf_skb_htod_seq_get_field(skb, field) \
+	brcmu_maskget16(brcmf_skbcb(skb)->htod_seq, \
+			BRCMF_SKB_HTOD_SEQ_ ## field ## _MASK, \
+			BRCMF_SKB_HTOD_SEQ_ ## field ## _SHIFT)
+
 #define BRCMF_FWS_TXSTAT_GENERATION_MASK	0x80000000
 #define BRCMF_FWS_TXSTAT_GENERATION_SHIFT	31
 #define BRCMF_FWS_TXSTAT_FLAGS_MASK		0x78000000
@@ -265,8 +297,8 @@ struct brcmf_skbuff_cb {
 #define BRCMF_FWS_TXSTAT_FIFO_SHIFT		24
 #define BRCMF_FWS_TXSTAT_HSLOT_MASK		0x00FFFF00
 #define BRCMF_FWS_TXSTAT_HSLOT_SHIFT		8
-#define BRCMF_FWS_TXSTAT_PKTID_MASK		0x00FFFFFF
-#define BRCMF_FWS_TXSTAT_PKTID_SHIFT		0
+#define BRCMF_FWS_TXSTAT_FREERUN_MASK		0x000000FF
+#define BRCMF_FWS_TXSTAT_FREERUN_SHIFT		0
 
 #define brcmf_txstatus_get_field(txs, field) \
 	brcmu_maskget32(txs, BRCMF_FWS_TXSTAT_ ## field ## _MASK, \
@@ -443,6 +475,7 @@ struct brcmf_fws_info {
 	unsigned long borrow_defer_timestamp;
 	bool bus_flow_blocked;
 	bool creditmap_received;
+	u8 mode;
 };
 
 /*
@@ -805,20 +838,23 @@ static void brcmf_fws_cleanup(struct brcmf_fws_info *fws, int ifidx)
 	brcmf_fws_hanger_cleanup(fws, matchfn, ifidx);
 }
 
-static int brcmf_fws_hdrpush(struct brcmf_fws_info *fws, struct sk_buff *skb)
+static u8 brcmf_fws_hdrpush(struct brcmf_fws_info *fws, struct sk_buff *skb)
 {
 	struct brcmf_fws_mac_descriptor *entry = brcmf_skbcb(skb)->mac;
 	u8 *wlh;
 	u16 data_offset = 0;
 	u8 fillers;
 	__le32 pkttag = cpu_to_le32(brcmf_skbcb(skb)->htod);
+	__le16 pktseq = cpu_to_le16(brcmf_skbcb(skb)->htod_seq);
 
-	brcmf_dbg(TRACE, "enter: %s, idx=%d pkttag=0x%08X, hslot=%d\n",
+	brcmf_dbg(TRACE, "enter: %s, idx=%d hslot=%d htod %X seq %X\n",
 		  entry->name, brcmf_skb_if_flags_get_field(skb, INDEX),
-		  le32_to_cpu(pkttag), (le32_to_cpu(pkttag) >> 8) & 0xffff);
+		  (le32_to_cpu(pkttag) >> 8) & 0xffff,
+		  brcmf_skbcb(skb)->htod, brcmf_skbcb(skb)->htod_seq);
 	if (entry->send_tim_signal)
 		data_offset += 2 + BRCMF_FWS_TYPE_PENDING_TRAFFIC_BMP_LEN;
-
+	if (BRCMF_FWS_MODE_GET_REUSESEQ(fws->mode))
+		data_offset += BRCMF_FWS_TYPE_SEQ_LEN;
 	/* +2 is for Type[1] and Len[1] in TLV, plus TIM signal */
 	data_offset += 2 + BRCMF_FWS_TYPE_PKTTAG_LEN;
 	fillers = round_up(data_offset, 4) - data_offset;
@@ -830,7 +866,12 @@ static int brcmf_fws_hdrpush(struct brcmf_fws_info *fws, struct sk_buff *skb)
 	wlh[0] = BRCMF_FWS_TYPE_PKTTAG;
 	wlh[1] = BRCMF_FWS_TYPE_PKTTAG_LEN;
 	memcpy(&wlh[2], &pkttag, sizeof(pkttag));
-	wlh += BRCMF_FWS_TYPE_PKTTAG_LEN + 2;
+	if (BRCMF_FWS_MODE_GET_REUSESEQ(fws->mode)) {
+		wlh[1] += BRCMF_FWS_TYPE_SEQ_LEN;
+		memcpy(&wlh[2 + BRCMF_FWS_TYPE_PKTTAG_LEN], &pktseq,
+		       sizeof(pktseq));
+	}
+	wlh += wlh[1] + 2;
 
 	if (entry->send_tim_signal) {
 		entry->send_tim_signal = 0;
@@ -846,9 +887,7 @@ static int brcmf_fws_hdrpush(struct brcmf_fws_info *fws, struct sk_buff *skb)
 	if (fillers)
 		memset(wlh, BRCMF_FWS_TYPE_FILLER, fillers);
 
-	brcmf_proto_hdrpush(fws->drvr, brcmf_skb_if_flags_get_field(skb, INDEX),
-			    data_offset >> 2, skb);
-	return 0;
+	return (u8)(data_offset >> 2);
 }
 
 static bool brcmf_fws_tim_update(struct brcmf_fws_info *fws,
@@ -856,10 +895,11 @@ static bool brcmf_fws_tim_update(struct brcmf_fws_info *fws,
 				 int fifo, bool send_immediately)
 {
 	struct sk_buff *skb;
-	struct brcmf_bus *bus;
 	struct brcmf_skbuff_cb *skcb;
 	s32 err;
 	u32 len;
+	u8 data_offset;
+	int ifidx;
 
 	/* check delayedQ and suppressQ in one call using bitmap */
 	if (brcmu_pktq_mlen(&entry->psq, 3 << (fifo * 2)) == 0)
@@ -875,6 +915,7 @@ static bool brcmf_fws_tim_update(struct brcmf_fws_info *fws,
 		/* create a dummy packet and sent that. The traffic          */
 		/* bitmap info will automatically be attached to that packet */
 		len = BRCMF_FWS_TYPE_PKTTAG_LEN + 2 +
+		      BRCMF_FWS_TYPE_SEQ_LEN +
 		      BRCMF_FWS_TYPE_PENDING_TRAFFIC_BMP_LEN + 2 +
 		      4 + fws->drvr->hdrlen;
 		skb = brcmu_pkt_buf_get_skb(len);
@@ -884,13 +925,13 @@ static bool brcmf_fws_tim_update(struct brcmf_fws_info *fws,
 		skcb = brcmf_skbcb(skb);
 		skcb->mac = entry;
 		skcb->state = BRCMF_FWS_SKBSTATE_TIM;
-		bus = fws->drvr->bus_if;
-		err = brcmf_fws_hdrpush(fws, skb);
-		if (err == 0) {
-			brcmf_fws_unlock(fws);
-			err = brcmf_bus_txdata(bus, skb);
-			brcmf_fws_lock(fws);
-		}
+		skcb->htod = 0;
+		skcb->htod_seq = 0;
+		data_offset = brcmf_fws_hdrpush(fws, skb);
+		ifidx = brcmf_skb_if_flags_get_field(skb, INDEX);
+		brcmf_fws_unlock(fws);
+		err = brcmf_proto_txdata(fws->drvr, ifidx, data_offset, skb);
+		brcmf_fws_lock(fws);
 		if (err)
 			brcmu_pkt_buf_free_skb(skb);
 		return true;
@@ -1172,8 +1213,13 @@ static int brcmf_fws_enq(struct brcmf_fws_info *fws,
 {
 	int prec = 2 * fifo;
 	u32 *qfull_stat = &fws->stats.delayq_full_error;
-
 	struct brcmf_fws_mac_descriptor *entry;
+	struct pktq *pq;
+	struct sk_buff_head *queue;
+	struct sk_buff *p_head;
+	struct sk_buff *p_tail;
+	u32 fr_new;
+	u32 fr_compare;
 
 	entry = brcmf_skbcb(p)->mac;
 	if (entry == NULL) {
@@ -1185,9 +1231,55 @@ static int brcmf_fws_enq(struct brcmf_fws_info *fws,
 	if (state == BRCMF_FWS_SKBSTATE_SUPPRESSED) {
 		prec += 1;
 		qfull_stat = &fws->stats.supprq_full_error;
-	}
 
-	if (brcmu_pktq_penq(&entry->psq, prec, p) == NULL) {
+		/* Fix out-of-order delivery of frames. Don't assume a frame */
+		/* can be inserted at the end, but look for correct position */
+		pq = &entry->psq;
+		if (pktq_full(pq) || pktq_pfull(pq, prec)) {
+			*qfull_stat += 1;
+			return -ENFILE;
+		}
+		queue = &pq->q[prec].skblist;
+
+		p_head = skb_peek(queue);
+		p_tail = skb_peek_tail(queue);
+		fr_new = brcmf_skb_htod_tag_get_field(p, FREERUN);
+
+		while (p_head != p_tail) {
+			fr_compare = brcmf_skb_htod_tag_get_field(p_tail,
+								  FREERUN);
+			/* be sure to handle wrap of 256 */
+			if (((fr_new > fr_compare) &&
+			     ((fr_new - fr_compare) < 128)) ||
+			    ((fr_new < fr_compare) &&
+			     ((fr_compare - fr_new) > 128)))
+				break;
+			p_tail = skb_queue_prev(queue, p_tail);
+		}
+		/* Position found. Determine what to do */
+		if (p_tail == NULL) {
+			/* empty list */
+			__skb_queue_tail(queue, p);
+		} else {
+			fr_compare = brcmf_skb_htod_tag_get_field(p_tail,
+								  FREERUN);
+			if (((fr_new > fr_compare) &&
+			     ((fr_new - fr_compare) < 128)) ||
+			    ((fr_new < fr_compare) &&
+			     ((fr_compare - fr_new) > 128))) {
+				/* After tail */
+				__skb_queue_after(queue, p_tail, p);
+			} else {
+				/* Before tail */
+				__skb_insert(p, p_tail->prev, p_tail, queue);
+			}
+		}
+
+		/* Complete the counters and statistics */
+		pq->len++;
+		if (pq->hi_prec < prec)
+			pq->hi_prec = (u8) prec;
+	} else if (brcmu_pktq_penq(&entry->psq, prec, p) == NULL) {
 		*qfull_stat += 1;
 		return -ENFILE;
 	}
@@ -1277,7 +1369,8 @@ done:
 }
 
 static int brcmf_fws_txstatus_suppressed(struct brcmf_fws_info *fws, int fifo,
-					 struct sk_buff *skb, u32 genbit)
+					 struct sk_buff *skb, u32 genbit,
+					 u16 seq)
 {
 	struct brcmf_fws_mac_descriptor *entry = brcmf_skbcb(skb)->mac;
 	u32 hslot;
@@ -1297,9 +1390,19 @@ static int brcmf_fws_txstatus_suppressed(struct brcmf_fws_info *fws, int fifo,
 	entry->generation = genbit;
 
 	ret = brcmf_proto_hdrpull(fws->drvr, false, &ifidx, skb);
-	if (ret == 0)
+	if (ret == 0) {
+		brcmf_skb_htod_tag_set_field(skb, GENERATION, genbit);
+		brcmf_skbcb(skb)->htod_seq = seq;
+		if (brcmf_skb_htod_seq_get_field(skb, FROMFW)) {
+			brcmf_skb_htod_seq_set_field(skb, FROMDRV, 1);
+			brcmf_skb_htod_seq_set_field(skb, FROMFW, 0);
+		} else {
+			brcmf_skb_htod_seq_set_field(skb, FROMDRV, 0);
+		}
 		ret = brcmf_fws_enq(fws, BRCMF_FWS_SKBSTATE_SUPPRESSED, fifo,
 				    skb);
+	}
+
 	if (ret != 0) {
 		/* suppress q is full or hdrpull failed, drop this packet */
 		brcmf_fws_hanger_poppkt(&fws->hanger, hslot, &skb,
@@ -1317,7 +1420,7 @@ static int brcmf_fws_txstatus_suppressed(struct brcmf_fws_info *fws, int fifo,
 
 static int
 brcmf_fws_txs_process(struct brcmf_fws_info *fws, u8 flags, u32 hslot,
-			   u32 genbit)
+		      u32 genbit, u16 seq)
 {
 	u32 fifo;
 	int ret;
@@ -1360,8 +1463,8 @@ brcmf_fws_txs_process(struct brcmf_fws_info *fws, u8 flags, u32 hslot,
 	if (entry->suppressed && entry->suppr_transit_count)
 		entry->suppr_transit_count--;
 
-	brcmf_dbg(DATA, "%s flags %X htod %X\n", entry->name, skcb->if_flags,
-		  skcb->htod);
+	brcmf_dbg(DATA, "%s flags %d htod %X seq %X\n", entry->name, flags,
+		  skcb->htod, seq);
 
 	/* pick up the implicit credit from this packet */
 	fifo = brcmf_skb_htod_tag_get_field(skb, FIFO);
@@ -1374,7 +1477,8 @@ brcmf_fws_txs_process(struct brcmf_fws_info *fws, u8 flags, u32 hslot,
 	brcmf_fws_macdesc_return_req_credit(skb);
 
 	if (!remove_from_hanger)
-		ret = brcmf_fws_txstatus_suppressed(fws, fifo, skb, genbit);
+		ret = brcmf_fws_txstatus_suppressed(fws, fifo, skb, genbit,
+						    seq);
 
 	if (remove_from_hanger || ret)
 		brcmf_txfinalize(fws->drvr, skb, true);
@@ -1406,10 +1510,12 @@ static int brcmf_fws_fifocreditback_indicate(struct brcmf_fws_info *fws,
 static int brcmf_fws_txstatus_indicate(struct brcmf_fws_info *fws, u8 *data)
 {
 	__le32 status_le;
+	__le16 seq_le;
 	u32 status;
 	u32 hslot;
 	u32 genbit;
 	u8 flags;
+	u16 seq;
 
 	fws->stats.txs_indicate++;
 	memcpy(&status_le, data, sizeof(status_le));
@@ -1417,9 +1523,16 @@ static int brcmf_fws_txstatus_indicate(struct brcmf_fws_info *fws, u8 *data)
 	flags = brcmf_txstatus_get_field(status, FLAGS);
 	hslot = brcmf_txstatus_get_field(status, HSLOT);
 	genbit = brcmf_txstatus_get_field(status, GENERATION);
+	if (BRCMF_FWS_MODE_GET_REUSESEQ(fws->mode)) {
+		memcpy(&seq_le, &data[BRCMF_FWS_TYPE_PKTTAG_LEN],
+		       sizeof(seq_le));
+		seq = le16_to_cpu(seq_le);
+	} else {
+		seq = 0;
+	}
 
 	brcmf_fws_lock(fws);
-	brcmf_fws_txs_process(fws, flags, hslot, genbit);
+	brcmf_fws_txs_process(fws, flags, hslot, genbit, seq);
 	brcmf_fws_unlock(fws);
 	return BRCMF_FWS_RET_OK_NOSCHEDULE;
 }
@@ -1603,15 +1716,15 @@ int brcmf_fws_hdrpull(struct brcmf_pub *drvr, int ifidx, s16 signal_len,
 	return 0;
 }
 
-static void brcmf_fws_precommit_skb(struct brcmf_fws_info *fws, int fifo,
+static u8 brcmf_fws_precommit_skb(struct brcmf_fws_info *fws, int fifo,
 				   struct sk_buff *p)
 {
 	struct brcmf_skbuff_cb *skcb = brcmf_skbcb(p);
 	struct brcmf_fws_mac_descriptor *entry = skcb->mac;
 	u8 flags;
 
-	brcmf_skb_if_flags_set_field(p, TRANSMIT, 1);
-	brcmf_skb_htod_tag_set_field(p, GENERATION, entry->generation);
+	if (skcb->state != BRCMF_FWS_SKBSTATE_SUPPRESSED)
+		brcmf_skb_htod_tag_set_field(p, GENERATION, entry->generation);
 	flags = BRCMF_FWS_HTOD_FLAG_PKTFROMHOST;
 	if (brcmf_skb_if_flags_get_field(p, REQUESTED)) {
 		/*
@@ -1621,7 +1734,7 @@ static void brcmf_fws_precommit_skb(struct brcmf_fws_info *fws, int fifo,
 		flags |= BRCMF_FWS_HTOD_FLAG_PKT_REQUESTED;
 	}
 	brcmf_skb_htod_tag_set_field(p, FLAGS, flags);
-	brcmf_fws_hdrpush(fws, p);
+	return brcmf_fws_hdrpush(fws, p);
 }
 
 static void brcmf_fws_rollback_toq(struct brcmf_fws_info *fws,
@@ -1652,7 +1765,7 @@ static void brcmf_fws_rollback_toq(struct brcmf_fws_info *fws,
 		fws->stats.rollback_failed++;
 		hslot = brcmf_skb_htod_tag_get_field(skb, HSLOT);
 		brcmf_fws_txs_process(fws, BRCMF_FWS_TXSTATUS_HOST_TOSSED,
-				      hslot, 0);
+				      hslot, 0, 0);
 	} else {
 		fws->stats.rollback_success++;
 		brcmf_fws_return_credits(fws, fifo, 1);
@@ -1689,20 +1802,21 @@ static int brcmf_fws_commit_skb(struct brcmf_fws_info *fws, int fifo,
 {
 	struct brcmf_skbuff_cb *skcb = brcmf_skbcb(skb);
 	struct brcmf_fws_mac_descriptor *entry;
-	struct brcmf_bus *bus = fws->drvr->bus_if;
 	int rc;
 	u8 ifidx;
+	u8 data_offset;
 
 	entry = skcb->mac;
 	if (IS_ERR(entry))
 		return PTR_ERR(entry);
 
-	brcmf_fws_precommit_skb(fws, fifo, skb);
+	data_offset = brcmf_fws_precommit_skb(fws, fifo, skb);
 	entry->transit_count++;
 	if (entry->suppressed)
 		entry->suppr_transit_count++;
+	ifidx = brcmf_skb_if_flags_get_field(skb, INDEX);
 	brcmf_fws_unlock(fws);
-	rc = brcmf_bus_txdata(bus, skb);
+	rc = brcmf_proto_txdata(fws->drvr, ifidx, data_offset, skb);
 	brcmf_fws_lock(fws);
 	brcmf_dbg(DATA, "%s flags %X htod %X bus_tx %d\n", entry->name,
 		  skcb->if_flags, skcb->htod, rc);
@@ -1732,6 +1846,8 @@ static int brcmf_fws_assign_htod(struct brcmf_fws_info *fws, struct sk_buff *p,
 	struct brcmf_skbuff_cb *skcb = brcmf_skbcb(p);
 	int rc, hslot;
 
+	skcb->htod = 0;
+	skcb->htod_seq = 0;
 	hslot = brcmf_fws_hanger_get_free_slot(&fws->hanger);
 	brcmf_skb_htod_tag_set_field(p, HSLOT, hslot);
 	brcmf_skb_htod_tag_set_field(p, FREERUN, skcb->mac->seq[fifo]);
@@ -1757,7 +1873,7 @@ int brcmf_fws_process_skb(struct brcmf_if *ifp, struct sk_buff *skb)
 	brcmf_dbg(DATA, "tx proto=0x%X\n", ntohs(eh->h_proto));
 	/* determine the priority */
 	if (!skb->priority)
-		skb->priority = cfg80211_classify8021d(skb);
+		skb->priority = cfg80211_classify8021d(skb, NULL);
 
 	drvr->tx_multicast += !!multicast;
 	if (pae)
@@ -1861,10 +1977,9 @@ static void brcmf_fws_dequeue_worker(struct work_struct *worker)
 							&skb, true);
 				ifidx = brcmf_skb_if_flags_get_field(skb,
 								     INDEX);
-				brcmf_proto_hdrpush(drvr, ifidx, 0, skb);
-				/* Use bus module to send data frame */
+				/* Use proto layer to send data frame */
 				brcmf_fws_unlock(fws);
-				ret = brcmf_bus_txdata(drvr->bus_if, skb);
+				ret = brcmf_proto_txdata(drvr, ifidx, 0, skb);
 				brcmf_fws_lock(fws);
 				if (ret < 0)
 					brcmf_txfinalize(drvr, skb, false);
@@ -1908,6 +2023,7 @@ int brcmf_fws_init(struct brcmf_pub *drvr)
 	struct brcmf_fws_info *fws;
 	u32 tlv = BRCMF_FWS_FLAGS_RSSI_SIGNALS;
 	int rc;
+	u32 mode;
 
 	drvr->fws = kzalloc(sizeof(*(drvr->fws)), GFP_KERNEL);
 	if (!drvr->fws) {
@@ -1966,6 +2082,18 @@ int brcmf_fws_init(struct brcmf_pub *drvr)
 	if (brcmf_fil_iovar_int_set(drvr->iflist[0], "ampdu_hostreorder", 1))
 		brcmf_dbg(INFO, "enabling AMPDU host-reorder failed\n");
 
+	/* Enable seq number reuse, if supported */
+	if (brcmf_fil_iovar_int_get(drvr->iflist[0], "wlfc_mode", &mode) == 0) {
+		if (BRCMF_FWS_MODE_GET_REUSESEQ(mode)) {
+			mode = 0;
+			BRCMF_FWS_MODE_SET_REUSESEQ(mode, 1);
+			if (brcmf_fil_iovar_int_set(drvr->iflist[0],
+						    "wlfc_mode", mode) == 0) {
+				BRCMF_FWS_MODE_SET_REUSESEQ(fws->mode, 1);
+			}
+		}
+	}
+
 	brcmf_fws_hanger_init(&fws->hanger);
 	brcmf_fws_macdesc_init(&fws->desc.other, NULL, 0);
 	brcmf_fws_macdesc_set_name(fws, &fws->desc.other);
@@ -2022,7 +2150,7 @@ void brcmf_fws_bustxfail(struct brcmf_fws_info *fws, struct sk_buff *skb)
 	}
 	brcmf_fws_lock(fws);
 	hslot = brcmf_skb_htod_tag_get_field(skb, HSLOT);
-	brcmf_fws_txs_process(fws, BRCMF_FWS_TXSTATUS_HOST_TOSSED, hslot, 0);
+	brcmf_fws_txs_process(fws, BRCMF_FWS_TXSTATUS_HOST_TOSSED, hslot, 0, 0);
 	brcmf_fws_unlock(fws);
 }
 
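
The suppress-queue insertion added above orders frames by their 8-bit free-running sequence number and must cope with the wrap at 256: a forward difference of less than 128 is treated as "newer". A stand-alone sketch of that comparison:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool freerun_is_newer(uint32_t fr_new, uint32_t fr_compare)
{
	return ((fr_new > fr_compare) && ((fr_new - fr_compare) < 128)) ||
	       ((fr_new < fr_compare) && ((fr_compare - fr_new) > 128));
}

int main(void)
{
	printf("%d\n", freerun_is_newer(10, 5));   /* 1: plainly newer */
	printf("%d\n", freerun_is_newer(2, 250));  /* 1: newer across the wrap */
	printf("%d\n", freerun_is_newer(250, 2));  /* 0: older across the wrap */
	return 0;
}
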
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/nvram.c b/drivers/net/wireless/brcm80211/brcmfmac/nvram.c
new file mode 100644
index 000000000000..d5ef86db631b
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/nvram.c
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2013 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/firmware.h>
+
+#include "nvram.h"
+
+/* brcmf_nvram_strip :Takes a buffer of "<var>=<value>\n" lines read from a file
+ * and ending in a NUL. Removes carriage returns, empty lines, comment lines,
+ * and converts newlines to NULs. Shortens buffer as needed and pads with NULs.
+ * End of buffer is completed with token identifying length of buffer.
+ */
+void *brcmf_nvram_strip(const struct firmware *nv, u32 *new_length)
+{
+	u8 *nvram;
+	u32 i;
+	u32 len;
+	u32 column;
+	u8 val;
+	bool comment;
+	u32 token;
+	__le32 token_le;
+
+	/* Alloc for extra 0 byte + roundup by 4 + length field */
+	nvram = kmalloc(nv->size + 1 + 3 + sizeof(token_le), GFP_KERNEL);
+	if (!nvram)
+		return NULL;
+
+	len = 0;
+	column = 0;
+	comment = false;
+	for (i = 0; i < nv->size; i++) {
+		val = nv->data[i];
+		if (val == 0)
+			break;
+		if (val == '\r')
+			continue;
+		if (comment && (val != '\n'))
+			continue;
+		comment = false;
+		if (val == '#') {
+			comment = true;
+			continue;
+		}
+		if (val == '\n') {
+			if (column == 0)
+				continue;
+			nvram[len] = 0;
+			len++;
+			column = 0;
+			continue;
+		}
+		nvram[len] = val;
+		len++;
+		column++;
+	}
+	column = len;
+	*new_length = roundup(len + 1, 4);
+	while (column != *new_length) {
+		nvram[column] = 0;
+		column++;
+	}
+
+	token = *new_length / 4;
+	token = (~token << 16) | (token & 0x0000FFFF);
+	token_le = cpu_to_le32(token);
+
+	memcpy(&nvram[*new_length], &token_le, sizeof(token_le));
+	*new_length += sizeof(token_le);
+
+	return nvram;
+}
+
+void brcmf_nvram_free(void *nvram)
+{
+	kfree(nvram);
+}
+
+
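
brcmf_nvram_strip() terminates the stripped image with a 32-bit token: the low half holds the image length in 32-bit words and the high half holds the (truncated) bitwise complement of that count. The same computation in isolation:

#include <stdint.h>
#include <stdio.h>

static uint32_t nvram_end_token(uint32_t len_bytes_rounded)
{
	uint32_t words = len_bytes_rounded / 4;

	return (~words << 16) | (words & 0x0000FFFF);
}

int main(void)
{
	printf("token for 256-byte image: 0x%08x\n", nvram_end_token(256));
	return 0;
}
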
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/nvram.h b/drivers/net/wireless/brcm80211/brcmfmac/nvram.h
new file mode 100644
index 000000000000..d454580928c9
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/nvram.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2013 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#ifndef BRCMFMAC_NVRAM_H
+#define BRCMFMAC_NVRAM_H
+
+
+void *brcmf_nvram_strip(const struct firmware *nv, u32 *new_length);
+void brcmf_nvram_free(void *nvram);
+
+
+#endif /* BRCMFMAC_NVRAM_H */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/brcm80211/brcmfmac/p2p.c
index 4a2293041821..fc4f98b275d7 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/p2p.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/p2p.c
@@ -812,7 +812,7 @@ static s32 brcmf_p2p_run_escan(struct brcmf_cfg80211_info *cfg,
 			struct ieee80211_channel *chan = request->channels[i];
 
 			if (chan->flags & (IEEE80211_CHAN_RADAR |
-					   IEEE80211_CHAN_PASSIVE_SCAN))
+					   IEEE80211_CHAN_NO_IR))
 				continue;
 
 			chanspecs[i] = channel_to_chanspec(&p2p->cfg->d11inf,
@@ -1243,7 +1243,7 @@ bool brcmf_p2p_scan_finding_common_channel(struct brcmf_cfg80211_info *cfg,
 					    IEEE80211_P2P_ATTR_DEVICE_ID,
 					    p2p_dev_addr, sizeof(p2p_dev_addr));
 	if ((err >= 0) &&
-	    (!memcmp(p2p_dev_addr, afx_hdl->tx_dst_addr, ETH_ALEN))) {
+	    (ether_addr_equal(p2p_dev_addr, afx_hdl->tx_dst_addr))) {
 		if (!bi->ctl_ch) {
 			ch.chspec = le16_to_cpu(bi->chanspec);
 			cfg->d11inf.decchspec(&ch);
@@ -1380,8 +1380,7 @@ int brcmf_p2p_notify_action_frame_rx(struct brcmf_if *ifp,
 		    (brcmf_p2p_gon_req_collision(p2p, (u8 *)e->addr))) {
 			if (test_bit(BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL,
 				     &p2p->status) &&
-			    (memcmp(afx_hdl->tx_dst_addr, e->addr,
-				    ETH_ALEN) == 0)) {
+			    (ether_addr_equal(afx_hdl->tx_dst_addr, e->addr))) {
 				afx_hdl->peer_chan = ch.chnum;
 				brcmf_dbg(INFO, "GON request: Peer found, channel=%d\n",
 					  afx_hdl->peer_chan);
@@ -1865,7 +1864,7 @@ s32 brcmf_p2p_notify_rx_mgmt_p2p_probereq(struct brcmf_if *ifp,
 	cfg->d11inf.decchspec(&ch);
 
 	if (test_bit(BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL, &p2p->status) &&
-	    (memcmp(afx_hdl->tx_dst_addr, e->addr, ETH_ALEN) == 0)) {
+	    (ether_addr_equal(afx_hdl->tx_dst_addr, e->addr))) {
 		afx_hdl->peer_chan = ch.chnum;
 		brcmf_dbg(INFO, "PROBE REQUEST: Peer found, channel=%d\n",
 			  afx_hdl->peer_chan);
@@ -1956,21 +1955,21 @@ s32 brcmf_p2p_attach(struct brcmf_cfg80211_info *cfg)
 		err = brcmf_fil_iovar_int_set(pri_ifp, "p2p_disc", 1);
 		if (err < 0) {
 			brcmf_err("set p2p_disc error\n");
-			brcmf_free_vif(cfg, p2p_vif);
+			brcmf_free_vif(p2p_vif);
 			goto exit;
 		}
 		/* obtain bsscfg index for P2P discovery */
 		err = brcmf_fil_iovar_int_get(pri_ifp, "p2p_dev", &bssidx);
 		if (err < 0) {
 			brcmf_err("retrieving discover bsscfg index failed\n");
-			brcmf_free_vif(cfg, p2p_vif);
+			brcmf_free_vif(p2p_vif);
 			goto exit;
 		}
 		/* Verify that firmware uses same bssidx as driver !! */
 		if (p2p_ifp->bssidx != bssidx) {
 			brcmf_err("Incorrect bssidx=%d, compared to p2p_ifp->bssidx=%d\n",
 				  bssidx, p2p_ifp->bssidx);
-			brcmf_free_vif(cfg, p2p_vif);
+			brcmf_free_vif(p2p_vif);
 			goto exit;
 		}
 
@@ -1998,7 +1997,7 @@ void brcmf_p2p_detach(struct brcmf_p2p_info *p2p)
 		brcmf_p2p_cancel_remain_on_channel(vif->ifp);
 		brcmf_p2p_deinit_discovery(p2p);
 		/* remove discovery interface */
-		brcmf_free_vif(p2p->cfg, vif);
+		brcmf_free_vif(vif);
 		p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif = NULL;
 	}
 	/* just set it all to zero */
@@ -2223,7 +2222,7 @@ static struct wireless_dev *brcmf_p2p_create_p2pdev(struct brcmf_p2p_info *p2p,
 	return &p2p_vif->wdev;
 
 fail:
-	brcmf_free_vif(p2p->cfg, p2p_vif);
+	brcmf_free_vif(p2p_vif);
 	return ERR_PTR(err);
 }
 
@@ -2232,31 +2231,12 @@ fail:
  *
  * @vif: virtual interface object to delete.
  */
-static void brcmf_p2p_delete_p2pdev(struct brcmf_cfg80211_info *cfg,
+static void brcmf_p2p_delete_p2pdev(struct brcmf_p2p_info *p2p,
 				    struct brcmf_cfg80211_vif *vif)
 {
 	cfg80211_unregister_wdev(&vif->wdev);
-	cfg->p2p.bss_idx[P2PAPI_BSSCFG_DEVICE].vif = NULL;
-	brcmf_free_vif(cfg, vif);
-}
-
-/**
- * brcmf_p2p_free_p2p_if() - free up net device related data.
- *
- * @ndev: net device that needs to be freed.
- */
-static void brcmf_p2p_free_p2p_if(struct net_device *ndev)
-{
-	struct brcmf_cfg80211_info *cfg;
-	struct brcmf_cfg80211_vif *vif;
-	struct brcmf_if *ifp;
-
-	ifp = netdev_priv(ndev);
-	cfg = ifp->drvr->config;
-	vif = ifp->vif;
-
-	brcmf_free_vif(cfg, vif);
-	free_netdev(ifp->ndev);
+	p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif = NULL;
+	brcmf_free_vif(vif);
 }
 
 /**
@@ -2336,8 +2316,6 @@ struct wireless_dev *brcmf_p2p_add_vif(struct wiphy *wiphy, const char *name,
 		brcmf_err("Registering netdevice failed\n");
 		goto fail;
 	}
-	/* override destructor */
-	ifp->ndev->destructor = brcmf_p2p_free_p2p_if;
 
 	cfg->p2p.bss_idx[P2PAPI_BSSCFG_CONNECTION].vif = vif;
 	/* Disable firmware roaming for P2P interface  */
@@ -2350,7 +2328,7 @@ struct wireless_dev *brcmf_p2p_add_vif(struct wiphy *wiphy, const char *name,
 	return &ifp->vif->wdev;
 
 fail:
-	brcmf_free_vif(cfg, vif);
+	brcmf_free_vif(vif);
 	return ERR_PTR(err);
 }
 
@@ -2359,8 +2337,6 @@ fail:
  *
  * @wiphy: wiphy device of interface.
  * @wdev: wireless device of interface.
- *
- * TODO: not yet supported.
  */
 int brcmf_p2p_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev)
 {
@@ -2386,7 +2362,7 @@ int brcmf_p2p_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev)
 		break;
 
 	case NL80211_IFTYPE_P2P_DEVICE:
-		brcmf_p2p_delete_p2pdev(cfg, vif);
+		brcmf_p2p_delete_p2pdev(p2p, vif);
 		return 0;
 	default:
 		return -ENOTSUPP;
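
The p2p.c hunks above replace open-coded memcmp() checks on 6-byte addresses with ether_addr_equal(), which returns true when two Ethernet addresses match. A user-space equivalent of the comparison being performed:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

static bool mac_equal(const unsigned char *a, const unsigned char *b)
{
	return memcmp(a, b, ETH_ALEN) == 0;
}

int main(void)
{
	unsigned char a[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	unsigned char b[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

	printf("equal: %d\n", mac_equal(a, b));
	return 0;
}
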
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/proto.c b/drivers/net/wireless/brcm80211/brcmfmac/proto.c
new file mode 100644
index 000000000000..b6b464184946
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/proto.c
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2013 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/netdevice.h>
+
+#include <brcmu_wifi.h>
+#include "dhd.h"
+#include "dhd_dbg.h"
+#include "proto.h"
+#include "bcdc.h"
+
+
+int brcmf_proto_attach(struct brcmf_pub *drvr)
+{
+	struct brcmf_proto *proto;
+
+	proto = kzalloc(sizeof(*proto), GFP_ATOMIC);
+	if (!proto)
+		goto fail;
+
+	drvr->proto = proto;
+	/* BCDC protocol is only protocol supported for the moment */
+	if (brcmf_proto_bcdc_attach(drvr))
+		goto fail;
+
+	if ((proto->txdata == NULL) || (proto->hdrpull == NULL) ||
+	    (proto->query_dcmd == NULL) || (proto->set_dcmd == NULL)) {
+		brcmf_err("Not all proto handlers have been installed\n");
+		goto fail;
+	}
+	return 0;
+
+fail:
+	kfree(proto);
+	drvr->proto = NULL;
+	return -ENOMEM;
+}
+
+void brcmf_proto_detach(struct brcmf_pub *drvr)
+{
+	if (drvr->proto) {
+		brcmf_proto_bcdc_detach(drvr);
+		kfree(drvr->proto);
+		drvr->proto = NULL;
+	}
+}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/proto.h b/drivers/net/wireless/brcm80211/brcmfmac/proto.h
new file mode 100644
index 000000000000..482fb0ba4a30
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/proto.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2013 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#ifndef BRCMFMAC_PROTO_H
+#define BRCMFMAC_PROTO_H
+
+struct brcmf_proto {
+	int (*hdrpull)(struct brcmf_pub *drvr, bool do_fws, u8 *ifidx,
+		       struct sk_buff *skb);
+	int (*query_dcmd)(struct brcmf_pub *drvr, int ifidx, uint cmd,
+			  void *buf, uint len);
+	int (*set_dcmd)(struct brcmf_pub *drvr, int ifidx, uint cmd, void *buf,
+			uint len);
+	int (*txdata)(struct brcmf_pub *drvr, int ifidx, u8 offset,
+		      struct sk_buff *skb);
+	void *pd;
+};
+
+
+int brcmf_proto_attach(struct brcmf_pub *drvr);
+void brcmf_proto_detach(struct brcmf_pub *drvr);
+
+static inline int brcmf_proto_hdrpull(struct brcmf_pub *drvr, bool do_fws,
+				      u8 *ifidx, struct sk_buff *skb)
+{
+	return drvr->proto->hdrpull(drvr, do_fws, ifidx, skb);
+}
+static inline int brcmf_proto_query_dcmd(struct brcmf_pub *drvr, int ifidx,
+					 uint cmd, void *buf, uint len)
+{
+	return drvr->proto->query_dcmd(drvr, ifidx, cmd, buf, len);
+}
+static inline int brcmf_proto_set_dcmd(struct brcmf_pub *drvr, int ifidx,
+				       uint cmd, void *buf, uint len)
+{
+	return drvr->proto->set_dcmd(drvr, ifidx, cmd, buf, len);
+}
+static inline int brcmf_proto_txdata(struct brcmf_pub *drvr, int ifidx,
+				       u8 offset, struct sk_buff *skb)
+{
+	return drvr->proto->txdata(drvr, ifidx, offset, skb);
+}
+
+
+#endif /* BRCMFMAC_PROTO_H */
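The pair of new files above adds an ops-table indirection between the common driver core and its wire protocol: brcmf_proto_attach() lets the (currently only) BCDC backend install its handlers, refuses to finish if any mandatory callback is missing, and the inline wrappers in proto.h are the only way callers reach the backend. As a rough, self-contained sketch of that pattern — plain userspace C, with invented demo_* names and a made-up command number, not brcmfmac's own types — it boils down to:

/* Standalone illustration of the protocol ops-table pattern; every
 * identifier here (demo_*) is invented for this sketch and is not a
 * brcmfmac symbol. */
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

struct demo_dev;				/* opaque "driver" handle */

struct demo_proto {				/* table filled by a backend */
	int (*query_dcmd)(struct demo_dev *dev, unsigned int cmd,
			  void *buf, unsigned int len);
	int (*set_dcmd)(struct demo_dev *dev, unsigned int cmd,
			void *buf, unsigned int len);
	void *pd;				/* backend private data */
};

struct demo_dev {
	struct demo_proto *proto;
};

/* one concrete backend, standing in for BCDC */
static int demo_bcdc_query(struct demo_dev *dev, unsigned int cmd,
			   void *buf, unsigned int len)
{
	(void)dev; (void)buf;
	printf("query cmd=%u len=%u\n", cmd, len);
	return 0;
}

static int demo_bcdc_set(struct demo_dev *dev, unsigned int cmd,
			 void *buf, unsigned int len)
{
	(void)dev; (void)buf;
	printf("set cmd=%u len=%u\n", cmd, len);
	return 0;
}

static int demo_bcdc_attach(struct demo_dev *dev)
{
	dev->proto->query_dcmd = demo_bcdc_query;
	dev->proto->set_dcmd = demo_bcdc_set;
	return 0;
}

/* attach: allocate the table, let the backend fill it, then verify it */
static int demo_proto_attach(struct demo_dev *dev)
{
	struct demo_proto *proto = calloc(1, sizeof(*proto));

	if (!proto)
		return -ENOMEM;
	dev->proto = proto;
	if (demo_bcdc_attach(dev) || !proto->query_dcmd || !proto->set_dcmd) {
		free(proto);
		dev->proto = NULL;
		return -ENOMEM;
	}
	return 0;
}

/* callers go through the wrapper, never through the backend directly */
static inline int demo_proto_query_dcmd(struct demo_dev *dev, unsigned int cmd,
					void *buf, unsigned int len)
{
	return dev->proto->query_dcmd(dev, cmd, buf, len);
}

int main(void)
{
	struct demo_dev dev = { 0 };

	if (demo_proto_attach(&dev))
		return 1;
	demo_proto_query_dcmd(&dev, 42 /* made-up command id */, NULL, 0);
	free(dev.proto);
	return 0;
}

Validating the table once at attach time, as the new brcmf_proto_attach() does, turns a missing handler into a clean probe-time failure instead of a NULL-pointer dereference on the first ioctl or transmit.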
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c
index 2096a14ef1fb..82bf3c5d3cdc 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c
@@ -19,6 +19,7 @@
 #include <linux/netdevice.h>
 #include <linux/mmc/card.h>
 #include <linux/mmc/sdio_func.h>
+#include <linux/mmc/sdio_ids.h>
 #include <linux/ssb/ssb_regs.h>
 #include <linux/bcma/bcma.h>
 
@@ -50,6 +51,9 @@
 #define BCM43143_CORE_ARM_BASE		0x18003000
 #define BCM43143_RAMSIZE		0x70000
 
+/* All D11 cores, ID 0x812 */
+#define BCM43xx_CORE_D11_BASE		0x18001000
+
 #define	SBCOREREV(sbidh) \
 	((((sbidh) & SSB_IDHIGH_RCHI) >> SSB_IDHIGH_RCHI_SHIFT) | \
 	  ((sbidh) & SSB_IDHIGH_RCLO))
@@ -65,6 +69,10 @@
 /* ARM CR4 core specific control flag bits */
 #define ARMCR4_BCMA_IOCTL_CPUHALT	0x0020
 
+/* D11 core specific control flag bits */
+#define D11_BCMA_IOCTL_PHYCLOCKEN	0x0004
+#define D11_BCMA_IOCTL_PHYRESET		0x0008
+
 #define SDIOD_DRVSTR_KEY(chip, pmu)     (((chip) << 16) | (pmu))
 /* SDIO Pad drive strength to select value mappings */
 struct sdiod_drive_str {
@@ -83,6 +91,24 @@ static const struct sdiod_drive_str sdiod_drvstr_tab1_1v8[] = {
 	{0, 0x1}
 };
 
+/* SDIO Drive Strength to sel value table for PMU Rev 13 (1.8v) */
+static const struct sdiod_drive_str sdiod_drive_strength_tab5_1v8[] = {
+	{6, 0x7},
+	{5, 0x6},
+	{4, 0x5},
+	{3, 0x4},
+	{2, 0x2},
+	{1, 0x1},
+	{0, 0x0}
+};
+
+/* SDIO Drive Strength to sel value table for PMU Rev 17 (1.8v) */
+static const struct sdiod_drive_str sdiod_drvstr_tab6_1v8[] = {
+	{3, 0x3},
+	{2, 0x2},
+	{1, 0x1},
+	{0, 0x0} };
+
 /* SDIO Drive Strength to sel value table for 43143 PMU Rev 17 (3.3V) */
 static const struct sdiod_drive_str sdiod_drvstr_tab2_3v3[] = {
 	{16, 0x7},
@@ -92,7 +118,7 @@ static const struct sdiod_drive_str sdiod_drvstr_tab2_3v3[] = {
 };
 
 u8
-brcmf_sdio_chip_getinfidx(struct chip_info *ci, u16 coreid)
+brcmf_sdio_chip_getinfidx(struct brcmf_chip *ci, u16 coreid)
 {
 	u8 idx;
 
@@ -105,22 +131,22 @@ brcmf_sdio_chip_getinfidx(struct chip_info *ci, u16 coreid)
 
 static u32
 brcmf_sdio_sb_corerev(struct brcmf_sdio_dev *sdiodev,
-		      struct chip_info *ci, u16 coreid)
+		      struct brcmf_chip *ci, u16 coreid)
 {
 	u32 regdata;
 	u8 idx;
 
 	idx = brcmf_sdio_chip_getinfidx(ci, coreid);
 
-	regdata = brcmf_sdio_regrl(sdiodev,
-				   CORE_SB(ci->c_inf[idx].base, sbidhigh),
-				   NULL);
+	regdata = brcmf_sdiod_regrl(sdiodev,
+				    CORE_SB(ci->c_inf[idx].base, sbidhigh),
+				    NULL);
 	return SBCOREREV(regdata);
 }
 
 static u32
 brcmf_sdio_ai_corerev(struct brcmf_sdio_dev *sdiodev,
-		      struct chip_info *ci, u16 coreid)
+		      struct brcmf_chip *ci, u16 coreid)
 {
 	u8 idx;
 
@@ -131,7 +157,7 @@ brcmf_sdio_ai_corerev(struct brcmf_sdio_dev *sdiodev,
 
 static bool
 brcmf_sdio_sb_iscoreup(struct brcmf_sdio_dev *sdiodev,
-		       struct chip_info *ci, u16 coreid)
+		       struct brcmf_chip *ci, u16 coreid)
 {
 	u32 regdata;
 	u8 idx;
@@ -140,9 +166,9 @@ brcmf_sdio_sb_iscoreup(struct brcmf_sdio_dev *sdiodev,
 	if (idx == BRCMF_MAX_CORENUM)
 		return false;
 
-	regdata = brcmf_sdio_regrl(sdiodev,
-				   CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
-				   NULL);
+	regdata = brcmf_sdiod_regrl(sdiodev,
+				    CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
+				    NULL);
 	regdata &= (SSB_TMSLOW_RESET | SSB_TMSLOW_REJECT |
 		    SSB_IMSTATE_REJECT | SSB_TMSLOW_CLOCK);
 	return (SSB_TMSLOW_CLOCK == regdata);
@@ -150,7 +176,7 @@ brcmf_sdio_sb_iscoreup(struct brcmf_sdio_dev *sdiodev,
 
 static bool
 brcmf_sdio_ai_iscoreup(struct brcmf_sdio_dev *sdiodev,
-		       struct chip_info *ci, u16 coreid)
+		       struct brcmf_chip *ci, u16 coreid)
 {
 	u32 regdata;
 	u8 idx;
@@ -160,13 +186,13 @@ brcmf_sdio_ai_iscoreup(struct brcmf_sdio_dev *sdiodev,
 	if (idx == BRCMF_MAX_CORENUM)
 		return false;
 
-	regdata = brcmf_sdio_regrl(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL,
-				   NULL);
+	regdata = brcmf_sdiod_regrl(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL,
+				    NULL);
 	ret = (regdata & (BCMA_IOCTL_FGC | BCMA_IOCTL_CLK)) == BCMA_IOCTL_CLK;
 
-	regdata = brcmf_sdio_regrl(sdiodev,
-				   ci->c_inf[idx].wrapbase+BCMA_RESET_CTL,
-				   NULL);
+	regdata = brcmf_sdiod_regrl(sdiodev,
+				    ci->c_inf[idx].wrapbase+BCMA_RESET_CTL,
+				    NULL);
 	ret = ret && ((regdata & BCMA_RESET_CTL_RESET) == 0);
 
 	return ret;
@@ -174,7 +200,8 @@ brcmf_sdio_ai_iscoreup(struct brcmf_sdio_dev *sdiodev,
 
 static void
 brcmf_sdio_sb_coredisable(struct brcmf_sdio_dev *sdiodev,
-			  struct chip_info *ci, u16 coreid, u32 core_bits)
+			  struct brcmf_chip *ci, u16 coreid, u32 pre_resetbits,
+			  u32 in_resetbits)
 {
 	u32 regdata, base;
 	u8 idx;
@@ -182,130 +209,126 @@ brcmf_sdio_sb_coredisable(struct brcmf_sdio_dev *sdiodev,
 	idx = brcmf_sdio_chip_getinfidx(ci, coreid);
 	base = ci->c_inf[idx].base;
 
-	regdata = brcmf_sdio_regrl(sdiodev, CORE_SB(base, sbtmstatelow), NULL);
+	regdata = brcmf_sdiod_regrl(sdiodev, CORE_SB(base, sbtmstatelow), NULL);
 	if (regdata & SSB_TMSLOW_RESET)
 		return;
 
-	regdata = brcmf_sdio_regrl(sdiodev, CORE_SB(base, sbtmstatelow), NULL);
+	regdata = brcmf_sdiod_regrl(sdiodev, CORE_SB(base, sbtmstatelow), NULL);
 	if ((regdata & SSB_TMSLOW_CLOCK) != 0) {
 		/*
 		 * set target reject and spin until busy is clear
 		 * (preserve core-specific bits)
 		 */
-		regdata = brcmf_sdio_regrl(sdiodev, CORE_SB(base, sbtmstatelow),
-					   NULL);
-		brcmf_sdio_regwl(sdiodev, CORE_SB(base, sbtmstatelow),
-				 regdata | SSB_TMSLOW_REJECT, NULL);
+		regdata = brcmf_sdiod_regrl(sdiodev,
+					    CORE_SB(base, sbtmstatelow), NULL);
+		brcmf_sdiod_regwl(sdiodev, CORE_SB(base, sbtmstatelow),
+				  regdata | SSB_TMSLOW_REJECT, NULL);
 
-		regdata = brcmf_sdio_regrl(sdiodev, CORE_SB(base, sbtmstatelow),
-					   NULL);
+		regdata = brcmf_sdiod_regrl(sdiodev,
+					    CORE_SB(base, sbtmstatelow), NULL);
 		udelay(1);
-		SPINWAIT((brcmf_sdio_regrl(sdiodev,
-					   CORE_SB(base, sbtmstatehigh),
-					   NULL) &
-			SSB_TMSHIGH_BUSY), 100000);
-
-		regdata = brcmf_sdio_regrl(sdiodev,
-					   CORE_SB(base, sbtmstatehigh),
-					   NULL);
+		SPINWAIT((brcmf_sdiod_regrl(sdiodev,
+					    CORE_SB(base, sbtmstatehigh),
+					    NULL) &
+			  SSB_TMSHIGH_BUSY), 100000);
+
+		regdata = brcmf_sdiod_regrl(sdiodev,
+					    CORE_SB(base, sbtmstatehigh),
+					    NULL);
 		if (regdata & SSB_TMSHIGH_BUSY)
 			brcmf_err("core state still busy\n");
 
-		regdata = brcmf_sdio_regrl(sdiodev, CORE_SB(base, sbidlow),
-					   NULL);
+		regdata = brcmf_sdiod_regrl(sdiodev, CORE_SB(base, sbidlow),
+					    NULL);
 		if (regdata & SSB_IDLOW_INITIATOR) {
-			regdata = brcmf_sdio_regrl(sdiodev,
-						   CORE_SB(base, sbimstate),
-						   NULL);
+			regdata = brcmf_sdiod_regrl(sdiodev,
+						    CORE_SB(base, sbimstate),
+						    NULL);
 			regdata |= SSB_IMSTATE_REJECT;
-			brcmf_sdio_regwl(sdiodev, CORE_SB(base, sbimstate),
-					 regdata, NULL);
-			regdata = brcmf_sdio_regrl(sdiodev,
-						   CORE_SB(base, sbimstate),
-						   NULL);
+			brcmf_sdiod_regwl(sdiodev, CORE_SB(base, sbimstate),
+					  regdata, NULL);
+			regdata = brcmf_sdiod_regrl(sdiodev,
+						    CORE_SB(base, sbimstate),
+						    NULL);
 			udelay(1);
-			SPINWAIT((brcmf_sdio_regrl(sdiodev,
-						   CORE_SB(base, sbimstate),
-						   NULL) &
-				SSB_IMSTATE_BUSY), 100000);
+			SPINWAIT((brcmf_sdiod_regrl(sdiodev,
+						    CORE_SB(base, sbimstate),
+						    NULL) &
+				  SSB_IMSTATE_BUSY), 100000);
 		}
 
 		/* set reset and reject while enabling the clocks */
 		regdata = SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK |
 			  SSB_TMSLOW_REJECT | SSB_TMSLOW_RESET;
-		brcmf_sdio_regwl(sdiodev, CORE_SB(base, sbtmstatelow),
-				 regdata, NULL);
-		regdata = brcmf_sdio_regrl(sdiodev, CORE_SB(base, sbtmstatelow),
-					   NULL);
+		brcmf_sdiod_regwl(sdiodev, CORE_SB(base, sbtmstatelow),
+				  regdata, NULL);
+		regdata = brcmf_sdiod_regrl(sdiodev,
+					    CORE_SB(base, sbtmstatelow), NULL);
 		udelay(10);
 
 		/* clear the initiator reject bit */
-		regdata = brcmf_sdio_regrl(sdiodev, CORE_SB(base, sbidlow),
-					   NULL);
+		regdata = brcmf_sdiod_regrl(sdiodev, CORE_SB(base, sbidlow),
+					    NULL);
 		if (regdata & SSB_IDLOW_INITIATOR) {
-			regdata = brcmf_sdio_regrl(sdiodev,
-						   CORE_SB(base, sbimstate),
-						   NULL);
+			regdata = brcmf_sdiod_regrl(sdiodev,
+						    CORE_SB(base, sbimstate),
+						    NULL);
 			regdata &= ~SSB_IMSTATE_REJECT;
-			brcmf_sdio_regwl(sdiodev, CORE_SB(base, sbimstate),
-					 regdata, NULL);
+			brcmf_sdiod_regwl(sdiodev, CORE_SB(base, sbimstate),
+					  regdata, NULL);
 		}
 	}
 
 	/* leave reset and reject asserted */
-	brcmf_sdio_regwl(sdiodev, CORE_SB(base, sbtmstatelow),
-			 (SSB_TMSLOW_REJECT | SSB_TMSLOW_RESET), NULL);
+	brcmf_sdiod_regwl(sdiodev, CORE_SB(base, sbtmstatelow),
+			  (SSB_TMSLOW_REJECT | SSB_TMSLOW_RESET), NULL);
 	udelay(1);
 }
 
 static void
 brcmf_sdio_ai_coredisable(struct brcmf_sdio_dev *sdiodev,
-			  struct chip_info *ci, u16 coreid, u32 core_bits)
+			  struct brcmf_chip *ci, u16 coreid, u32 pre_resetbits,
+			  u32 in_resetbits)
 {
 	u8 idx;
 	u32 regdata;
+	u32 wrapbase;
 
 	idx = brcmf_sdio_chip_getinfidx(ci, coreid);
 	if (idx == BRCMF_MAX_CORENUM)
 		return;
 
+	wrapbase = ci->c_inf[idx].wrapbase;
+
 	/* if core is already in reset, just return */
-	regdata = brcmf_sdio_regrl(sdiodev,
-				   ci->c_inf[idx].wrapbase+BCMA_RESET_CTL,
-				   NULL);
+	regdata = brcmf_sdiod_regrl(sdiodev, wrapbase + BCMA_RESET_CTL, NULL);
 	if ((regdata & BCMA_RESET_CTL_RESET) != 0)
 		return;
 
-	/* ensure no pending backplane operation
-	 * 300uc should be sufficient for backplane ops to be finish
-	 * extra 10ms is taken into account for firmware load stage
-	 * after 10300us carry on disabling the core anyway
-	 */
-	SPINWAIT(brcmf_sdio_regrl(sdiodev,
-				  ci->c_inf[idx].wrapbase+BCMA_RESET_ST,
-				  NULL), 10300);
-	regdata = brcmf_sdio_regrl(sdiodev,
-				   ci->c_inf[idx].wrapbase+BCMA_RESET_ST,
-				   NULL);
-	if (regdata)
-		brcmf_err("disabling core 0x%x with reset status %x\n",
-			  coreid, regdata);
-
-	brcmf_sdio_regwl(sdiodev, ci->c_inf[idx].wrapbase+BCMA_RESET_CTL,
-			 BCMA_RESET_CTL_RESET, NULL);
-	udelay(1);
+	/* configure reset */
+	brcmf_sdiod_regwl(sdiodev, wrapbase + BCMA_IOCTL, pre_resetbits |
+			  BCMA_IOCTL_FGC | BCMA_IOCTL_CLK, NULL);
+	regdata = brcmf_sdiod_regrl(sdiodev, wrapbase + BCMA_IOCTL, NULL);
 
-	brcmf_sdio_regwl(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL,
-			 core_bits, NULL);
-	regdata = brcmf_sdio_regrl(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL,
-				   NULL);
+	/* put in reset */
+	brcmf_sdiod_regwl(sdiodev, wrapbase + BCMA_RESET_CTL,
+			  BCMA_RESET_CTL_RESET, NULL);
 	usleep_range(10, 20);
 
+	/* wait till reset is 1 */
+	SPINWAIT(brcmf_sdiod_regrl(sdiodev, wrapbase + BCMA_RESET_CTL, NULL) !=
+		 BCMA_RESET_CTL_RESET, 300);
+
+	/* post reset configure */
+	brcmf_sdiod_regwl(sdiodev, wrapbase + BCMA_IOCTL, pre_resetbits |
+			  BCMA_IOCTL_FGC | BCMA_IOCTL_CLK, NULL);
+	regdata = brcmf_sdiod_regrl(sdiodev, wrapbase + BCMA_IOCTL, NULL);
 }
 
 static void
 brcmf_sdio_sb_resetcore(struct brcmf_sdio_dev *sdiodev,
-			struct chip_info *ci, u16 coreid, u32 core_bits)
+			struct brcmf_chip *ci, u16 coreid,  u32 pre_resetbits,
+			u32 in_resetbits, u32 post_resetbits)
 {
 	u32 regdata;
 	u8 idx;
@@ -318,93 +341,91 @@ brcmf_sdio_sb_resetcore(struct brcmf_sdio_dev *sdiodev,
 	 * Must do the disable sequence first to work for
 	 * arbitrary current core state.
 	 */
-	brcmf_sdio_sb_coredisable(sdiodev, ci, coreid, 0);
+	brcmf_sdio_sb_coredisable(sdiodev, ci, coreid, pre_resetbits,
+				  in_resetbits);
 
 	/*
 	 * Now do the initialization sequence.
 	 * set reset while enabling the clock and
 	 * forcing them on throughout the core
 	 */
-	brcmf_sdio_regwl(sdiodev,
-			 CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
-			 SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK | SSB_TMSLOW_RESET,
-			 NULL);
-	regdata = brcmf_sdio_regrl(sdiodev,
-				   CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
-				   NULL);
+	brcmf_sdiod_regwl(sdiodev,
+			  CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
+			  SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK | SSB_TMSLOW_RESET,
+			  NULL);
+	regdata = brcmf_sdiod_regrl(sdiodev,
+				    CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
+				    NULL);
 	udelay(1);
 
 	/* clear any serror */
-	regdata = brcmf_sdio_regrl(sdiodev,
-				   CORE_SB(ci->c_inf[idx].base, sbtmstatehigh),
-				   NULL);
+	regdata = brcmf_sdiod_regrl(sdiodev,
+				    CORE_SB(ci->c_inf[idx].base, sbtmstatehigh),
+				    NULL);
 	if (regdata & SSB_TMSHIGH_SERR)
-		brcmf_sdio_regwl(sdiodev,
-				 CORE_SB(ci->c_inf[idx].base, sbtmstatehigh),
-				 0, NULL);
+		brcmf_sdiod_regwl(sdiodev,
+				  CORE_SB(ci->c_inf[idx].base, sbtmstatehigh),
+				  0, NULL);
 
-	regdata = brcmf_sdio_regrl(sdiodev,
-				   CORE_SB(ci->c_inf[idx].base, sbimstate),
-				   NULL);
+	regdata = brcmf_sdiod_regrl(sdiodev,
+				    CORE_SB(ci->c_inf[idx].base, sbimstate),
+				    NULL);
 	if (regdata & (SSB_IMSTATE_IBE | SSB_IMSTATE_TO))
-		brcmf_sdio_regwl(sdiodev,
-				 CORE_SB(ci->c_inf[idx].base, sbimstate),
-				 regdata & ~(SSB_IMSTATE_IBE | SSB_IMSTATE_TO),
-				 NULL);
+		brcmf_sdiod_regwl(sdiodev,
+				  CORE_SB(ci->c_inf[idx].base, sbimstate),
+				  regdata & ~(SSB_IMSTATE_IBE | SSB_IMSTATE_TO),
+				  NULL);
 
 	/* clear reset and allow it to propagate throughout the core */
-	brcmf_sdio_regwl(sdiodev, CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
-			 SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK, NULL);
-	regdata = brcmf_sdio_regrl(sdiodev,
-				   CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
-				   NULL);
+	brcmf_sdiod_regwl(sdiodev, CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
+			  SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK, NULL);
+	regdata = brcmf_sdiod_regrl(sdiodev,
+				    CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
+				    NULL);
 	udelay(1);
 
 	/* leave clock enabled */
-	brcmf_sdio_regwl(sdiodev, CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
-			 SSB_TMSLOW_CLOCK, NULL);
-	regdata = brcmf_sdio_regrl(sdiodev,
-				   CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
-				   NULL);
+	brcmf_sdiod_regwl(sdiodev, CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
+			  SSB_TMSLOW_CLOCK, NULL);
+	regdata = brcmf_sdiod_regrl(sdiodev,
+				    CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
+				    NULL);
 	udelay(1);
 }
 
 static void
 brcmf_sdio_ai_resetcore(struct brcmf_sdio_dev *sdiodev,
-			struct chip_info *ci, u16 coreid, u32 core_bits)
+			struct brcmf_chip *ci, u16 coreid, u32 pre_resetbits,
+			u32 in_resetbits, u32 post_resetbits)
 {
 	u8 idx;
 	u32 regdata;
+	u32 wrapbase;
 
 	idx = brcmf_sdio_chip_getinfidx(ci, coreid);
 	if (idx == BRCMF_MAX_CORENUM)
 		return;
 
+	wrapbase = ci->c_inf[idx].wrapbase;
+
 	/* must disable first to work for arbitrary current core state */
-	brcmf_sdio_ai_coredisable(sdiodev, ci, coreid, core_bits);
-
-	/* now do initialization sequence */
-	brcmf_sdio_regwl(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL,
-			 core_bits | BCMA_IOCTL_FGC | BCMA_IOCTL_CLK, NULL);
-	regdata = brcmf_sdio_regrl(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL,
-				   NULL);
-	brcmf_sdio_regwl(sdiodev, ci->c_inf[idx].wrapbase+BCMA_RESET_CTL,
-			 0, NULL);
-	regdata = brcmf_sdio_regrl(sdiodev,
-				   ci->c_inf[idx].wrapbase+BCMA_RESET_CTL,
-				   NULL);
-	udelay(1);
+	brcmf_sdio_ai_coredisable(sdiodev, ci, coreid, pre_resetbits,
+				  in_resetbits);
 
-	brcmf_sdio_regwl(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL,
-			 core_bits | BCMA_IOCTL_CLK, NULL);
-	regdata = brcmf_sdio_regrl(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL,
-				   NULL);
-	udelay(1);
+	while (brcmf_sdiod_regrl(sdiodev, wrapbase + BCMA_RESET_CTL, NULL) &
+	       BCMA_RESET_CTL_RESET) {
+		brcmf_sdiod_regwl(sdiodev, wrapbase + BCMA_RESET_CTL, 0, NULL);
+		usleep_range(40, 60);
+	}
+
+	brcmf_sdiod_regwl(sdiodev, wrapbase + BCMA_IOCTL, post_resetbits |
+			  BCMA_IOCTL_CLK, NULL);
+	regdata = brcmf_sdiod_regrl(sdiodev, wrapbase + BCMA_IOCTL, NULL);
 }
 
 #ifdef DEBUG
 /* safety check for chipinfo */
-static int brcmf_sdio_chip_cichk(struct chip_info *ci)
+static int brcmf_sdio_chip_cichk(struct brcmf_chip *ci)
 {
 	u8 core_idx;
 
@@ -431,172 +452,213 @@ static int brcmf_sdio_chip_cichk(struct chip_info *ci)
 	return 0;
 }
 #else	/* DEBUG */
-static inline int brcmf_sdio_chip_cichk(struct chip_info *ci)
+static inline int brcmf_sdio_chip_cichk(struct brcmf_chip *ci)
 {
 	return 0;
 }
 #endif
 
 static int brcmf_sdio_chip_recognition(struct brcmf_sdio_dev *sdiodev,
-				       struct chip_info *ci, u32 regs)
+				       struct brcmf_chip *ci)
 {
 	u32 regdata;
-	int ret;
+	u32 socitype;
 
 	/* Get CC core rev
-	 * Chipid is assume to be at offset 0 from regs arg
+	 * Chipid is assumed to be at offset 0 from SI_ENUM_BASE
 	 * For different chiptypes or old sdio hosts w/o chipcommon,
 	 * other ways of recognition should be added here.
 	 */
-	ci->c_inf[0].id = BCMA_CORE_CHIPCOMMON;
-	ci->c_inf[0].base = regs;
-	regdata = brcmf_sdio_regrl(sdiodev,
-				   CORE_CC_REG(ci->c_inf[0].base, chipid),
-				   NULL);
+	regdata = brcmf_sdiod_regrl(sdiodev,
+				    CORE_CC_REG(SI_ENUM_BASE, chipid),
+				    NULL);
 	ci->chip = regdata & CID_ID_MASK;
 	ci->chiprev = (regdata & CID_REV_MASK) >> CID_REV_SHIFT;
 	if (sdiodev->func[0]->device == SDIO_DEVICE_ID_BROADCOM_4335_4339 &&
 	    ci->chiprev >= 2)
 		ci->chip = BCM4339_CHIP_ID;
-	ci->socitype = (regdata & CID_TYPE_MASK) >> CID_TYPE_SHIFT;
+	socitype = (regdata & CID_TYPE_MASK) >> CID_TYPE_SHIFT;
 
-	brcmf_dbg(INFO, "chipid=0x%x chiprev=%d\n", ci->chip, ci->chiprev);
+	brcmf_dbg(INFO, "found %s chip: id=0x%x, rev=%d\n",
+		  socitype == SOCI_SB ? "SB" : "AXI", ci->chip, ci->chiprev);
 
-	/* Address of cores for new chips should be added here */
-	switch (ci->chip) {
-	case BCM43143_CHIP_ID:
-		ci->c_inf[0].wrapbase = ci->c_inf[0].base + 0x00100000;
-		ci->c_inf[0].cib = 0x2b000000;
-		ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
-		ci->c_inf[1].base = BCM43143_CORE_BUS_BASE;
-		ci->c_inf[1].wrapbase = ci->c_inf[1].base + 0x00100000;
-		ci->c_inf[1].cib = 0x18000000;
-		ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM;
-		ci->c_inf[2].base = BCM43143_CORE_SOCRAM_BASE;
-		ci->c_inf[2].wrapbase = ci->c_inf[2].base + 0x00100000;
-		ci->c_inf[2].cib = 0x14000000;
-		ci->c_inf[3].id = BCMA_CORE_ARM_CM3;
-		ci->c_inf[3].base = BCM43143_CORE_ARM_BASE;
-		ci->c_inf[3].wrapbase = ci->c_inf[3].base + 0x00100000;
-		ci->c_inf[3].cib = 0x07000000;
-		ci->ramsize = BCM43143_RAMSIZE;
-		break;
-	case BCM43241_CHIP_ID:
-		ci->c_inf[0].wrapbase = 0x18100000;
-		ci->c_inf[0].cib = 0x2a084411;
-		ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
-		ci->c_inf[1].base = 0x18002000;
-		ci->c_inf[1].wrapbase = 0x18102000;
-		ci->c_inf[1].cib = 0x0e004211;
-		ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM;
-		ci->c_inf[2].base = 0x18004000;
-		ci->c_inf[2].wrapbase = 0x18104000;
-		ci->c_inf[2].cib = 0x14080401;
-		ci->c_inf[3].id = BCMA_CORE_ARM_CM3;
-		ci->c_inf[3].base = 0x18003000;
-		ci->c_inf[3].wrapbase = 0x18103000;
-		ci->c_inf[3].cib = 0x07004211;
-		ci->ramsize = 0x90000;
-		break;
-	case BCM4329_CHIP_ID:
+	if (socitype == SOCI_SB) {
+		if (ci->chip != BCM4329_CHIP_ID) {
+			brcmf_err("SB chip is not supported\n");
+			return -ENODEV;
+		}
+		ci->iscoreup = brcmf_sdio_sb_iscoreup;
+		ci->corerev = brcmf_sdio_sb_corerev;
+		ci->coredisable = brcmf_sdio_sb_coredisable;
+		ci->resetcore = brcmf_sdio_sb_resetcore;
+
+		ci->c_inf[0].id = BCMA_CORE_CHIPCOMMON;
+		ci->c_inf[0].base = SI_ENUM_BASE;
 		ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
 		ci->c_inf[1].base = BCM4329_CORE_BUS_BASE;
 		ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM;
 		ci->c_inf[2].base = BCM4329_CORE_SOCRAM_BASE;
 		ci->c_inf[3].id = BCMA_CORE_ARM_CM3;
 		ci->c_inf[3].base = BCM4329_CORE_ARM_BASE;
+		ci->c_inf[4].id = BCMA_CORE_80211;
+		ci->c_inf[4].base = BCM43xx_CORE_D11_BASE;
 		ci->ramsize = BCM4329_RAMSIZE;
-		break;
-	case BCM4330_CHIP_ID:
-		ci->c_inf[0].wrapbase = 0x18100000;
-		ci->c_inf[0].cib = 0x27004211;
-		ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
-		ci->c_inf[1].base = 0x18002000;
-		ci->c_inf[1].wrapbase = 0x18102000;
-		ci->c_inf[1].cib = 0x07004211;
-		ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM;
-		ci->c_inf[2].base = 0x18004000;
-		ci->c_inf[2].wrapbase = 0x18104000;
-		ci->c_inf[2].cib = 0x0d080401;
-		ci->c_inf[3].id = BCMA_CORE_ARM_CM3;
-		ci->c_inf[3].base = 0x18003000;
-		ci->c_inf[3].wrapbase = 0x18103000;
-		ci->c_inf[3].cib = 0x03004211;
-		ci->ramsize = 0x48000;
-		break;
-	case BCM4334_CHIP_ID:
-		ci->c_inf[0].wrapbase = 0x18100000;
-		ci->c_inf[0].cib = 0x29004211;
-		ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
-		ci->c_inf[1].base = 0x18002000;
-		ci->c_inf[1].wrapbase = 0x18102000;
-		ci->c_inf[1].cib = 0x0d004211;
-		ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM;
-		ci->c_inf[2].base = 0x18004000;
-		ci->c_inf[2].wrapbase = 0x18104000;
-		ci->c_inf[2].cib = 0x13080401;
-		ci->c_inf[3].id = BCMA_CORE_ARM_CM3;
-		ci->c_inf[3].base = 0x18003000;
-		ci->c_inf[3].wrapbase = 0x18103000;
-		ci->c_inf[3].cib = 0x07004211;
-		ci->ramsize = 0x80000;
-		break;
-	case BCM4335_CHIP_ID:
-		ci->c_inf[0].wrapbase = 0x18100000;
-		ci->c_inf[0].cib = 0x2b084411;
-		ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
-		ci->c_inf[1].base = 0x18005000;
-		ci->c_inf[1].wrapbase = 0x18105000;
-		ci->c_inf[1].cib = 0x0f004211;
-		ci->c_inf[2].id = BCMA_CORE_ARM_CR4;
-		ci->c_inf[2].base = 0x18002000;
-		ci->c_inf[2].wrapbase = 0x18102000;
-		ci->c_inf[2].cib = 0x01084411;
-		ci->ramsize = 0xc0000;
-		ci->rambase = 0x180000;
-		break;
-	case BCM4339_CHIP_ID:
-		ci->c_inf[0].wrapbase = 0x18100000;
-		ci->c_inf[0].cib = 0x2e084411;
-		ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
-		ci->c_inf[1].base = 0x18005000;
-		ci->c_inf[1].wrapbase = 0x18105000;
-		ci->c_inf[1].cib = 0x15004211;
-		ci->c_inf[2].id = BCMA_CORE_ARM_CR4;
-		ci->c_inf[2].base = 0x18002000;
-		ci->c_inf[2].wrapbase = 0x18102000;
-		ci->c_inf[2].cib = 0x04084411;
-		ci->ramsize = 0xc0000;
-		ci->rambase = 0x180000;
-		break;
-	default:
-		brcmf_err("chipid 0x%x is not supported\n", ci->chip);
-		return -ENODEV;
-	}
-
-	ret = brcmf_sdio_chip_cichk(ci);
-	if (ret)
-		return ret;
-
-	switch (ci->socitype) {
-	case SOCI_SB:
-		ci->iscoreup = brcmf_sdio_sb_iscoreup;
-		ci->corerev = brcmf_sdio_sb_corerev;
-		ci->coredisable = brcmf_sdio_sb_coredisable;
-		ci->resetcore = brcmf_sdio_sb_resetcore;
-		break;
-	case SOCI_AI:
+	} else if (socitype == SOCI_AI) {
 		ci->iscoreup = brcmf_sdio_ai_iscoreup;
 		ci->corerev = brcmf_sdio_ai_corerev;
 		ci->coredisable = brcmf_sdio_ai_coredisable;
 		ci->resetcore = brcmf_sdio_ai_resetcore;
-		break;
-	default:
-		brcmf_err("socitype %u not supported\n", ci->socitype);
+
+		ci->c_inf[0].id = BCMA_CORE_CHIPCOMMON;
+		ci->c_inf[0].base = SI_ENUM_BASE;
+
+		/* Address of cores for new chips should be added here */
+		switch (ci->chip) {
+		case BCM43143_CHIP_ID:
+			ci->c_inf[0].wrapbase = ci->c_inf[0].base + 0x00100000;
+			ci->c_inf[0].cib = 0x2b000000;
+			ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
+			ci->c_inf[1].base = BCM43143_CORE_BUS_BASE;
+			ci->c_inf[1].wrapbase = ci->c_inf[1].base + 0x00100000;
+			ci->c_inf[1].cib = 0x18000000;
+			ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM;
+			ci->c_inf[2].base = BCM43143_CORE_SOCRAM_BASE;
+			ci->c_inf[2].wrapbase = ci->c_inf[2].base + 0x00100000;
+			ci->c_inf[2].cib = 0x14000000;
+			ci->c_inf[3].id = BCMA_CORE_ARM_CM3;
+			ci->c_inf[3].base = BCM43143_CORE_ARM_BASE;
+			ci->c_inf[3].wrapbase = ci->c_inf[3].base + 0x00100000;
+			ci->c_inf[3].cib = 0x07000000;
+			ci->c_inf[4].id = BCMA_CORE_80211;
+			ci->c_inf[4].base = BCM43xx_CORE_D11_BASE;
+			ci->c_inf[4].wrapbase = ci->c_inf[4].base + 0x00100000;
+			ci->ramsize = BCM43143_RAMSIZE;
+			break;
+		case BCM43241_CHIP_ID:
+			ci->c_inf[0].wrapbase = 0x18100000;
+			ci->c_inf[0].cib = 0x2a084411;
+			ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
+			ci->c_inf[1].base = 0x18002000;
+			ci->c_inf[1].wrapbase = 0x18102000;
+			ci->c_inf[1].cib = 0x0e004211;
+			ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM;
+			ci->c_inf[2].base = 0x18004000;
+			ci->c_inf[2].wrapbase = 0x18104000;
+			ci->c_inf[2].cib = 0x14080401;
+			ci->c_inf[3].id = BCMA_CORE_ARM_CM3;
+			ci->c_inf[3].base = 0x18003000;
+			ci->c_inf[3].wrapbase = 0x18103000;
+			ci->c_inf[3].cib = 0x07004211;
+			ci->c_inf[4].id = BCMA_CORE_80211;
+			ci->c_inf[4].base = BCM43xx_CORE_D11_BASE;
+			ci->c_inf[4].wrapbase = ci->c_inf[4].base + 0x00100000;
+			ci->ramsize = 0x90000;
+			break;
+		case BCM4330_CHIP_ID:
+			ci->c_inf[0].wrapbase = 0x18100000;
+			ci->c_inf[0].cib = 0x27004211;
+			ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
+			ci->c_inf[1].base = 0x18002000;
+			ci->c_inf[1].wrapbase = 0x18102000;
+			ci->c_inf[1].cib = 0x07004211;
+			ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM;
+			ci->c_inf[2].base = 0x18004000;
+			ci->c_inf[2].wrapbase = 0x18104000;
+			ci->c_inf[2].cib = 0x0d080401;
+			ci->c_inf[3].id = BCMA_CORE_ARM_CM3;
+			ci->c_inf[3].base = 0x18003000;
+			ci->c_inf[3].wrapbase = 0x18103000;
+			ci->c_inf[3].cib = 0x03004211;
+			ci->c_inf[4].id = BCMA_CORE_80211;
+			ci->c_inf[4].base = BCM43xx_CORE_D11_BASE;
+			ci->c_inf[4].wrapbase = ci->c_inf[4].base + 0x00100000;
+			ci->ramsize = 0x48000;
+			break;
+		case BCM4334_CHIP_ID:
+			ci->c_inf[0].wrapbase = 0x18100000;
+			ci->c_inf[0].cib = 0x29004211;
+			ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
+			ci->c_inf[1].base = 0x18002000;
+			ci->c_inf[1].wrapbase = 0x18102000;
+			ci->c_inf[1].cib = 0x0d004211;
+			ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM;
+			ci->c_inf[2].base = 0x18004000;
+			ci->c_inf[2].wrapbase = 0x18104000;
+			ci->c_inf[2].cib = 0x13080401;
+			ci->c_inf[3].id = BCMA_CORE_ARM_CM3;
+			ci->c_inf[3].base = 0x18003000;
+			ci->c_inf[3].wrapbase = 0x18103000;
+			ci->c_inf[3].cib = 0x07004211;
+			ci->c_inf[4].id = BCMA_CORE_80211;
+			ci->c_inf[4].base = BCM43xx_CORE_D11_BASE;
+			ci->c_inf[4].wrapbase = ci->c_inf[4].base + 0x00100000;
+			ci->ramsize = 0x80000;
+			break;
+		case BCM4335_CHIP_ID:
+			ci->c_inf[0].wrapbase = 0x18100000;
+			ci->c_inf[0].cib = 0x2b084411;
+			ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
+			ci->c_inf[1].base = 0x18005000;
+			ci->c_inf[1].wrapbase = 0x18105000;
+			ci->c_inf[1].cib = 0x0f004211;
+			ci->c_inf[2].id = BCMA_CORE_ARM_CR4;
+			ci->c_inf[2].base = 0x18002000;
+			ci->c_inf[2].wrapbase = 0x18102000;
+			ci->c_inf[2].cib = 0x01084411;
+			ci->c_inf[3].id = BCMA_CORE_80211;
+			ci->c_inf[3].base = BCM43xx_CORE_D11_BASE;
+			ci->c_inf[3].wrapbase = ci->c_inf[3].base + 0x00100000;
+			ci->ramsize = 0xc0000;
+			ci->rambase = 0x180000;
+			break;
+		case BCM43362_CHIP_ID:
+			ci->c_inf[0].wrapbase = 0x18100000;
+			ci->c_inf[0].cib = 0x27004211;
+			ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
+			ci->c_inf[1].base = 0x18002000;
+			ci->c_inf[1].wrapbase = 0x18102000;
+			ci->c_inf[1].cib = 0x0a004211;
+			ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM;
+			ci->c_inf[2].base = 0x18004000;
+			ci->c_inf[2].wrapbase = 0x18104000;
+			ci->c_inf[2].cib = 0x08080401;
+			ci->c_inf[3].id = BCMA_CORE_ARM_CM3;
+			ci->c_inf[3].base = 0x18003000;
+			ci->c_inf[3].wrapbase = 0x18103000;
+			ci->c_inf[3].cib = 0x03004211;
+			ci->c_inf[4].id = BCMA_CORE_80211;
+			ci->c_inf[4].base = BCM43xx_CORE_D11_BASE;
+			ci->c_inf[4].wrapbase = ci->c_inf[4].base + 0x00100000;
+			ci->ramsize = 0x3C000;
+			break;
+		case BCM4339_CHIP_ID:
+			ci->c_inf[0].wrapbase = 0x18100000;
+			ci->c_inf[0].cib = 0x2e084411;
+			ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
+			ci->c_inf[1].base = 0x18005000;
+			ci->c_inf[1].wrapbase = 0x18105000;
+			ci->c_inf[1].cib = 0x15004211;
+			ci->c_inf[2].id = BCMA_CORE_ARM_CR4;
+			ci->c_inf[2].base = 0x18002000;
+			ci->c_inf[2].wrapbase = 0x18102000;
+			ci->c_inf[2].cib = 0x04084411;
+			ci->c_inf[3].id = BCMA_CORE_80211;
+			ci->c_inf[3].base = BCM43xx_CORE_D11_BASE;
+			ci->c_inf[3].wrapbase = ci->c_inf[3].base + 0x00100000;
+			ci->ramsize = 0xc0000;
+			ci->rambase = 0x180000;
+			break;
+		default:
+			brcmf_err("AXI chip is not supported\n");
+			return -ENODEV;
+		}
+	} else {
+		brcmf_err("chip backplane type %u is not supported\n",
+			  socitype);
 		return -ENODEV;
 	}
 
-	return 0;
+	return brcmf_sdio_chip_cichk(ci);
 }
 
 static int
@@ -607,7 +669,7 @@ brcmf_sdio_chip_buscoreprep(struct brcmf_sdio_dev *sdiodev)
 
 	/* Try forcing SDIO core to do ALPAvail request only */
 	clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_ALP_AVAIL_REQ;
-	brcmf_sdio_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
+	brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
 	if (err) {
 		brcmf_err("error writing for HT off\n");
 		return err;
@@ -615,8 +677,8 @@ brcmf_sdio_chip_buscoreprep(struct brcmf_sdio_dev *sdiodev)
 
 	/* If register supported, wait for ALPAvail and then force ALP */
 	/* This may take up to 15 milliseconds */
-	clkval = brcmf_sdio_regrb(sdiodev,
-				  SBSDIO_FUNC1_CHIPCLKCSR, NULL);
+	clkval = brcmf_sdiod_regrb(sdiodev,
+				   SBSDIO_FUNC1_CHIPCLKCSR, NULL);
 
 	if ((clkval & ~SBSDIO_AVBITS) != clkset) {
 		brcmf_err("ChipClkCSR access: wrote 0x%02x read 0x%02x\n",
@@ -624,8 +686,8 @@ brcmf_sdio_chip_buscoreprep(struct brcmf_sdio_dev *sdiodev)
 		return -EACCES;
 	}
 
-	SPINWAIT(((clkval = brcmf_sdio_regrb(sdiodev,
-					     SBSDIO_FUNC1_CHIPCLKCSR, NULL)),
+	SPINWAIT(((clkval = brcmf_sdiod_regrb(sdiodev,
+					      SBSDIO_FUNC1_CHIPCLKCSR, NULL)),
 			!SBSDIO_ALPAV(clkval)),
 			PMU_MAX_TRANSITION_DLY);
 	if (!SBSDIO_ALPAV(clkval)) {
@@ -635,18 +697,18 @@ brcmf_sdio_chip_buscoreprep(struct brcmf_sdio_dev *sdiodev)
 	}
 
 	clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_FORCE_ALP;
-	brcmf_sdio_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
+	brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
 	udelay(65);
 
 	/* Also, disable the extra SDIO pull-ups */
-	brcmf_sdio_regwb(sdiodev, SBSDIO_FUNC1_SDIOPULLUP, 0, NULL);
+	brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_SDIOPULLUP, 0, NULL);
 
 	return 0;
 }
 
 static void
 brcmf_sdio_chip_buscoresetup(struct brcmf_sdio_dev *sdiodev,
-			     struct chip_info *ci)
+			     struct brcmf_chip *ci)
 {
 	u32 base = ci->c_inf[0].base;
 
@@ -654,16 +716,16 @@ brcmf_sdio_chip_buscoresetup(struct brcmf_sdio_dev *sdiodev,
 	ci->c_inf[0].rev = ci->corerev(sdiodev, ci, ci->c_inf[0].id);
 
 	/* get chipcommon capabilities */
-	ci->c_inf[0].caps = brcmf_sdio_regrl(sdiodev,
-					     CORE_CC_REG(base, capabilities),
-					     NULL);
+	ci->c_inf[0].caps = brcmf_sdiod_regrl(sdiodev,
+					      CORE_CC_REG(base, capabilities),
+					      NULL);
 
 	/* get pmu caps & rev */
 	if (ci->c_inf[0].caps & CC_CAP_PMU) {
 		ci->pmucaps =
-			brcmf_sdio_regrl(sdiodev,
-					 CORE_CC_REG(base, pmucapabilities),
-					 NULL);
+			brcmf_sdiod_regrl(sdiodev,
+					  CORE_CC_REG(base, pmucapabilities),
+					  NULL);
 		ci->pmurev = ci->pmucaps & PCAP_REV_MASK;
 	}
 
@@ -677,19 +739,18 @@ brcmf_sdio_chip_buscoresetup(struct brcmf_sdio_dev *sdiodev,
 	 * Make sure any on-chip ARM is off (in case strapping is wrong),
 	 * or downloaded code was already running.
 	 */
-	ci->coredisable(sdiodev, ci, BCMA_CORE_ARM_CM3, 0);
+	ci->coredisable(sdiodev, ci, BCMA_CORE_ARM_CM3, 0, 0);
 }
 
 int brcmf_sdio_chip_attach(struct brcmf_sdio_dev *sdiodev,
-			   struct chip_info **ci_ptr, u32 regs)
+			   struct brcmf_chip **ci_ptr)
 {
 	int ret;
-	struct chip_info *ci;
+	struct brcmf_chip *ci;
 
 	brcmf_dbg(TRACE, "Enter\n");
 
-	/* alloc chip_info_t */
-	ci = kzalloc(sizeof(struct chip_info), GFP_ATOMIC);
+	ci = kzalloc(sizeof(*ci), GFP_ATOMIC);
 	if (!ci)
 		return -ENOMEM;
 
@@ -697,16 +758,16 @@ int brcmf_sdio_chip_attach(struct brcmf_sdio_dev *sdiodev,
 	if (ret != 0)
 		goto err;
 
-	ret = brcmf_sdio_chip_recognition(sdiodev, ci, regs);
+	ret = brcmf_sdio_chip_recognition(sdiodev, ci);
 	if (ret != 0)
 		goto err;
 
 	brcmf_sdio_chip_buscoresetup(sdiodev, ci);
 
-	brcmf_sdio_regwl(sdiodev, CORE_CC_REG(ci->c_inf[0].base, gpiopullup),
-			 0, NULL);
-	brcmf_sdio_regwl(sdiodev, CORE_CC_REG(ci->c_inf[0].base, gpiopulldown),
-			 0, NULL);
+	brcmf_sdiod_regwl(sdiodev, CORE_CC_REG(ci->c_inf[0].base, gpiopullup),
+			  0, NULL);
+	brcmf_sdiod_regwl(sdiodev, CORE_CC_REG(ci->c_inf[0].base, gpiopulldown),
+			  0, NULL);
 
 	*ci_ptr = ci;
 	return 0;
@@ -717,7 +778,7 @@ err:
 }
 
 void
-brcmf_sdio_chip_detach(struct chip_info **ci_ptr)
+brcmf_sdio_chip_detach(struct brcmf_chip **ci_ptr)
 {
 	brcmf_dbg(TRACE, "Enter\n");
 
@@ -736,7 +797,7 @@ static char *brcmf_sdio_chip_name(uint chipid, char *buf, uint len)
 
 void
 brcmf_sdio_chip_drivestrengthinit(struct brcmf_sdio_dev *sdiodev,
-				  struct chip_info *ci, u32 drivestrength)
+				  struct brcmf_chip *ci, u32 drivestrength)
 {
 	const struct sdiod_drive_str *str_tab = NULL;
 	u32 str_mask;
@@ -757,6 +818,11 @@ brcmf_sdio_chip_drivestrengthinit(struct brcmf_sdio_dev *sdiodev,
 		str_mask = 0x00003800;
 		str_shift = 11;
 		break;
+	case SDIOD_DRVSTR_KEY(BCM4334_CHIP_ID, 17):
+		str_tab = sdiod_drvstr_tab6_1v8;
+		str_mask = 0x00001800;
+		str_shift = 11;
+		break;
 	case SDIOD_DRVSTR_KEY(BCM43143_CHIP_ID, 17):
 		/* note: 43143 does not support tristate */
 		i = ARRAY_SIZE(sdiod_drvstr_tab2_3v3) - 1;
@@ -769,6 +835,11 @@ brcmf_sdio_chip_drivestrengthinit(struct brcmf_sdio_dev *sdiodev,
 				  brcmf_sdio_chip_name(ci->chip, chn, 8),
 				  drivestrength);
 		break;
+	case SDIOD_DRVSTR_KEY(BCM43362_CHIP_ID, 13):
+		str_tab = sdiod_drive_strength_tab5_1v8;
+		str_mask = 0x00003800;
+		str_shift = 11;
+		break;
 	default:
 		brcmf_err("No SDIO Drive strength init done for chip %s rev %d pmurev %d\n",
 			  brcmf_sdio_chip_name(ci->chip, chn, 8),
@@ -784,119 +855,31 @@ brcmf_sdio_chip_drivestrengthinit(struct brcmf_sdio_dev *sdiodev,
 			}
 		}
 		addr = CORE_CC_REG(base, chipcontrol_addr);
-		brcmf_sdio_regwl(sdiodev, addr, 1, NULL);
-		cc_data_temp = brcmf_sdio_regrl(sdiodev, addr, NULL);
+		brcmf_sdiod_regwl(sdiodev, addr, 1, NULL);
+		cc_data_temp = brcmf_sdiod_regrl(sdiodev, addr, NULL);
 		cc_data_temp &= ~str_mask;
 		drivestrength_sel <<= str_shift;
 		cc_data_temp |= drivestrength_sel;
-		brcmf_sdio_regwl(sdiodev, addr, cc_data_temp, NULL);
+		brcmf_sdiod_regwl(sdiodev, addr, cc_data_temp, NULL);
 
 		brcmf_dbg(INFO, "SDIO: %d mA (req=%d mA) drive strength selected, set to 0x%08x\n",
 			  str_tab[i].strength, drivestrength, cc_data_temp);
 	}
 }
 
-#ifdef DEBUG
-static bool
-brcmf_sdio_chip_verifynvram(struct brcmf_sdio_dev *sdiodev, u32 nvram_addr,
-			    char *nvram_dat, uint nvram_sz)
-{
-	char *nvram_ularray;
-	int err;
-	bool ret = true;
-
-	/* read back and verify */
-	brcmf_dbg(INFO, "Compare NVRAM dl & ul; size=%d\n", nvram_sz);
-	nvram_ularray = kmalloc(nvram_sz, GFP_KERNEL);
-	/* do not proceed while no memory but  */
-	if (!nvram_ularray)
-		return true;
-
-	/* Upload image to verify downloaded contents. */
-	memset(nvram_ularray, 0xaa, nvram_sz);
-
-	/* Read the vars list to temp buffer for comparison */
-	err = brcmf_sdio_ramrw(sdiodev, false, nvram_addr, nvram_ularray,
-			       nvram_sz);
-	if (err) {
-		brcmf_err("error %d on reading %d nvram bytes at 0x%08x\n",
-			  err, nvram_sz, nvram_addr);
-	} else if (memcmp(nvram_dat, nvram_ularray, nvram_sz)) {
-		brcmf_err("Downloaded NVRAM image is corrupted\n");
-		ret = false;
-	}
-	kfree(nvram_ularray);
-
-	return ret;
-}
-#else	/* DEBUG */
-static inline bool
-brcmf_sdio_chip_verifynvram(struct brcmf_sdio_dev *sdiodev, u32 nvram_addr,
-			    char *nvram_dat, uint nvram_sz)
-{
-	return true;
-}
-#endif	/* DEBUG */
-
-static bool brcmf_sdio_chip_writenvram(struct brcmf_sdio_dev *sdiodev,
-				       struct chip_info *ci,
-				       char *nvram_dat, uint nvram_sz)
-{
-	int err;
-	u32 nvram_addr;
-	u32 token;
-	__le32 token_le;
-
-	nvram_addr = (ci->ramsize - 4) - nvram_sz + ci->rambase;
-
-	/* Write the vars list */
-	err = brcmf_sdio_ramrw(sdiodev, true, nvram_addr, nvram_dat, nvram_sz);
-	if (err) {
-		brcmf_err("error %d on writing %d nvram bytes at 0x%08x\n",
-			  err, nvram_sz, nvram_addr);
-		return false;
-	}
-
-	if (!brcmf_sdio_chip_verifynvram(sdiodev, nvram_addr,
-					 nvram_dat, nvram_sz))
-		return false;
-
-	/* generate token:
-	 * nvram size, converted to words, in lower 16-bits, checksum
-	 * in upper 16-bits.
-	 */
-	token = nvram_sz / 4;
-	token = (~token << 16) | (token & 0x0000FFFF);
-	token_le = cpu_to_le32(token);
-
-	brcmf_dbg(INFO, "RAM size: %d\n", ci->ramsize);
-	brcmf_dbg(INFO, "nvram is placed at %d, size %d, token=0x%08x\n",
-		  nvram_addr, nvram_sz, token);
-
-	/* Write the length token to the last word */
-	if (brcmf_sdio_ramrw(sdiodev, true, (ci->ramsize - 4 + ci->rambase),
-			     (u8 *)&token_le, 4))
-		return false;
-
-	return true;
-}
-
 static void
 brcmf_sdio_chip_cm3_enterdl(struct brcmf_sdio_dev *sdiodev,
-			    struct chip_info *ci)
+			    struct brcmf_chip *ci)
 {
-	u32 zeros = 0;
-
-	ci->coredisable(sdiodev, ci, BCMA_CORE_ARM_CM3, 0);
-	ci->resetcore(sdiodev, ci, BCMA_CORE_INTERNAL_MEM, 0);
-
-	/* clear length token */
-	brcmf_sdio_ramrw(sdiodev, true, ci->ramsize - 4, (u8 *)&zeros, 4);
+	ci->coredisable(sdiodev, ci, BCMA_CORE_ARM_CM3, 0, 0);
+	ci->resetcore(sdiodev, ci, BCMA_CORE_80211,
+		      D11_BCMA_IOCTL_PHYRESET | D11_BCMA_IOCTL_PHYCLOCKEN,
+		      D11_BCMA_IOCTL_PHYCLOCKEN, D11_BCMA_IOCTL_PHYCLOCKEN);
+	ci->resetcore(sdiodev, ci, BCMA_CORE_INTERNAL_MEM, 0, 0, 0);
 }
 
-static bool
-brcmf_sdio_chip_cm3_exitdl(struct brcmf_sdio_dev *sdiodev, struct chip_info *ci,
-			   char *nvram_dat, uint nvram_sz)
+static bool brcmf_sdio_chip_cm3_exitdl(struct brcmf_sdio_dev *sdiodev,
+				       struct brcmf_chip *ci)
 {
 	u8 core_idx;
 	u32 reg_addr;
@@ -906,56 +889,64 @@ brcmf_sdio_chip_cm3_exitdl(struct brcmf_sdio_dev *sdiodev, struct chip_info *ci,
 		return false;
 	}
 
-	if (!brcmf_sdio_chip_writenvram(sdiodev, ci, nvram_dat, nvram_sz))
-		return false;
-
 	/* clear all interrupts */
 	core_idx = brcmf_sdio_chip_getinfidx(ci, BCMA_CORE_SDIO_DEV);
 	reg_addr = ci->c_inf[core_idx].base;
 	reg_addr += offsetof(struct sdpcmd_regs, intstatus);
-	brcmf_sdio_regwl(sdiodev, reg_addr, 0xFFFFFFFF, NULL);
+	brcmf_sdiod_regwl(sdiodev, reg_addr, 0xFFFFFFFF, NULL);
 
-	ci->resetcore(sdiodev, ci, BCMA_CORE_ARM_CM3, 0);
+	ci->resetcore(sdiodev, ci, BCMA_CORE_ARM_CM3, 0, 0, 0);
 
 	return true;
 }
 
 static inline void
 brcmf_sdio_chip_cr4_enterdl(struct brcmf_sdio_dev *sdiodev,
-			    struct chip_info *ci)
+			    struct brcmf_chip *ci)
 {
-	ci->resetcore(sdiodev, ci, BCMA_CORE_ARM_CR4,
-		      ARMCR4_BCMA_IOCTL_CPUHALT);
+	u8 idx;
+	u32 regdata;
+	u32 wrapbase;
+	idx = brcmf_sdio_chip_getinfidx(ci, BCMA_CORE_ARM_CR4);
+
+	if (idx == BRCMF_MAX_CORENUM)
+		return;
+
+	wrapbase = ci->c_inf[idx].wrapbase;
+	regdata = brcmf_sdiod_regrl(sdiodev, wrapbase + BCMA_IOCTL, NULL);
+	regdata &= ARMCR4_BCMA_IOCTL_CPUHALT;
+	ci->resetcore(sdiodev, ci, BCMA_CORE_ARM_CR4, regdata,
+		      ARMCR4_BCMA_IOCTL_CPUHALT, ARMCR4_BCMA_IOCTL_CPUHALT);
+	ci->resetcore(sdiodev, ci, BCMA_CORE_80211,
+		      D11_BCMA_IOCTL_PHYRESET | D11_BCMA_IOCTL_PHYCLOCKEN,
+		      D11_BCMA_IOCTL_PHYCLOCKEN, D11_BCMA_IOCTL_PHYCLOCKEN);
 }
 
-static bool
-brcmf_sdio_chip_cr4_exitdl(struct brcmf_sdio_dev *sdiodev, struct chip_info *ci,
-			   char *nvram_dat, uint nvram_sz)
+static bool brcmf_sdio_chip_cr4_exitdl(struct brcmf_sdio_dev *sdiodev,
+				       struct brcmf_chip *ci, u32 rstvec)
 {
 	u8 core_idx;
 	u32 reg_addr;
 
-	if (!brcmf_sdio_chip_writenvram(sdiodev, ci, nvram_dat, nvram_sz))
-		return false;
-
 	/* clear all interrupts */
 	core_idx = brcmf_sdio_chip_getinfidx(ci, BCMA_CORE_SDIO_DEV);
 	reg_addr = ci->c_inf[core_idx].base;
 	reg_addr += offsetof(struct sdpcmd_regs, intstatus);
-	brcmf_sdio_regwl(sdiodev, reg_addr, 0xFFFFFFFF, NULL);
+	brcmf_sdiod_regwl(sdiodev, reg_addr, 0xFFFFFFFF, NULL);
 
 	/* Write reset vector to address 0 */
-	brcmf_sdio_ramrw(sdiodev, true, 0, (void *)&ci->rst_vec,
-			 sizeof(ci->rst_vec));
+	brcmf_sdiod_ramrw(sdiodev, true, 0, (void *)&rstvec,
+			  sizeof(rstvec));
 
 	/* restore ARM */
-	ci->resetcore(sdiodev, ci, BCMA_CORE_ARM_CR4, 0);
+	ci->resetcore(sdiodev, ci, BCMA_CORE_ARM_CR4, ARMCR4_BCMA_IOCTL_CPUHALT,
+		      0, 0);
 
 	return true;
 }
 
 void brcmf_sdio_chip_enter_download(struct brcmf_sdio_dev *sdiodev,
-				    struct chip_info *ci)
+				    struct brcmf_chip *ci)
 {
 	u8 arm_core_idx;
 
@@ -969,15 +960,13 @@ void brcmf_sdio_chip_enter_download(struct brcmf_sdio_dev *sdiodev,
 }
 
 bool brcmf_sdio_chip_exit_download(struct brcmf_sdio_dev *sdiodev,
-				   struct chip_info *ci, char *nvram_dat,
-				   uint nvram_sz)
+				   struct brcmf_chip *ci, u32 rstvec)
 {
 	u8 arm_core_idx;
 
 	arm_core_idx = brcmf_sdio_chip_getinfidx(ci, BCMA_CORE_ARM_CM3);
 	if (BRCMF_MAX_CORENUM != arm_core_idx)
-		return brcmf_sdio_chip_cm3_exitdl(sdiodev, ci, nvram_dat,
-						  nvram_sz);
+		return brcmf_sdio_chip_cm3_exitdl(sdiodev, ci);
 
-	return brcmf_sdio_chip_cr4_exitdl(sdiodev, ci, nvram_dat, nvram_sz);
+	return brcmf_sdio_chip_cr4_exitdl(sdiodev, ci, rstvec);
 }
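The drive-strength tables added to this file all follow one encoding: entries run from strongest to weakest, the walk (just above this hunk) picks the first entry whose strength does not exceed the requested milliamp value, and its selector is shifted into a masked field of the chipcontrol register, exactly as the cc_data_temp sequence above does. A hedged restatement of that lookup with no SDIO access — pick_drivestrength is an invented name, and the mask/shift values are simply the ones used for the BCM4334 PMU rev 17 case — could read:

/* Illustrative only: mirrors the table walk feeding
 * brcmf_sdio_chip_drivestrengthinit(), without any register I/O. */
#include <stdio.h>

struct drive_str {
	unsigned int strength;	/* drive strength in mA */
	unsigned int sel;	/* register field selector */
};

/* e.g. the PMU rev 17 (1.8 V) table from this patch */
static const struct drive_str tab6_1v8[] = {
	{3, 0x3}, {2, 0x2}, {1, 0x1}, {0, 0x0}
};

static unsigned int pick_drivestrength(const struct drive_str *tab, int n,
				       unsigned int requested_ma,
				       unsigned int mask, unsigned int shift,
				       unsigned int ccreg)
{
	unsigned int sel = 0;
	int i;

	for (i = 0; i < n; i++) {
		if (requested_ma >= tab[i].strength) {
			/* first entry not exceeding the request wins */
			sel = tab[i].sel;
			break;
		}
	}
	return (ccreg & ~mask) | (sel << shift);	/* splice into field */
}

int main(void)
{
	/* mask 0x00001800, shift 11: the 4334/PMU17 field added above */
	unsigned int reg = pick_drivestrength(tab6_1v8, 4, 2,
					      0x00001800, 11, 0x0);

	printf("chipcontrol = 0x%08x\n", reg);	/* prints 0x00001000 */
	return 0;
}

In this sketch the trailing {0, 0x0} entry acts as a catch-all, so a request below every real table entry still resolves to the weakest setting rather than leaving the field untouched.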
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h
index 507c61c991fa..fb0614329ede 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h
@@ -54,15 +54,7 @@
 
 #define BRCMF_MAX_CORENUM	6
 
-/* SDIO device ID */
-#define SDIO_DEVICE_ID_BROADCOM_43143		43143
-#define SDIO_DEVICE_ID_BROADCOM_43241		0x4324
-#define SDIO_DEVICE_ID_BROADCOM_4329		0x4329
-#define SDIO_DEVICE_ID_BROADCOM_4330		0x4330
-#define SDIO_DEVICE_ID_BROADCOM_4334		0x4334
-#define SDIO_DEVICE_ID_BROADCOM_4335_4339	0x4335
-
-struct chip_core_info {
+struct brcmf_core {
 	u16 id;
 	u16 rev;
 	u32 base;
@@ -71,27 +63,28 @@ struct chip_core_info {
 	u32 cib;
 };
 
-struct chip_info {
+struct brcmf_chip {
 	u32 chip;
 	u32 chiprev;
-	u32 socitype;
 	/* core info */
 	/* always put chipcommon core at 0, bus core at 1 */
-	struct chip_core_info c_inf[BRCMF_MAX_CORENUM];
+	struct brcmf_core c_inf[BRCMF_MAX_CORENUM];
 	u32 pmurev;
 	u32 pmucaps;
 	u32 ramsize;
 	u32 rambase;
 	u32 rst_vec;	/* reset vector for ARM CR4 core */
 
-	bool (*iscoreup)(struct brcmf_sdio_dev *sdiodev, struct chip_info *ci,
+	bool (*iscoreup)(struct brcmf_sdio_dev *sdiodev, struct brcmf_chip *ci,
 			 u16 coreid);
-	u32 (*corerev)(struct brcmf_sdio_dev *sdiodev, struct chip_info *ci,
+	u32 (*corerev)(struct brcmf_sdio_dev *sdiodev, struct brcmf_chip *ci,
 			 u16 coreid);
 	void (*coredisable)(struct brcmf_sdio_dev *sdiodev,
-			struct chip_info *ci, u16 coreid, u32 core_bits);
+			struct brcmf_chip *ci, u16 coreid, u32 pre_resetbits,
+			u32 in_resetbits);
 	void (*resetcore)(struct brcmf_sdio_dev *sdiodev,
-			struct chip_info *ci, u16 coreid, u32 core_bits);
+			struct brcmf_chip *ci, u16 coreid, u32 pre_resetbits,
+			u32 in_resetbits, u32 post_resetbits);
 };
 
 struct sbconfig {
@@ -224,15 +217,15 @@ struct sdpcmd_regs {
 };
 
 int brcmf_sdio_chip_attach(struct brcmf_sdio_dev *sdiodev,
-			   struct chip_info **ci_ptr, u32 regs);
-void brcmf_sdio_chip_detach(struct chip_info **ci_ptr);
+			   struct brcmf_chip **ci_ptr);
+void brcmf_sdio_chip_detach(struct brcmf_chip **ci_ptr);
 void brcmf_sdio_chip_drivestrengthinit(struct brcmf_sdio_dev *sdiodev,
-				       struct chip_info *ci, u32 drivestrength);
-u8 brcmf_sdio_chip_getinfidx(struct chip_info *ci, u16 coreid);
+				       struct brcmf_chip *ci,
+				       u32 drivestrength);
+u8 brcmf_sdio_chip_getinfidx(struct brcmf_chip *ci, u16 coreid);
 void brcmf_sdio_chip_enter_download(struct brcmf_sdio_dev *sdiodev,
-				    struct chip_info *ci);
+				    struct brcmf_chip *ci);
 bool brcmf_sdio_chip_exit_download(struct brcmf_sdio_dev *sdiodev,
-				   struct chip_info *ci, char *nvram_dat,
-				   uint nvram_sz);
+				   struct brcmf_chip *ci, u32 rstvec);
 
 #endif		/* _BRCMFMAC_SDIO_CHIP_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h b/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
index fc0d4f0129db..092e9c824992 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
@@ -164,11 +164,9 @@ struct brcmf_sdio;
 struct brcmf_sdio_dev {
 	struct sdio_func *func[SDIO_MAX_FUNCS];
 	u8 num_funcs;			/* Supported funcs on client */
-	u32 func_cis_ptr[SDIOD_MAX_IOFUNCS];
 	u32 sbwad;			/* Save backplane window address */
-	void *bus;
+	struct brcmf_sdio *bus;
 	atomic_t suspend;		/* suspend flag */
-	wait_queue_head_t request_byte_wait;
 	wait_queue_head_t request_word_wait;
 	wait_queue_head_t request_buffer_wait;
 	struct device *dev;
@@ -185,22 +183,19 @@ struct brcmf_sdio_dev {
 };
 
 /* Register/deregister interrupt handler. */
-int brcmf_sdio_intr_register(struct brcmf_sdio_dev *sdiodev);
-int brcmf_sdio_intr_unregister(struct brcmf_sdio_dev *sdiodev);
+int brcmf_sdiod_intr_register(struct brcmf_sdio_dev *sdiodev);
+int brcmf_sdiod_intr_unregister(struct brcmf_sdio_dev *sdiodev);
 
 /* sdio device register access interface */
-u8 brcmf_sdio_regrb(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret);
-u32 brcmf_sdio_regrl(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret);
-void brcmf_sdio_regwb(struct brcmf_sdio_dev *sdiodev, u32 addr, u8 data,
-		      int *ret);
-void brcmf_sdio_regwl(struct brcmf_sdio_dev *sdiodev, u32 addr, u32 data,
-		      int *ret);
-int brcmf_sdio_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
-			    void *data, bool write);
+u8 brcmf_sdiod_regrb(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret);
+u32 brcmf_sdiod_regrl(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret);
+void brcmf_sdiod_regwb(struct brcmf_sdio_dev *sdiodev, u32 addr, u8 data,
+		       int *ret);
+void brcmf_sdiod_regwl(struct brcmf_sdio_dev *sdiodev, u32 addr, u32 data,
+		       int *ret);
 
 /* Buffer transfer to/from device (client) core via cmd53.
  *   fn:       function number
- *   addr:     backplane address (i.e. >= regsva from attach)
  *   flags:    backplane width, address increment, sync/async
  *   buf:      pointer to memory data buffer
  *   nbytes:   number of bytes to transfer to/from buf
@@ -210,17 +205,14 @@ int brcmf_sdio_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
  * Returns 0 or error code.
  * NOTE: Async operation is not currently supported.
  */
-int brcmf_sdcard_send_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
-			  uint flags, struct sk_buff_head *pktq);
-int brcmf_sdcard_send_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
-			  uint flags, u8 *buf, uint nbytes);
-
-int brcmf_sdcard_recv_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
-			  uint flags, struct sk_buff *pkt);
-int brcmf_sdcard_recv_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
-			  uint flags, u8 *buf, uint nbytes);
-int brcmf_sdcard_recv_chain(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
-			    uint flags, struct sk_buff_head *pktq, uint totlen);
+int brcmf_sdiod_send_pkt(struct brcmf_sdio_dev *sdiodev,
+			 struct sk_buff_head *pktq);
+int brcmf_sdiod_send_buf(struct brcmf_sdio_dev *sdiodev, u8 *buf, uint nbytes);
+
+int brcmf_sdiod_recv_pkt(struct brcmf_sdio_dev *sdiodev, struct sk_buff *pkt);
+int brcmf_sdiod_recv_buf(struct brcmf_sdio_dev *sdiodev, u8 *buf, uint nbytes);
+int brcmf_sdiod_recv_chain(struct brcmf_sdio_dev *sdiodev,
+			   struct sk_buff_head *pktq, uint totlen);
 
 /* Flags bits */
 
@@ -236,43 +228,16 @@ int brcmf_sdcard_recv_chain(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
  *   nbytes:   number of bytes to transfer to/from buf
  * Returns 0 or error code.
  */
-int brcmf_sdcard_rwdata(struct brcmf_sdio_dev *sdiodev, uint rw, u32 addr,
-			u8 *buf, uint nbytes);
-int brcmf_sdio_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
-		     u8 *data, uint size);
+int brcmf_sdiod_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
+		      u8 *data, uint size);
 
 /* Issue an abort to the specified function */
-int brcmf_sdcard_abort(struct brcmf_sdio_dev *sdiodev, uint fn);
-
-/* platform specific/high level functions */
-int brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev);
-int brcmf_sdio_remove(struct brcmf_sdio_dev *sdiodev);
-
-/* attach, return handler on success, NULL if failed.
- *  The handler shall be provided by all subsequent calls. No local cache
- *  cfghdl points to the starting address of pci device mapped memory
- */
-int brcmf_sdioh_attach(struct brcmf_sdio_dev *sdiodev);
-void brcmf_sdioh_detach(struct brcmf_sdio_dev *sdiodev);
-
-/* read or write one byte using cmd52 */
-int brcmf_sdioh_request_byte(struct brcmf_sdio_dev *sdiodev, uint rw, uint fnc,
-			     uint addr, u8 *byte);
-
-/* read or write 2/4 bytes using cmd53 */
-int brcmf_sdioh_request_word(struct brcmf_sdio_dev *sdiodev, uint rw, uint fnc,
-			     uint addr, u32 *word, uint nbyte);
-
-/* Watchdog timer interface for pm ops */
-void brcmf_sdio_wdtmr_enable(struct brcmf_sdio_dev *sdiodev, bool enable);
+int brcmf_sdiod_abort(struct brcmf_sdio_dev *sdiodev, uint fn);
 
-void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev);
-void brcmf_sdbrcm_disconnect(void *ptr);
-void brcmf_sdbrcm_isr(void *arg);
+struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev);
+void brcmf_sdio_remove(struct brcmf_sdio *bus);
+void brcmf_sdio_isr(struct brcmf_sdio *bus);
 
-void brcmf_sdbrcm_wd_timer(struct brcmf_sdio *bus, uint wdtick);
+void brcmf_sdio_wd_timer(struct brcmf_sdio *bus, uint wdtick);
 
-void brcmf_pm_resume_wait(struct brcmf_sdio_dev *sdiodev,
-			  wait_queue_head_t *wq);
-bool brcmf_pm_resume_error(struct brcmf_sdio_dev *sdiodev);
 #endif				/* _BRCM_SDH_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/tracepoint.h b/drivers/net/wireless/brcm80211/brcmfmac/tracepoint.h
index 3c67529b9074..4d7d51f95716 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/tracepoint.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/tracepoint.h
@@ -89,7 +89,7 @@ TRACE_EVENT(brcmf_hexdump,
 	TP_printk("hexdump [addr=%lx, length=%lu]", __entry->addr, __entry->len)
 );
 
-TRACE_EVENT(brcmf_bdchdr,
+TRACE_EVENT(brcmf_bcdchdr,
 	TP_PROTO(void *data),
 	TP_ARGS(data),
 	TP_STRUCT__entry(
@@ -107,24 +107,35 @@ TRACE_EVENT(brcmf_bdchdr,
 		memcpy(__get_dynamic_array(signal),
 		       (u8 *)data + 4, __entry->siglen);
 	),
-	TP_printk("bdc: prio=%d siglen=%d", __entry->prio, __entry->siglen)
+	TP_printk("bcdc: prio=%d siglen=%d", __entry->prio, __entry->siglen)
 );
 
+#ifndef SDPCM_RX
+#define SDPCM_RX	0
+#endif
+#ifndef SDPCM_TX
+#define SDPCM_TX	1
+#endif
+#ifndef SDPCM_GLOM
+#define SDPCM_GLOM	2
+#endif
+
 TRACE_EVENT(brcmf_sdpcm_hdr,
-	TP_PROTO(bool tx, void *data),
-	TP_ARGS(tx, data),
+	TP_PROTO(u8 dir, void *data),
+	TP_ARGS(dir, data),
 	TP_STRUCT__entry(
-		__field(u8, tx)
+		__field(u8, dir)
 		__field(u16, len)
-		__array(u8, hdr, 12)
+		__dynamic_array(u8, hdr, dir == SDPCM_GLOM ? 20 : 12)
 	),
 	TP_fast_assign(
-		memcpy(__entry->hdr, data, 12);
-		__entry->len = __entry->hdr[0] | (__entry->hdr[1] << 8);
-		__entry->tx = tx ? 1 : 0;
+		memcpy(__get_dynamic_array(hdr), data, dir == SDPCM_GLOM ? 20 : 12);
+		__entry->len = *(u8 *)data | (*((u8 *)data + 1) << 8);
+		__entry->dir = dir;
 	),
-	TP_printk("sdpcm: %s len %u, seq %d", __entry->tx ? "TX" : "RX",
-		  __entry->len, __entry->hdr[4])
+	TP_printk("sdpcm: %s len %u, seq %d",
+		  __entry->dir == SDPCM_RX ? "RX" : "TX",
+		  __entry->len, ((u8 *)__get_dynamic_array(hdr))[4])
 );
 
 #ifdef CONFIG_BRCM_TRACING
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
index 422f44c63175..24f65cd53859 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
@@ -522,10 +522,10 @@ brcmf_usb_state_change(struct brcmf_usbdev_info *devinfo, int state)
 	/* update state of upper layer */
 	if (state == BRCMFMAC_USB_STATE_DOWN) {
 		brcmf_dbg(USB, "DBUS is down\n");
-		bcmf_bus->state = BRCMF_BUS_DOWN;
+		brcmf_bus_change_state(bcmf_bus, BRCMF_BUS_DOWN);
 	} else if (state == BRCMFMAC_USB_STATE_UP) {
 		brcmf_dbg(USB, "DBUS is up\n");
-		bcmf_bus->state = BRCMF_BUS_DATA;
+		brcmf_bus_change_state(bcmf_bus, BRCMF_BUS_DATA);
 	} else {
 		brcmf_dbg(USB, "DBUS current state=%d\n", state);
 	}
@@ -1253,9 +1253,10 @@ static int brcmf_usb_probe_cb(struct brcmf_usbdev_info *devinfo)
 	bus->ops = &brcmf_usb_bus_ops;
 	bus->chip = bus_pub->devid;
 	bus->chiprev = bus_pub->chiprev;
+	bus->proto_type = BRCMF_PROTO_BCDC;
 
 	/* Attach to the common driver interface */
-	ret = brcmf_attach(0, dev);
+	ret = brcmf_attach(dev);
 	if (ret) {
 		brcmf_err("brcmf_attach failed\n");
 		goto fail;
@@ -1454,7 +1455,7 @@ static int brcmf_usb_resume(struct usb_interface *intf)
 	struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(&usb->dev);
 
 	brcmf_dbg(USB, "Enter\n");
-	if (!brcmf_attach(0, devinfo->dev))
+	if (!brcmf_attach(devinfo->dev))
 		return brcmf_bus_start(&usb->dev);
 
 	return 0;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
index 571f013cebbb..d7718a5fa2f0 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
@@ -202,9 +202,9 @@ static struct ieee80211_supported_band __wl_band_5ghz_a = {
 
 /* This is to override regulatory domains defined in cfg80211 module (reg.c)
  * By default world regulatory domain defined in reg.c puts the flags
- * NL80211_RRF_PASSIVE_SCAN and NL80211_RRF_NO_IBSS for 5GHz channels (for
- * 36..48 and 149..165). With respect to these flags, wpa_supplicant doesn't
- * start p2p operations on 5GHz channels. All the changes in world regulatory
+ * NL80211_RRF_NO_IR for 5GHz channels (for 36..48 and 149..165).
+ * With respect to this flag, wpa_supplicant doesn't start p2p
+ * operations on 5GHz channels. All the changes in world regulatory
  * domain are to be done here.
  */
 static const struct ieee80211_regdomain brcmf_regdom = {
@@ -1095,10 +1095,10 @@ static void brcmf_link_down(struct brcmf_cfg80211_vif *vif)
 					     BRCMF_C_DISASSOC, NULL, 0);
 		if (err) {
 			brcmf_err("WLC_DISASSOC failed (%d)\n", err);
-			cfg80211_disconnected(vif->wdev.netdev, 0,
-					      NULL, 0, GFP_KERNEL);
 		}
 		clear_bit(BRCMF_VIF_STATUS_CONNECTED, &vif->sme_state);
+		cfg80211_disconnected(vif->wdev.netdev, 0, NULL, 0, GFP_KERNEL);
+
 	}
 	clear_bit(BRCMF_VIF_STATUS_CONNECTING, &vif->sme_state);
 	clear_bit(BRCMF_SCAN_STATUS_SUPPRESS, &cfg->scan_status);
@@ -1758,6 +1758,7 @@ brcmf_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *ndev,
 		return -EIO;
 
 	clear_bit(BRCMF_VIF_STATUS_CONNECTED, &ifp->vif->sme_state);
+	cfg80211_disconnected(ndev, reason_code, NULL, 0, GFP_KERNEL);
 
 	memcpy(&scbval.ea, &profile->bssid, ETH_ALEN);
 	scbval.val = cpu_to_le32(reason_code);
@@ -2556,8 +2557,8 @@ brcmf_compare_update_same_bss(struct brcmf_cfg80211_info *cfg,
 		ch_bss.band == ch_bss_info_le.band &&
 		bss_info_le->SSID_len == bss->SSID_len &&
 		!memcmp(bss_info_le->SSID, bss->SSID, bss_info_le->SSID_len)) {
-		if ((bss->flags & WLC_BSS_RSSI_ON_CHANNEL) ==
-			(bss_info_le->flags & WLC_BSS_RSSI_ON_CHANNEL)) {
+		if ((bss->flags & BRCMF_BSS_RSSI_ON_CHANNEL) ==
+			(bss_info_le->flags & BRCMF_BSS_RSSI_ON_CHANNEL)) {
 			s16 bss_rssi = le16_to_cpu(bss->RSSI);
 			s16 bss_info_rssi = le16_to_cpu(bss_info_le->RSSI);
 
@@ -2566,13 +2567,13 @@ brcmf_compare_update_same_bss(struct brcmf_cfg80211_info *cfg,
 			*/
 			if (bss_info_rssi > bss_rssi)
 				bss->RSSI = bss_info_le->RSSI;
-		} else if ((bss->flags & WLC_BSS_RSSI_ON_CHANNEL) &&
-			(bss_info_le->flags & WLC_BSS_RSSI_ON_CHANNEL) == 0) {
+		} else if ((bss->flags & BRCMF_BSS_RSSI_ON_CHANNEL) &&
+			(bss_info_le->flags & BRCMF_BSS_RSSI_ON_CHANNEL) == 0) {
 			/* preserve the on-channel rssi measurement
 			* if the new measurement is off channel
 			*/
 			bss->RSSI = bss_info_le->RSSI;
-			bss->flags |= WLC_BSS_RSSI_ON_CHANNEL;
+			bss->flags |= BRCMF_BSS_RSSI_ON_CHANNEL;
 		}
 		return 1;
 	}
@@ -2988,6 +2989,7 @@ brcmf_notify_sched_scan_results(struct brcmf_if *ifp,
 		}
 
 		set_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status);
+		cfg->escan_info.run = brcmf_run_escan;
 		err = brcmf_do_escan(cfg, wiphy, ifp, request);
 		if (err) {
 			clear_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status);
@@ -3973,11 +3975,12 @@ brcmf_cfg80211_mgmt_frame_register(struct wiphy *wiphy,
 
 static int
 brcmf_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
-		       struct ieee80211_channel *chan, bool offchan,
-		       unsigned int wait, const u8 *buf, size_t len,
-		       bool no_cck, bool dont_wait_for_ack, u64 *cookie)
+		       struct cfg80211_mgmt_tx_params *params, u64 *cookie)
 {
 	struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+	struct ieee80211_channel *chan = params->chan;
+	const u8 *buf = params->buf;
+	size_t len = params->len;
 	const struct ieee80211_mgmt *mgmt;
 	struct brcmf_cfg80211_vif *vif;
 	s32 err = 0;
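
This hunk adapts to the cfg80211 change that bundles the management-TX arguments into struct cfg80211_mgmt_tx_params. A driver callback only needs to unpack the fields it cares about; a hedged sketch using just the fields referenced above (the function name is hypothetical):

static int example_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
			   struct cfg80211_mgmt_tx_params *params, u64 *cookie)
{
	const struct ieee80211_mgmt *mgmt = (const void *)params->buf;

	if (!ieee80211_is_action(mgmt->frame_control))
		return -EPERM;	/* sketch: only action frames handled */

	/* queue params->len bytes from params->buf for transmission on
	 * params->chan; *cookie identifies the frame in the later
	 * TX-status notification */
	return 0;
}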
@@ -4341,7 +4344,7 @@ static struct wiphy *brcmf_setup_wiphy(struct device *phydev)
 	wiphy->max_remain_on_channel_duration = 5000;
 	brcmf_wiphy_pno_params(wiphy);
 	brcmf_dbg(INFO, "Registering custom regulatory\n");
-	wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY;
+	wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG;
 	wiphy_apply_custom_regulatory(wiphy, &brcmf_regdom);
 	err = wiphy_register(wiphy);
 	if (err < 0) {
@@ -4358,9 +4361,6 @@ struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg,
 {
 	struct brcmf_cfg80211_vif *vif;
 
-	if (cfg->vif_cnt == BRCMF_IFACE_MAX_CNT)
-		return ERR_PTR(-ENOSPC);
-
 	brcmf_dbg(TRACE, "allocating virtual interface (size=%zu)\n",
 		  sizeof(*vif));
 	vif = kzalloc(sizeof(*vif), GFP_KERNEL);
@@ -4377,21 +4377,25 @@ struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg,
 	brcmf_init_prof(&vif->profile);
 
 	list_add_tail(&vif->list, &cfg->vif_list);
-	cfg->vif_cnt++;
 	return vif;
 }
 
-void brcmf_free_vif(struct brcmf_cfg80211_info *cfg,
-		    struct brcmf_cfg80211_vif *vif)
+void brcmf_free_vif(struct brcmf_cfg80211_vif *vif)
 {
 	list_del(&vif->list);
-	cfg->vif_cnt--;
-
 	kfree(vif);
-	if (!cfg->vif_cnt) {
-		wiphy_unregister(cfg->wiphy);
-		wiphy_free(cfg->wiphy);
-	}
+}
+
+void brcmf_cfg80211_free_netdev(struct net_device *ndev)
+{
+	struct brcmf_cfg80211_vif *vif;
+	struct brcmf_if *ifp;
+
+	ifp = netdev_priv(ndev);
+	vif = ifp->vif;
+
+	brcmf_free_vif(vif);
+	free_netdev(ndev);
 }
 
 static bool brcmf_is_linkup(const struct brcmf_event_msg *e)
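
brcmf_cfg80211_free_netdev() only has an effect if it is installed as the net_device destructor where the interface is created; that wiring lives outside this diff. An assumed sketch of the registration side:

static int example_register_ifp_netdev(struct net_device *ndev)
{
	/* assumption: brcmfmac's core code points the destructor at the
	 * new helper so free_netdev() releases the attached vif too */
	ndev->destructor = brcmf_cfg80211_free_netdev;
	return register_netdev(ndev);
}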
@@ -4978,20 +4982,20 @@ cfg80211_p2p_attach_out:
 	wl_deinit_priv(cfg);
 
 cfg80211_attach_out:
-	brcmf_free_vif(cfg, vif);
+	brcmf_free_vif(vif);
 	return NULL;
 }
 
 void brcmf_cfg80211_detach(struct brcmf_cfg80211_info *cfg)
 {
-	struct brcmf_cfg80211_vif *vif;
-	struct brcmf_cfg80211_vif *tmp;
+	if (!cfg)
+		return;
 
-	wl_deinit_priv(cfg);
+	WARN_ON(!list_empty(&cfg->vif_list));
+	wiphy_unregister(cfg->wiphy);
 	brcmf_btcoex_detach(cfg);
-	list_for_each_entry_safe(vif, tmp, &cfg->vif_list, list) {
-		brcmf_free_vif(cfg, vif);
-	}
+	wl_deinit_priv(cfg);
+	wiphy_free(cfg->wiphy);
 }
 
 static s32
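
The reordered detach path follows the usual cfg80211 teardown pattern: unregister the wiphy first so no new callbacks can arrive, do the driver-private cleanup, and free the wiphy last because the driver's private data is allocated inside it. A minimal sketch of that ordering:

static void example_detach(struct wiphy *wiphy)
{
	wiphy_unregister(wiphy);	/* no further cfg80211 callbacks */
	/* ... private cleanup that may still use wiphy_priv(wiphy) ... */
	wiphy_free(wiphy);		/* releases the private area too */
}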
@@ -5086,7 +5090,8 @@ dongle_scantime_out:
 }
 
 
-static s32 brcmf_construct_reginfo(struct brcmf_cfg80211_info *cfg, u32 bw_cap)
+static s32 brcmf_construct_reginfo(struct brcmf_cfg80211_info *cfg,
+				   u32 bw_cap[])
 {
 	struct brcmf_if *ifp = netdev_priv(cfg_to_ndev(cfg));
 	struct ieee80211_channel *band_chan_arr;
@@ -5099,7 +5104,6 @@ static s32 brcmf_construct_reginfo(struct brcmf_cfg80211_info *cfg, u32 bw_cap)
 	enum ieee80211_band band;
 	u32 channel;
 	u32 *n_cnt;
-	bool ht40_allowed;
 	u32 index;
 	u32 ht40_flag;
 	bool update;
@@ -5132,18 +5136,17 @@ static s32 brcmf_construct_reginfo(struct brcmf_cfg80211_info *cfg, u32 bw_cap)
 			array_size = ARRAY_SIZE(__wl_2ghz_channels);
 			n_cnt = &__wl_band_2ghz.n_channels;
 			band = IEEE80211_BAND_2GHZ;
-			ht40_allowed = (bw_cap == WLC_N_BW_40ALL);
 		} else if (ch.band == BRCMU_CHAN_BAND_5G) {
 			band_chan_arr = __wl_5ghz_a_channels;
 			array_size = ARRAY_SIZE(__wl_5ghz_a_channels);
 			n_cnt = &__wl_band_5ghz_a.n_channels;
 			band = IEEE80211_BAND_5GHZ;
-			ht40_allowed = !(bw_cap == WLC_N_BW_20ALL);
 		} else {
-			brcmf_err("Invalid channel Sepc. 0x%x.\n", ch.chspec);
+			brcmf_err("Invalid channel Spec. 0x%x.\n", ch.chspec);
 			continue;
 		}
-		if (!ht40_allowed && ch.bw == BRCMU_CHAN_BW_40)
+		if (!(bw_cap[band] & WLC_BW_40MHZ_BIT) &&
+		    ch.bw == BRCMU_CHAN_BW_40)
 			continue;
 		update = false;
 		for (j = 0; (j < *n_cnt && (*n_cnt < array_size)); j++) {
@@ -5161,7 +5164,10 @@ static s32 brcmf_construct_reginfo(struct brcmf_cfg80211_info *cfg, u32 bw_cap)
 				ieee80211_channel_to_frequency(ch.chnum, band);
 			band_chan_arr[index].hw_value = ch.chnum;
 
-			if (ch.bw == BRCMU_CHAN_BW_40 && ht40_allowed) {
+			brcmf_dbg(INFO, "channel %d: f=%d bw=%d sb=%d\n",
+				  ch.chnum, band_chan_arr[index].center_freq,
+				  ch.bw, ch.sb);
+			if (ch.bw == BRCMU_CHAN_BW_40) {
 				/* assuming the order is HT20, HT40 Upper,
 				 * HT40 lower from chanspecs
 				 */
@@ -5197,10 +5203,10 @@ static s32 brcmf_construct_reginfo(struct brcmf_cfg80211_info *cfg, u32 bw_cap)
 					if (channel & WL_CHAN_RADAR)
 						band_chan_arr[index].flags |=
 							(IEEE80211_CHAN_RADAR |
-							IEEE80211_CHAN_NO_IBSS);
+							IEEE80211_CHAN_NO_IR);
 					if (channel & WL_CHAN_PASSIVE)
 						band_chan_arr[index].flags |=
-						    IEEE80211_CHAN_PASSIVE_SCAN;
+						    IEEE80211_CHAN_NO_IR;
 				}
 			}
 			if (!update)
@@ -5212,6 +5218,46 @@ exit:
 	return err;
 }
 
+static void brcmf_get_bwcap(struct brcmf_if *ifp, u32 bw_cap[])
+{
+	u32 band, mimo_bwcap;
+	int err;
+
+	band = WLC_BAND_2G;
+	err = brcmf_fil_iovar_int_get(ifp, "bw_cap", &band);
+	if (!err) {
+		bw_cap[IEEE80211_BAND_2GHZ] = band;
+		band = WLC_BAND_5G;
+		err = brcmf_fil_iovar_int_get(ifp, "bw_cap", &band);
+		if (!err) {
+			bw_cap[IEEE80211_BAND_5GHZ] = band;
+			return;
+		}
+		WARN_ON(1);
+		return;
+	}
+	brcmf_dbg(INFO, "fallback to mimo_bw_cap info\n");
+	mimo_bwcap = 0;
+	err = brcmf_fil_iovar_int_get(ifp, "mimo_bw_cap", &mimo_bwcap);
+	if (err)
+		/* assume 20MHz if firmware does not give a clue */
+		mimo_bwcap = WLC_N_BW_20ALL;
+
+	switch (mimo_bwcap) {
+	case WLC_N_BW_40ALL:
+		bw_cap[IEEE80211_BAND_2GHZ] |= WLC_BW_40MHZ_BIT;
+		/* fall-thru */
+	case WLC_N_BW_20IN2G_40IN5G:
+		bw_cap[IEEE80211_BAND_5GHZ] |= WLC_BW_40MHZ_BIT;
+		/* fall-thru */
+	case WLC_N_BW_20ALL:
+		bw_cap[IEEE80211_BAND_2GHZ] |= WLC_BW_20MHZ_BIT;
+		bw_cap[IEEE80211_BAND_5GHZ] |= WLC_BW_20MHZ_BIT;
+		break;
+	default:
+		brcmf_err("invalid mimo_bw_cap value\n");
+	}
+}
 
 static s32 brcmf_update_wiphybands(struct brcmf_cfg80211_info *cfg)
 {
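
brcmf_get_bwcap() first tries the per-band "bw_cap" iovar (the queried band is passed in as the iovar value) and only falls back to the legacy "mimo_bw_cap" result, translating it into the new per-band bitmask. The intentional switch fall-through encodes that 40 MHz capability implies 20 MHz. A standalone, plain-C illustration of that translation (constant names shortened for the example):

#include <stdio.h>

#define BW_20MHZ_BIT	(1 << 0)
#define BW_40MHZ_BIT	(1 << 1)

enum { BAND_2G, BAND_5G, NUM_BANDS };
enum { N_BW_20ALL = 0, N_BW_40ALL = 1, N_BW_20IN2G_40IN5G = 2 };

static void legacy_to_bwcap(int mimo_bw_cap, unsigned int bw_cap[NUM_BANDS])
{
	switch (mimo_bw_cap) {
	case N_BW_40ALL:
		bw_cap[BAND_2G] |= BW_40MHZ_BIT;
		/* fall through */
	case N_BW_20IN2G_40IN5G:
		bw_cap[BAND_5G] |= BW_40MHZ_BIT;
		/* fall through */
	case N_BW_20ALL:
		bw_cap[BAND_2G] |= BW_20MHZ_BIT;
		bw_cap[BAND_5G] |= BW_20MHZ_BIT;
		break;
	}
}

int main(void)
{
	unsigned int bw_cap[NUM_BANDS] = { 0, 0 };

	legacy_to_bwcap(N_BW_20IN2G_40IN5G, bw_cap);
	/* prints 2G=0x1 5G=0x3: 20 MHz only on 2.4 GHz, 20+40 MHz on 5 GHz */
	printf("2G=0x%x 5G=0x%x\n", bw_cap[BAND_2G], bw_cap[BAND_5G]);
	return 0;
}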
@@ -5220,13 +5266,13 @@ static s32 brcmf_update_wiphybands(struct brcmf_cfg80211_info *cfg)
 	s32 phy_list;
 	u32 band_list[3];
 	u32 nmode;
-	u32 bw_cap = 0;
+	u32 bw_cap[2] = { 0, 0 };
 	s8 phy;
 	s32 err;
 	u32 nband;
 	s32 i;
-	struct ieee80211_supported_band *bands[IEEE80211_NUM_BANDS];
-	s32 index;
+	struct ieee80211_supported_band *bands[2] = { NULL, NULL };
+	struct ieee80211_supported_band *band;
 
 	err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_PHYLIST,
 				     &phy_list, sizeof(phy_list));
@@ -5252,11 +5298,10 @@ static s32 brcmf_update_wiphybands(struct brcmf_cfg80211_info *cfg)
 	if (err) {
 		brcmf_err("nmode error (%d)\n", err);
 	} else {
-		err = brcmf_fil_iovar_int_get(ifp, "mimo_bw_cap", &bw_cap);
-		if (err)
-			brcmf_err("mimo_bw_cap error (%d)\n", err);
+		brcmf_get_bwcap(ifp, bw_cap);
 	}
-	brcmf_dbg(INFO, "nmode=%d, mimo_bw_cap=%d\n", nmode, bw_cap);
+	brcmf_dbg(INFO, "nmode=%d, bw_cap=(%d, %d)\n", nmode,
+		  bw_cap[IEEE80211_BAND_2GHZ], bw_cap[IEEE80211_BAND_5GHZ]);
 
 	err = brcmf_construct_reginfo(cfg, bw_cap);
 	if (err) {
@@ -5265,40 +5310,33 @@ static s32 brcmf_update_wiphybands(struct brcmf_cfg80211_info *cfg)
 	}
 
 	nband = band_list[0];
-	memset(bands, 0, sizeof(bands));
 
 	for (i = 1; i <= nband && i < ARRAY_SIZE(band_list); i++) {
-		index = -1;
+		band = NULL;
 		if ((band_list[i] == WLC_BAND_5G) &&
-		    (__wl_band_5ghz_a.n_channels > 0)) {
-			index = IEEE80211_BAND_5GHZ;
-			bands[index] = &__wl_band_5ghz_a;
-			if ((bw_cap == WLC_N_BW_40ALL) ||
-			    (bw_cap == WLC_N_BW_20IN2G_40IN5G))
-				bands[index]->ht_cap.cap |=
-							IEEE80211_HT_CAP_SGI_40;
-		} else if ((band_list[i] == WLC_BAND_2G) &&
-			   (__wl_band_2ghz.n_channels > 0)) {
-			index = IEEE80211_BAND_2GHZ;
-			bands[index] = &__wl_band_2ghz;
-			if (bw_cap == WLC_N_BW_40ALL)
-				bands[index]->ht_cap.cap |=
-							IEEE80211_HT_CAP_SGI_40;
-		}
+		    (__wl_band_5ghz_a.n_channels > 0))
+			band = &__wl_band_5ghz_a;
+		else if ((band_list[i] == WLC_BAND_2G) &&
+			 (__wl_band_2ghz.n_channels > 0))
+			band = &__wl_band_2ghz;
+		else
+			continue;
 
-		if ((index >= 0) && nmode) {
-			bands[index]->ht_cap.cap |= IEEE80211_HT_CAP_SGI_20;
-			bands[index]->ht_cap.cap |= IEEE80211_HT_CAP_DSSSCCK40;
-			bands[index]->ht_cap.ht_supported = true;
-			bands[index]->ht_cap.ampdu_factor =
-						IEEE80211_HT_MAX_AMPDU_64K;
-			bands[index]->ht_cap.ampdu_density =
-						IEEE80211_HT_MPDU_DENSITY_16;
-			/* An HT shall support all EQM rates for one spatial
-			 * stream
-			 */
-			bands[index]->ht_cap.mcs.rx_mask[0] = 0xff;
+		if (bw_cap[band->band] & WLC_BW_40MHZ_BIT) {
+			band->ht_cap.cap |= IEEE80211_HT_CAP_SGI_40;
+			band->ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
 		}
+		band->ht_cap.cap |= IEEE80211_HT_CAP_SGI_20;
+		band->ht_cap.cap |= IEEE80211_HT_CAP_DSSSCCK40;
+		band->ht_cap.ht_supported = true;
+		band->ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
+		band->ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_16;
+		/* An HT shall support all EQM rates for one spatial
+		 * stream
+		 */
+		band->ht_cap.mcs.rx_mask[0] = 0xff;
+		band->ht_cap.mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
+		bands[band->band] = band;
 	}
 
 	wiphy = cfg_to_wiphy(cfg);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h
index d9bdaf9a72d0..2dc6a074e8ed 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h
@@ -412,7 +412,6 @@ struct brcmf_cfg80211_info {
 	struct work_struct escan_timeout_work;
 	u8 *escan_ioctl_buf;
 	struct list_head vif_list;
-	u8 vif_cnt;
 	struct brcmf_cfg80211_vif_event vif_event;
 	struct completion vif_disabled;
 	struct brcmu_d11inf d11inf;
@@ -487,8 +486,7 @@ enum nl80211_iftype brcmf_cfg80211_get_iftype(struct brcmf_if *ifp);
 struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg,
 					   enum nl80211_iftype type,
 					   bool pm_block);
-void brcmf_free_vif(struct brcmf_cfg80211_info *cfg,
-	            struct brcmf_cfg80211_vif *vif);
+void brcmf_free_vif(struct brcmf_cfg80211_vif *vif);
 
 s32 brcmf_vif_set_mgmt_ie(struct brcmf_cfg80211_vif *vif, s32 pktflag,
 			  const u8 *vndr_ie_buf, u32 vndr_ie_len);
@@ -507,5 +505,6 @@ s32 brcmf_notify_escan_complete(struct brcmf_cfg80211_info *cfg,
 				bool fw_abort);
 void brcmf_set_mpc(struct brcmf_if *ndev, int mpc);
 void brcmf_abort_scanning(struct brcmf_cfg80211_info *cfg);
+void brcmf_cfg80211_free_netdev(struct net_device *ndev);
 
 #endif				/* _wl_cfg80211_h_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/channel.c b/drivers/net/wireless/brcm80211/brcmsmac/channel.c
index cc87926f5055..635ae034c7e5 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/channel.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/channel.c
@@ -59,23 +59,18 @@
 
 #define BRCM_2GHZ_2412_2462	REG_RULE(2412-10, 2462+10, 40, 0, 19, 0)
 #define BRCM_2GHZ_2467_2472	REG_RULE(2467-10, 2472+10, 20, 0, 19, \
-					 NL80211_RRF_PASSIVE_SCAN | \
-					 NL80211_RRF_NO_IBSS)
+					 NL80211_RRF_NO_IR)
 
 #define BRCM_5GHZ_5180_5240	REG_RULE(5180-10, 5240+10, 40, 0, 21, \
-					 NL80211_RRF_PASSIVE_SCAN | \
-					 NL80211_RRF_NO_IBSS)
+					 NL80211_RRF_NO_IR)
 #define BRCM_5GHZ_5260_5320	REG_RULE(5260-10, 5320+10, 40, 0, 21, \
-					 NL80211_RRF_PASSIVE_SCAN | \
 					 NL80211_RRF_DFS | \
-					 NL80211_RRF_NO_IBSS)
+					 NL80211_RRF_NO_IR)
 #define BRCM_5GHZ_5500_5700	REG_RULE(5500-10, 5700+10, 40, 0, 21, \
-					 NL80211_RRF_PASSIVE_SCAN | \
 					 NL80211_RRF_DFS | \
-					 NL80211_RRF_NO_IBSS)
+					 NL80211_RRF_NO_IR)
 #define BRCM_5GHZ_5745_5825	REG_RULE(5745-10, 5825+10, 40, 0, 21, \
-					 NL80211_RRF_PASSIVE_SCAN | \
-					 NL80211_RRF_NO_IBSS)
+					 NL80211_RRF_NO_IR)
 
 static const struct ieee80211_regdomain brcms_regdom_x2 = {
 	.n_reg_rules = 6,
@@ -395,7 +390,7 @@ brcms_c_channel_set_chanspec(struct brcms_cm_info *wlc_cm, u16 chanspec,
 		brcms_c_set_gmode(wlc, wlc->protection->gmode_user, false);
 
 	brcms_b_set_chanspec(wlc->hw, chanspec,
-			      !!(ch->flags & IEEE80211_CHAN_PASSIVE_SCAN),
+			      !!(ch->flags & IEEE80211_CHAN_NO_IR),
 			      &txpwr);
 }
 
@@ -657,8 +652,8 @@ static void brcms_reg_apply_radar_flags(struct wiphy *wiphy)
 		 */
 		if (!(ch->flags & IEEE80211_CHAN_DISABLED))
 			ch->flags |= IEEE80211_CHAN_RADAR |
-				     IEEE80211_CHAN_NO_IBSS |
-				     IEEE80211_CHAN_PASSIVE_SCAN;
+				     IEEE80211_CHAN_NO_IR |
+				     IEEE80211_CHAN_NO_IR;
 	}
 }
 
@@ -684,18 +679,15 @@ brcms_reg_apply_beaconing_flags(struct wiphy *wiphy,
 				continue;
 
 			if (initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE) {
-				rule = freq_reg_info(wiphy, ch->center_freq);
+				rule = freq_reg_info(wiphy,
+						     MHZ_TO_KHZ(ch->center_freq));
 				if (IS_ERR(rule))
 					continue;
 
-				if (!(rule->flags & NL80211_RRF_NO_IBSS))
-					ch->flags &= ~IEEE80211_CHAN_NO_IBSS;
-				if (!(rule->flags & NL80211_RRF_PASSIVE_SCAN))
-					ch->flags &=
-						~IEEE80211_CHAN_PASSIVE_SCAN;
+				if (!(rule->flags & NL80211_RRF_NO_IR))
+					ch->flags &= ~IEEE80211_CHAN_NO_IR;
 			} else if (ch->beacon_found) {
-				ch->flags &= ~(IEEE80211_CHAN_NO_IBSS |
-					       IEEE80211_CHAN_PASSIVE_SCAN);
+				ch->flags &= ~IEEE80211_CHAN_NO_IR;
 			}
 		}
 	}
@@ -775,8 +767,8 @@ void brcms_c_regd_init(struct brcms_c_info *wlc)
 	}
 
 	wlc->wiphy->reg_notifier = brcms_reg_notifier;
-	wlc->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
-			     WIPHY_FLAG_STRICT_REGULATORY;
+	wlc->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG |
+					REGULATORY_STRICT_REG;
 	wiphy_apply_custom_regulatory(wlc->wiphy, regd->regdomain);
 	brcms_reg_apply_beaconing_flags(wiphy, NL80211_REGDOM_SET_BY_DRIVER);
 }
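
Besides the NO_IR consolidation, the beaconing-flags hunk fixes the unit passed to freq_reg_info(): the regulatory core expects the centre frequency in kHz, while ieee80211_channel.center_freq is stored in MHz. A small sketch of a correct lookup (hypothetical helper name):

static bool example_chan_is_no_ir(struct wiphy *wiphy,
				  struct ieee80211_channel *ch)
{
	const struct ieee80211_reg_rule *rule;

	rule = freq_reg_info(wiphy, MHZ_TO_KHZ(ch->center_freq));
	if (IS_ERR(rule))
		return true;	/* no matching rule: stay passive */

	return !!(rule->flags & NL80211_RRF_NO_IR);
}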
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
index edc5d105ff98..925034b80e9c 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
@@ -125,13 +125,13 @@ static struct ieee80211_channel brcms_2ghz_chantable[] = {
 	CHAN2GHZ(10, 2457, IEEE80211_CHAN_NO_HT40PLUS),
 	CHAN2GHZ(11, 2462, IEEE80211_CHAN_NO_HT40PLUS),
 	CHAN2GHZ(12, 2467,
-		 IEEE80211_CHAN_PASSIVE_SCAN | IEEE80211_CHAN_NO_IBSS |
+		 IEEE80211_CHAN_NO_IR |
 		 IEEE80211_CHAN_NO_HT40PLUS),
 	CHAN2GHZ(13, 2472,
-		 IEEE80211_CHAN_PASSIVE_SCAN | IEEE80211_CHAN_NO_IBSS |
+		 IEEE80211_CHAN_NO_IR |
 		 IEEE80211_CHAN_NO_HT40PLUS),
 	CHAN2GHZ(14, 2484,
-		 IEEE80211_CHAN_PASSIVE_SCAN | IEEE80211_CHAN_NO_IBSS |
+		 IEEE80211_CHAN_NO_IR |
 		 IEEE80211_CHAN_NO_HT40PLUS | IEEE80211_CHAN_NO_HT40MINUS |
 		 IEEE80211_CHAN_NO_OFDM)
 };
@@ -144,51 +144,51 @@ static struct ieee80211_channel brcms_5ghz_nphy_chantable[] = {
 	CHAN5GHZ(48, IEEE80211_CHAN_NO_HT40PLUS),
 	/* UNII-2 */
 	CHAN5GHZ(52,
-		 IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IBSS |
-		 IEEE80211_CHAN_PASSIVE_SCAN | IEEE80211_CHAN_NO_HT40MINUS),
+		 IEEE80211_CHAN_RADAR |
+		 IEEE80211_CHAN_NO_IR | IEEE80211_CHAN_NO_HT40MINUS),
 	CHAN5GHZ(56,
-		 IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IBSS |
-		 IEEE80211_CHAN_PASSIVE_SCAN | IEEE80211_CHAN_NO_HT40PLUS),
+		 IEEE80211_CHAN_RADAR |
+		 IEEE80211_CHAN_NO_IR | IEEE80211_CHAN_NO_HT40PLUS),
 	CHAN5GHZ(60,
-		 IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IBSS |
-		 IEEE80211_CHAN_PASSIVE_SCAN | IEEE80211_CHAN_NO_HT40MINUS),
+		 IEEE80211_CHAN_RADAR |
+		 IEEE80211_CHAN_NO_IR | IEEE80211_CHAN_NO_HT40MINUS),
 	CHAN5GHZ(64,
-		 IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IBSS |
-		 IEEE80211_CHAN_PASSIVE_SCAN | IEEE80211_CHAN_NO_HT40PLUS),
+		 IEEE80211_CHAN_RADAR |
+		 IEEE80211_CHAN_NO_IR | IEEE80211_CHAN_NO_HT40PLUS),
 	/* MID */
 	CHAN5GHZ(100,
-		 IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IBSS |
-		 IEEE80211_CHAN_PASSIVE_SCAN | IEEE80211_CHAN_NO_HT40MINUS),
+		 IEEE80211_CHAN_RADAR |
+		 IEEE80211_CHAN_NO_IR | IEEE80211_CHAN_NO_HT40MINUS),
 	CHAN5GHZ(104,
-		 IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IBSS |
-		 IEEE80211_CHAN_PASSIVE_SCAN | IEEE80211_CHAN_NO_HT40PLUS),
+		 IEEE80211_CHAN_RADAR |
+		 IEEE80211_CHAN_NO_IR | IEEE80211_CHAN_NO_HT40PLUS),
 	CHAN5GHZ(108,
-		 IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IBSS |
-		 IEEE80211_CHAN_PASSIVE_SCAN | IEEE80211_CHAN_NO_HT40MINUS),
+		 IEEE80211_CHAN_RADAR |
+		 IEEE80211_CHAN_NO_IR | IEEE80211_CHAN_NO_HT40MINUS),
 	CHAN5GHZ(112,
-		 IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IBSS |
-		 IEEE80211_CHAN_PASSIVE_SCAN | IEEE80211_CHAN_NO_HT40PLUS),
+		 IEEE80211_CHAN_RADAR |
+		 IEEE80211_CHAN_NO_IR | IEEE80211_CHAN_NO_HT40PLUS),
 	CHAN5GHZ(116,
-		 IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IBSS |
-		 IEEE80211_CHAN_PASSIVE_SCAN | IEEE80211_CHAN_NO_HT40MINUS),
+		 IEEE80211_CHAN_RADAR |
+		 IEEE80211_CHAN_NO_IR | IEEE80211_CHAN_NO_HT40MINUS),
 	CHAN5GHZ(120,
-		 IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IBSS |
-		 IEEE80211_CHAN_PASSIVE_SCAN | IEEE80211_CHAN_NO_HT40PLUS),
+		 IEEE80211_CHAN_RADAR |
+		 IEEE80211_CHAN_NO_IR | IEEE80211_CHAN_NO_HT40PLUS),
 	CHAN5GHZ(124,
-		 IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IBSS |
-		 IEEE80211_CHAN_PASSIVE_SCAN | IEEE80211_CHAN_NO_HT40MINUS),
+		 IEEE80211_CHAN_RADAR |
+		 IEEE80211_CHAN_NO_IR | IEEE80211_CHAN_NO_HT40MINUS),
 	CHAN5GHZ(128,
-		 IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IBSS |
-		 IEEE80211_CHAN_PASSIVE_SCAN | IEEE80211_CHAN_NO_HT40PLUS),
+		 IEEE80211_CHAN_RADAR |
+		 IEEE80211_CHAN_NO_IR | IEEE80211_CHAN_NO_HT40PLUS),
 	CHAN5GHZ(132,
-		 IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IBSS |
-		 IEEE80211_CHAN_PASSIVE_SCAN | IEEE80211_CHAN_NO_HT40MINUS),
+		 IEEE80211_CHAN_RADAR |
+		 IEEE80211_CHAN_NO_IR | IEEE80211_CHAN_NO_HT40MINUS),
 	CHAN5GHZ(136,
-		 IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IBSS |
-		 IEEE80211_CHAN_PASSIVE_SCAN | IEEE80211_CHAN_NO_HT40PLUS),
+		 IEEE80211_CHAN_RADAR |
+		 IEEE80211_CHAN_NO_IR | IEEE80211_CHAN_NO_HT40PLUS),
 	CHAN5GHZ(140,
-		 IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IBSS |
-		 IEEE80211_CHAN_PASSIVE_SCAN | IEEE80211_CHAN_NO_HT40PLUS |
+		 IEEE80211_CHAN_RADAR |
+		 IEEE80211_CHAN_NO_IR | IEEE80211_CHAN_NO_HT40PLUS |
 		 IEEE80211_CHAN_NO_HT40MINUS),
 	/* UNII-3 */
 	CHAN5GHZ(149, IEEE80211_CHAN_NO_HT40MINUS),
@@ -1071,7 +1071,6 @@ static int ieee_hw_init(struct ieee80211_hw *hw)
 	hw->max_rates = 2;	/* Primary rate and 1 fallback rate */
 
 	/* channel change time is dependent on chip and band  */
-	hw->channel_change_time = 7 * 1000;
 	hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
 				     BIT(NL80211_IFTYPE_AP) |
 				     BIT(NL80211_IFTYPE_ADHOC);
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.c b/drivers/net/wireless/brcm80211/brcmsmac/main.c
index 8138f1cff4e5..9417cb5a2553 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/main.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/main.c
@@ -7108,7 +7108,6 @@ prep_mac80211_status(struct brcms_c_info *wlc, struct d11rxhdr *rxh,
 		     struct sk_buff *p,
 		     struct ieee80211_rx_status *rx_status)
 {
-	int preamble;
 	int channel;
 	u32 rspec;
 	unsigned char *plcp;
@@ -7191,7 +7190,6 @@ prep_mac80211_status(struct brcms_c_info *wlc, struct d11rxhdr *rxh,
 			rx_status->rate_idx -= BRCMS_LEGACY_5G_RATE_OFFSET;
 
 		/* Determine short preamble and rate_idx */
-		preamble = 0;
 		if (is_cck_rate(rspec)) {
 			if (rxh->PhyRxStatus_0 & PRXS0_SHORTH)
 				rx_status->flag |= RX_FLAG_SHORTPRE;
diff --git a/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h b/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
index 84113ea16f84..6fa5d4863782 100644
--- a/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
+++ b/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
@@ -41,6 +41,7 @@
 #define BCM4331_CHIP_ID		0x4331
 #define BCM4334_CHIP_ID		0x4334
 #define BCM4335_CHIP_ID		0x4335
+#define BCM43362_CHIP_ID	43362
 #define BCM4339_CHIP_ID		0x4339
 
 #endif				/* _BRCM_HW_IDS_H_ */
diff --git a/drivers/net/wireless/brcm80211/include/brcmu_wifi.h b/drivers/net/wireless/brcm80211/include/brcmu_wifi.h
index 0505cc065e0d..7ca2aa1035b2 100644
--- a/drivers/net/wireless/brcm80211/include/brcmu_wifi.h
+++ b/drivers/net/wireless/brcm80211/include/brcmu_wifi.h
@@ -82,6 +82,20 @@
 #define WLC_N_BW_40ALL			1
 #define WLC_N_BW_20IN2G_40IN5G		2
 
+#define WLC_BW_20MHZ_BIT		BIT(0)
+#define WLC_BW_40MHZ_BIT		BIT(1)
+#define WLC_BW_80MHZ_BIT		BIT(2)
+#define WLC_BW_160MHZ_BIT		BIT(3)
+
+/* Bandwidth capabilities */
+#define WLC_BW_CAP_20MHZ		(WLC_BW_20MHZ_BIT)
+#define WLC_BW_CAP_40MHZ		(WLC_BW_40MHZ_BIT|WLC_BW_20MHZ_BIT)
+#define WLC_BW_CAP_80MHZ		(WLC_BW_80MHZ_BIT|WLC_BW_40MHZ_BIT| \
+					 WLC_BW_20MHZ_BIT)
+#define WLC_BW_CAP_160MHZ		(WLC_BW_160MHZ_BIT|WLC_BW_80MHZ_BIT| \
+					 WLC_BW_40MHZ_BIT|WLC_BW_20MHZ_BIT)
+#define WLC_BW_CAP_UNRESTRICTED		0xFF
+
 /* band types */
 #define	WLC_BAND_AUTO			0	/* auto-select */
 #define	WLC_BAND_5G			1	/* 5 Ghz */
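
The new WLC_BW_CAP_* composites are cumulative: each wider bandwidth implies all narrower ones, and WLC_BW_CAP_UNRESTRICTED sets every bit. Callers are therefore expected to test individual WLC_BW_*_BIT flags rather than compare against the composites; a tiny sketch (hypothetical helper name):

static bool example_band_supports_40mhz(u32 bw_cap_for_band)
{
	/* true for WLC_BW_CAP_40MHZ, _80MHZ, _160MHZ and UNRESTRICTED */
	return !!(bw_cap_for_band & WLC_BW_40MHZ_BIT);
}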
diff --git a/drivers/net/wireless/cw1200/cw1200_sdio.c b/drivers/net/wireless/cw1200/cw1200_sdio.c
index ebdcdf44f155..d3acc85932a5 100644
--- a/drivers/net/wireless/cw1200/cw1200_sdio.c
+++ b/drivers/net/wireless/cw1200/cw1200_sdio.c
@@ -108,9 +108,9 @@ static irqreturn_t cw1200_gpio_irq(int irq, void *dev_id)
 	struct hwbus_priv *self = dev_id;
 
 	if (self->core) {
-		sdio_claim_host(self->func);
+		cw1200_sdio_lock(self);
 		cw1200_irq_handler(self->core);
-		sdio_release_host(self->func);
+		cw1200_sdio_unlock(self);
 		return IRQ_HANDLED;
 	} else {
 		return IRQ_NONE;
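
The IRQ handler now goes through the driver's own lock/unlock wrappers instead of calling sdio_claim_host()/sdio_release_host() directly. The wrapper bodies are outside this diff; a plausible shape (an assumption, with hypothetical names) is simply a thin funnel so every bus access serialises the same way:

static void example_sdio_lock(struct hwbus_priv *self)
{
	sdio_claim_host(self->func);	/* plus any driver-private locking */
}

static void example_sdio_unlock(struct hwbus_priv *self)
{
	sdio_release_host(self->func);
}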
diff --git a/drivers/net/wireless/cw1200/fwio.c b/drivers/net/wireless/cw1200/fwio.c
index acdff0f7f952..5a9ffd3a6a6c 100644
--- a/drivers/net/wireless/cw1200/fwio.c
+++ b/drivers/net/wireless/cw1200/fwio.c
@@ -14,7 +14,6 @@
  * published by the Free Software Foundation.
  */
 
-#include <linux/init.h>
 #include <linux/vmalloc.h>
 #include <linux/sched.h>
 #include <linux/firmware.h>
diff --git a/drivers/net/wireless/cw1200/main.c b/drivers/net/wireless/cw1200/main.c
index 090f01577dd2..3e78cc3ccb78 100644
--- a/drivers/net/wireless/cw1200/main.c
+++ b/drivers/net/wireless/cw1200/main.c
@@ -21,7 +21,6 @@
  */
 
 #include <linux/module.h>
-#include <linux/init.h>
 #include <linux/firmware.h>
 #include <linux/etherdevice.h>
 #include <linux/vmalloc.h>
@@ -302,7 +301,6 @@ static struct ieee80211_hw *cw1200_init_common(const u8 *macaddr,
 
 	hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
 
-	hw->channel_change_time = 1000;	/* TODO: find actual value */
 	hw->queues = 4;
 
 	priv->rts_threshold = -1;
diff --git a/drivers/net/wireless/cw1200/pm.c b/drivers/net/wireless/cw1200/pm.c
index b37abb9f0453..6907c8fd4578 100644
--- a/drivers/net/wireless/cw1200/pm.c
+++ b/drivers/net/wireless/cw1200/pm.c
@@ -225,7 +225,7 @@ int cw1200_wow_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
 		cw1200_set_pm(priv, &priv->powersave_mode);
 		if (wait_event_interruptible_timeout(priv->ps_mode_switch_done,
 						     !priv->ps_mode_switch_in_progress, 1*HZ) <= 0) {
-			goto revert3;
+			goto revert4;
 		}
 	}
 
@@ -254,11 +254,11 @@ int cw1200_wow_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
 
 	/* Stop serving thread */
 	if (cw1200_bh_suspend(priv))
-		goto revert4;
+		goto revert5;
 
 	ret = timer_pending(&priv->mcast_timeout);
 	if (ret)
-		goto revert5;
+		goto revert6;
 
 	/* Store suspend state */
 	pm_state->suspend_state = state;
@@ -280,9 +280,9 @@ int cw1200_wow_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
 
 	return 0;
 
-revert5:
+revert6:
 	WARN_ON(cw1200_bh_resume(priv));
-revert4:
+revert5:
 	cw1200_resume_work(priv, &priv->bss_loss_work,
 			   state->bss_loss_tmo);
 	cw1200_resume_work(priv, &priv->join_timeout,
@@ -291,6 +291,7 @@ revert4:
 			   state->direct_probe);
 	cw1200_resume_work(priv, &priv->link_id_gc_work,
 			   state->link_id_gc);
+revert4:
 	kfree(state);
 revert3:
 	wsm_set_udp_port_filter(priv, &cw1200_udp_port_filter_off);
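
The pm.c hunks insert a new early-failure point (the power-save mode switch timing out) and renumber the revert labels so each error path unwinds exactly what was set up before it failed. The pattern, reduced to standalone C:

#include <stdio.h>

static int step_a(void) { return 0; }
static int step_b(void) { return 0; }
static int step_c(void) { return -1; }		/* pretend the last step fails */
static void undo_a(void) { puts("undo a"); }
static void undo_b(void) { puts("undo b"); }

static int example_setup(void)
{
	if (step_a() < 0)
		return -1;
	if (step_b() < 0)
		goto revert_a;
	if (step_c() < 0)
		goto revert_b;	/* adding a step means later labels shift */
	return 0;

revert_b:
	undo_b();
revert_a:
	undo_a();
	return -1;
}

int main(void)
{
	/* prints "undo b" then "undo a" and exits non-zero */
	return example_setup() ? 1 : 0;
}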
diff --git a/drivers/net/wireless/cw1200/scan.c b/drivers/net/wireless/cw1200/scan.c
index ee3c19037aac..9afcd4ce3368 100644
--- a/drivers/net/wireless/cw1200/scan.c
+++ b/drivers/net/wireless/cw1200/scan.c
@@ -173,8 +173,9 @@ void cw1200_scan_work(struct work_struct *work)
 			cw1200_set_pm(priv, &priv->powersave_mode);
 
 		if (priv->scan.status < 0)
-			wiphy_dbg(priv->hw->wiphy, "[SCAN] Scan failed (%d).\n",
-				  priv->scan.status);
+			wiphy_warn(priv->hw->wiphy,
+				   "[SCAN] Scan failed (%d).\n",
+				   priv->scan.status);
 		else if (priv->scan.req)
 			wiphy_dbg(priv->hw->wiphy,
 				  "[SCAN] Scan completed.\n");
@@ -197,9 +198,9 @@ void cw1200_scan_work(struct work_struct *work)
 			if ((*it)->band != first->band)
 				break;
 			if (((*it)->flags ^ first->flags) &
-					IEEE80211_CHAN_PASSIVE_SCAN)
+					IEEE80211_CHAN_NO_IR)
 				break;
-			if (!(first->flags & IEEE80211_CHAN_PASSIVE_SCAN) &&
+			if (!(first->flags & IEEE80211_CHAN_NO_IR) &&
 			    (*it)->max_power != first->max_power)
 				break;
 		}
@@ -210,7 +211,7 @@ void cw1200_scan_work(struct work_struct *work)
 		else
 			scan.max_tx_rate = WSM_TRANSMIT_RATE_1;
 		scan.num_probes =
-			(first->flags & IEEE80211_CHAN_PASSIVE_SCAN) ? 0 : 2;
+			(first->flags & IEEE80211_CHAN_NO_IR) ? 0 : 2;
 		scan.num_ssids = priv->scan.n_ssids;
 		scan.ssids = &priv->scan.ssids[0];
 		scan.num_channels = it - priv->scan.curr;
@@ -233,7 +234,7 @@ void cw1200_scan_work(struct work_struct *work)
 		}
 		for (i = 0; i < scan.num_channels; ++i) {
 			scan.ch[i].number = priv->scan.curr[i]->hw_value;
-			if (priv->scan.curr[i]->flags & IEEE80211_CHAN_PASSIVE_SCAN) {
+			if (priv->scan.curr[i]->flags & IEEE80211_CHAN_NO_IR) {
 				scan.ch[i].min_chan_time = 50;
 				scan.ch[i].max_chan_time = 100;
 			} else {
@@ -241,7 +242,7 @@ void cw1200_scan_work(struct work_struct *work)
 				scan.ch[i].max_chan_time = 25;
 			}
 		}
-		if (!(first->flags & IEEE80211_CHAN_PASSIVE_SCAN) &&
+		if (!(first->flags & IEEE80211_CHAN_NO_IR) &&
 		    priv->scan.output_power != first->max_power) {
 			priv->scan.output_power = first->max_power;
 			wsm_set_output_power(priv,
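
With the rename, the scan work still batches consecutive channels into one firmware request only while the transmit-relevant properties match; NO_IR (formerly passive-scan) channels also get longer dwell times and no probe requests. A sketch of the grouping test (hypothetical helper name):

static bool example_same_scan_batch(const struct ieee80211_channel *a,
				    const struct ieee80211_channel *b)
{
	if (a->band != b->band)
		return false;
	if ((a->flags ^ b->flags) & IEEE80211_CHAN_NO_IR)
		return false;		/* passive and active never mix */
	if (!(a->flags & IEEE80211_CHAN_NO_IR) &&
	    a->max_power != b->max_power)
		return false;		/* active scan: tx power must match */
	return true;
}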
diff --git a/drivers/net/wireless/cw1200/sta.c b/drivers/net/wireless/cw1200/sta.c
index 010b252be584..103f7bce8932 100644
--- a/drivers/net/wireless/cw1200/sta.c
+++ b/drivers/net/wireless/cw1200/sta.c
@@ -13,6 +13,7 @@
 #include <linux/sched.h>
 #include <linux/firmware.h>
 #include <linux/module.h>
+#include <linux/etherdevice.h>
 
 #include "cw1200.h"
 #include "sta.h"
@@ -555,8 +556,8 @@ u64 cw1200_prepare_multicast(struct ieee80211_hw *hw,
 		pr_debug("[STA] multicast: %pM\n", ha->addr);
 		memcpy(&priv->multicast_filter.macaddrs[count],
 		       ha->addr, ETH_ALEN);
-		if (memcmp(ha->addr, broadcast_ipv4, ETH_ALEN) &&
-		    memcmp(ha->addr, broadcast_ipv6, ETH_ALEN))
+		if (!ether_addr_equal(ha->addr, broadcast_ipv4) &&
+		    !ether_addr_equal(ha->addr, broadcast_ipv6))
 			priv->has_multicast_subscription = true;
 		count++;
 	}
diff --git a/drivers/net/wireless/cw1200/txrx.c b/drivers/net/wireless/cw1200/txrx.c
index e824d4d4a18d..0bd541175ecd 100644
--- a/drivers/net/wireless/cw1200/txrx.c
+++ b/drivers/net/wireless/cw1200/txrx.c
@@ -1166,8 +1166,7 @@ void cw1200_rx_cb(struct cw1200_common *priv,
 			return;
 	} else if (ieee80211_is_beacon(frame->frame_control) &&
 		   !arg->status && priv->vif &&
-		   !memcmp(ieee80211_get_SA(frame), priv->vif->bss_conf.bssid,
-			   ETH_ALEN)) {
+		   ether_addr_equal(ieee80211_get_SA(frame), priv->vif->bss_conf.bssid)) {
 		const u8 *tim_ie;
 		u8 *ies = ((struct ieee80211_mgmt *)
 			  (skb->data))->u.beacon.variable;
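
ether_addr_equal() returns true on a match, the opposite sense of memcmp() returning 0, so each conversion in these hunks turns "== 0" into a plain call and an implicit "!= 0" into a negation. A one-line sketch of the idiom:

static bool example_addressed_to_us(const u8 *da, const struct net_device *dev)
{
	/* before: return memcmp(da, dev->dev_addr, ETH_ALEN) == 0; */
	return ether_addr_equal(da, dev->dev_addr);
}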
diff --git a/drivers/net/wireless/hostap/hostap_80211_rx.c b/drivers/net/wireless/hostap/hostap_80211_rx.c
index d39e3e24077b..599f30f22841 100644
--- a/drivers/net/wireless/hostap/hostap_80211_rx.c
+++ b/drivers/net/wireless/hostap/hostap_80211_rx.c
@@ -563,7 +563,7 @@ hostap_rx_frame_wds(local_info_t *local, struct ieee80211_hdr *hdr, u16 fc,
 
 	/* Possible WDS frame: either IEEE 802.11 compliant (if FromDS)
 	 * or own non-standard frame with 4th address after payload */
-	if (memcmp(hdr->addr1, local->dev->dev_addr, ETH_ALEN) != 0 &&
+	if (!ether_addr_equal(hdr->addr1, local->dev->dev_addr) &&
 	    (hdr->addr1[0] != 0xff || hdr->addr1[1] != 0xff ||
 	     hdr->addr1[2] != 0xff || hdr->addr1[3] != 0xff ||
 	     hdr->addr1[4] != 0xff || hdr->addr1[5] != 0xff)) {
@@ -622,12 +622,12 @@ static int hostap_is_eapol_frame(local_info_t *local, struct sk_buff *skb)
 	/* check that the frame is unicast frame to us */
 	if ((fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) ==
 	    IEEE80211_FCTL_TODS &&
-	    memcmp(hdr->addr1, dev->dev_addr, ETH_ALEN) == 0 &&
-	    memcmp(hdr->addr3, dev->dev_addr, ETH_ALEN) == 0) {
+	    ether_addr_equal(hdr->addr1, dev->dev_addr) &&
+	    ether_addr_equal(hdr->addr3, dev->dev_addr)) {
 		/* ToDS frame with own addr BSSID and DA */
 	} else if ((fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) ==
 		   IEEE80211_FCTL_FROMDS &&
-		   memcmp(hdr->addr1, dev->dev_addr, ETH_ALEN) == 0) {
+		   ether_addr_equal(hdr->addr1, dev->dev_addr)) {
 		/* FromDS frame with own addr as DA */
 	} else
 		return 0;
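
The open-coded 0xff comparison left in hostap_rx_frame_wds() could use the etherdevice.h broadcast helper as well; a hedged sketch of the equivalent test (not part of this patch, hypothetical name):

static bool example_foreign_unicast_da(const u8 *addr1,
				       const struct net_device *dev)
{
	/* true when addr1 is neither our own address nor broadcast */
	return !ether_addr_equal(addr1, dev->dev_addr) &&
	       !is_broadcast_ether_addr(addr1);
}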
diff --git a/drivers/net/wireless/hostap/hostap_80211_tx.c b/drivers/net/wireless/hostap/hostap_80211_tx.c
index 344a981a052e..8bde77689469 100644
--- a/drivers/net/wireless/hostap/hostap_80211_tx.c
+++ b/drivers/net/wireless/hostap/hostap_80211_tx.c
@@ -1,5 +1,6 @@
 #include <linux/slab.h>
 #include <linux/export.h>
+#include <linux/etherdevice.h>
 
 #include "hostap_80211.h"
 #include "hostap_common.h"
@@ -103,8 +104,7 @@ netdev_tx_t hostap_data_start_xmit(struct sk_buff *skb,
 			return NETDEV_TX_OK;
 		} else if (local->iw_mode == IW_MODE_INFRA &&
 			   (local->wds_type & HOSTAP_WDS_AP_CLIENT) &&
-			   memcmp(skb->data + ETH_ALEN, dev->dev_addr,
-				  ETH_ALEN) != 0) {
+			   !ether_addr_equal(skb->data + ETH_ALEN, dev->dev_addr)) {
 			/* AP client mode: send frames with foreign src addr
 			 * using 4-addr WDS frames */
 			use_wds = WDS_COMPLIANT_FRAME;
diff --git a/drivers/net/wireless/hostap/hostap_ap.c b/drivers/net/wireless/hostap/hostap_ap.c
index d6033a8e5dea..d36e252d2ccb 100644
--- a/drivers/net/wireless/hostap/hostap_ap.c
+++ b/drivers/net/wireless/hostap/hostap_ap.c
@@ -24,6 +24,7 @@
 #include <linux/slab.h>
 #include <linux/export.h>
 #include <linux/moduleparam.h>
+#include <linux/etherdevice.h>
 
 #include "hostap_wlan.h"
 #include "hostap.h"
@@ -106,13 +107,12 @@ static void ap_sta_hash_del(struct ap_data *ap, struct sta_info *sta)
 
 	s = ap->sta_hash[STA_HASH(sta->addr)];
 	if (s == NULL) return;
-	if (memcmp(s->addr, sta->addr, ETH_ALEN) == 0) {
+	if (ether_addr_equal(s->addr, sta->addr)) {
 		ap->sta_hash[STA_HASH(sta->addr)] = s->hnext;
 		return;
 	}
 
-	while (s->hnext != NULL && memcmp(s->hnext->addr, sta->addr, ETH_ALEN)
-	       != 0)
+	while (s->hnext != NULL && !ether_addr_equal(s->hnext->addr, sta->addr))
 		s = s->hnext;
 	if (s->hnext != NULL)
 		s->hnext = s->hnext->hnext;
@@ -435,7 +435,7 @@ int ap_control_del_mac(struct mac_restrictions *mac_restrictions, u8 *mac)
 	     ptr != &mac_restrictions->mac_list; ptr = ptr->next) {
 		entry = list_entry(ptr, struct mac_entry, list);
 
-		if (memcmp(entry->addr, mac, ETH_ALEN) == 0) {
+		if (ether_addr_equal(entry->addr, mac)) {
 			list_del(ptr);
 			kfree(entry);
 			mac_restrictions->entries--;
@@ -459,7 +459,7 @@ static int ap_control_mac_deny(struct mac_restrictions *mac_restrictions,
 
 	spin_lock_bh(&mac_restrictions->lock);
 	list_for_each_entry(entry, &mac_restrictions->mac_list, list) {
-		if (memcmp(entry->addr, mac, ETH_ALEN) == 0) {
+		if (ether_addr_equal(entry->addr, mac)) {
 			found = 1;
 			break;
 		}
@@ -957,7 +957,7 @@ static struct sta_info* ap_get_sta(struct ap_data *ap, u8 *sta)
 	struct sta_info *s;
 
 	s = ap->sta_hash[STA_HASH(sta)];
-	while (s != NULL && memcmp(s->addr, sta, ETH_ALEN) != 0)
+	while (s != NULL && !ether_addr_equal(s->addr, sta))
 		s = s->hnext;
 	return s;
 }
@@ -1391,7 +1391,7 @@ static void handle_authen(local_info_t *local, struct sk_buff *skb,
 	status_code = __le16_to_cpu(*pos);
 	pos++;
 
-	if (memcmp(dev->dev_addr, hdr->addr2, ETH_ALEN) == 0 ||
+	if (ether_addr_equal(dev->dev_addr, hdr->addr2) ||
 	    ap_control_mac_deny(&ap->mac_restrictions, hdr->addr2)) {
 		txt = "authentication denied";
 		resp = WLAN_STATUS_UNSPECIFIED_FAILURE;
@@ -1935,7 +1935,7 @@ static void handle_pspoll(local_info_t *local,
 	PDEBUG(DEBUG_PS2, "handle_pspoll: BSSID=%pM, TA=%pM PWRMGT=%d\n",
 	       hdr->addr1, hdr->addr2, !!ieee80211_has_pm(hdr->frame_control));
 
-	if (memcmp(hdr->addr1, dev->dev_addr, ETH_ALEN)) {
+	if (!ether_addr_equal(hdr->addr1, dev->dev_addr)) {
 		PDEBUG(DEBUG_AP,
 		       "handle_pspoll - addr1(BSSID)=%pM not own MAC\n",
 		       hdr->addr1);
@@ -2230,7 +2230,7 @@ static void handle_ap_item(local_info_t *local, struct sk_buff *skb,
 			goto done;
 		}
 
-		if (memcmp(hdr->addr1, dev->dev_addr, ETH_ALEN)) {
+		if (!ether_addr_equal(hdr->addr1, dev->dev_addr)) {
 			PDEBUG(DEBUG_AP, "handle_ap_item - addr1(BSSID)=%pM"
 			       " not own MAC\n", hdr->addr1);
 			goto done;
@@ -2267,13 +2267,13 @@ static void handle_ap_item(local_info_t *local, struct sk_buff *skb,
 		goto done;
 	}
 
-	if (memcmp(hdr->addr1, dev->dev_addr, ETH_ALEN)) {
+	if (!ether_addr_equal(hdr->addr1, dev->dev_addr)) {
 		PDEBUG(DEBUG_AP, "handle_ap_item - addr1(DA)=%pM"
 		       " not own MAC\n", hdr->addr1);
 		goto done;
 	}
 
-	if (memcmp(hdr->addr3, dev->dev_addr, ETH_ALEN)) {
+	if (!ether_addr_equal(hdr->addr3, dev->dev_addr)) {
 		PDEBUG(DEBUG_AP, "handle_ap_item - addr3(BSSID)=%pM"
 		       " not own MAC\n", hdr->addr3);
 		goto done;
@@ -3035,7 +3035,7 @@ ap_rx_ret hostap_handle_sta_rx(local_info_t *local, struct net_device *dev,
 		if (!wds) {
 			/* FromDS frame - not for us; probably
 			 * broadcast/multicast in another BSS - drop */
-			if (memcmp(hdr->addr1, dev->dev_addr, ETH_ALEN) == 0) {
+			if (ether_addr_equal(hdr->addr1, dev->dev_addr)) {
 				printk(KERN_DEBUG "Odd.. FromDS packet "
 				       "received with own BSSID\n");
 				hostap_dump_rx_80211(dev->name, skb, rx_stats);
@@ -3044,7 +3044,7 @@ ap_rx_ret hostap_handle_sta_rx(local_info_t *local, struct net_device *dev,
 			goto out;
 		}
 	} else if (stype == IEEE80211_STYPE_NULLFUNC && sta == NULL &&
-		   memcmp(hdr->addr1, dev->dev_addr, ETH_ALEN) == 0) {
+		   ether_addr_equal(hdr->addr1, dev->dev_addr)) {
 
 		if (local->hostapd) {
 			prism2_rx_80211(local->apdev, skb, rx_stats,
@@ -3073,7 +3073,7 @@ ap_rx_ret hostap_handle_sta_rx(local_info_t *local, struct net_device *dev,
 		/* If BSSID (Addr3) is foreign, this frame is a normal
 		 * broadcast frame from an IBSS network. Drop it silently.
 		 * If BSSID is own, report the dropping of this frame. */
-		if (memcmp(hdr->addr3, dev->dev_addr, ETH_ALEN) == 0) {
+		if (ether_addr_equal(hdr->addr3, dev->dev_addr)) {
 			printk(KERN_DEBUG "%s: dropped received packet from %pM"
 			       " with no ToDS flag "
 			       "(type=0x%02x, subtype=0x%02x)\n", dev->name,
diff --git a/drivers/net/wireless/hostap/hostap_cs.c b/drivers/net/wireless/hostap/hostap_cs.c
index 56cd01ca8ad0..9f825f2620da 100644
--- a/drivers/net/wireless/hostap/hostap_cs.c
+++ b/drivers/net/wireless/hostap/hostap_cs.c
@@ -1,7 +1,6 @@
 #define PRISM2_PCCARD
 
 #include <linux/module.h>
-#include <linux/init.h>
 #include <linux/if.h>
 #include <linux/slab.h>
 #include <linux/wait.h>
diff --git a/drivers/net/wireless/hostap/hostap_hw.c b/drivers/net/wireless/hostap/hostap_hw.c
index c275dc1623fe..6df3ee561d52 100644
--- a/drivers/net/wireless/hostap/hostap_hw.c
+++ b/drivers/net/wireless/hostap/hostap_hw.c
@@ -2175,7 +2175,7 @@ static void hostap_tx_callback(local_info_t *local,
 	struct hostap_tx_callback_info *cb;
 
 	/* Make sure that frame was from us. */
-	if (memcmp(txdesc->addr2, local->dev->dev_addr, ETH_ALEN)) {
+	if (!ether_addr_equal(txdesc->addr2, local->dev->dev_addr)) {
 		printk(KERN_DEBUG "%s: TX callback - foreign frame\n",
 		       local->dev->name);
 		return;
diff --git a/drivers/net/wireless/hostap/hostap_ioctl.c b/drivers/net/wireless/hostap/hostap_ioctl.c
index e5090309824e..3e5fa7872b64 100644
--- a/drivers/net/wireless/hostap/hostap_ioctl.c
+++ b/drivers/net/wireless/hostap/hostap_ioctl.c
@@ -655,7 +655,7 @@ static int hostap_join_ap(struct net_device *dev)
 		if (!local->last_scan_results)
 			break;
 		entry = &local->last_scan_results[i];
-		if (memcmp(local->preferred_ap, entry->bssid, ETH_ALEN) == 0) {
+		if (ether_addr_equal(local->preferred_ap, entry->bssid)) {
 			req.channel = entry->chid;
 			break;
 		}
@@ -1978,7 +1978,7 @@ static inline int prism2_translate_scan(local_info_t *local,
 		list_for_each(ptr, &local->bss_list) {
 			struct hostap_bss_info *bss;
 			bss = list_entry(ptr, struct hostap_bss_info, list);
-			if (memcmp(bss->bssid, scan->bssid, ETH_ALEN) == 0) {
+			if (ether_addr_equal(bss->bssid, scan->bssid)) {
 				bss->included = 1;
 				current_ev = __prism2_translate_scan(
 					local, info, scan, bss, current_ev,
@@ -2567,7 +2567,7 @@ static int prism2_ioctl_priv_prism2_param(struct net_device *dev,
 		local->passive_scan_interval = value;
 		if (timer_pending(&local->passive_scan_timer))
 			del_timer(&local->passive_scan_timer);
-		if (value > 0) {
+		if (value > 0 && value < INT_MAX / HZ) {
 			local->passive_scan_timer.expires = jiffies +
 				local->passive_scan_interval * HZ;
 			add_timer(&local->passive_scan_timer);
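
The added upper bound guards the jiffies arithmetic just above: the expiry is computed as value * HZ in plain int math, so an unchecked interval would overflow before being added to jiffies. A standalone illustration:

#include <limits.h>
#include <stdio.h>

#define HZ 1000			/* assumed tick rate for the example */

int main(void)
{
	int ok = 60, bad = INT_MAX / HZ + 1;

	printf("%d s fits: %d\n", ok, ok < INT_MAX / HZ);	/* 1 */
	printf("%d s fits: %d\n", bad, bad < INT_MAX / HZ);	/* 0 */
	/* multiplying 'bad' by HZ would overflow signed int, which is
	 * undefined behaviour and nonsense as a timer delta */
	return 0;
}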
diff --git a/drivers/net/wireless/hostap/hostap_main.c b/drivers/net/wireless/hostap/hostap_main.c
index a1257c92afc4..67db34e56d7e 100644
--- a/drivers/net/wireless/hostap/hostap_main.c
+++ b/drivers/net/wireless/hostap/hostap_main.c
@@ -155,8 +155,7 @@ int prism2_wds_add(local_info_t *local, u8 *remote_addr,
 
 		if (prism2_wds_special_addr(iface->u.wds.remote_addr))
 			empty = iface;
-		else if (memcmp(iface->u.wds.remote_addr, remote_addr,
-				ETH_ALEN) == 0) {
+		else if (ether_addr_equal(iface->u.wds.remote_addr, remote_addr)) {
 			match = iface;
 			break;
 		}
@@ -214,8 +213,7 @@ int prism2_wds_del(local_info_t *local, u8 *remote_addr,
 		if (iface->type != HOSTAP_INTERFACE_WDS)
 			continue;
 
-		if (memcmp(iface->u.wds.remote_addr, remote_addr,
-			   ETH_ALEN) == 0) {
+		if (ether_addr_equal(iface->u.wds.remote_addr, remote_addr)) {
 			selected = iface;
 			break;
 		}
@@ -1085,7 +1083,7 @@ int prism2_sta_deauth(local_info_t *local, u16 reason)
 
 	if (local->iw_mode != IW_MODE_INFRA ||
 	    is_zero_ether_addr(local->bssid) ||
-	    memcmp(local->bssid, "\x44\x44\x44\x44\x44\x44", ETH_ALEN) == 0)
+	    ether_addr_equal(local->bssid, "\x44\x44\x44\x44\x44\x44"))
 		return 0;
 
 	ret = prism2_sta_send_mgmt(local, local->bssid, IEEE80211_STYPE_DEAUTH,
diff --git a/drivers/net/wireless/hostap/hostap_pci.c b/drivers/net/wireless/hostap/hostap_pci.c
index 05ca3402dca7..91158e2e961c 100644
--- a/drivers/net/wireless/hostap/hostap_pci.c
+++ b/drivers/net/wireless/hostap/hostap_pci.c
@@ -5,7 +5,6 @@
  * Andy Warner <andyw@pobox.com> */
 
 #include <linux/module.h>
-#include <linux/init.h>
 #include <linux/if.h>
 #include <linux/skbuff.h>
 #include <linux/netdevice.h>
diff --git a/drivers/net/wireless/hostap/hostap_plx.c b/drivers/net/wireless/hostap/hostap_plx.c
index c3d067ee4db9..3bf530d9a40f 100644
--- a/drivers/net/wireless/hostap/hostap_plx.c
+++ b/drivers/net/wireless/hostap/hostap_plx.c
@@ -8,7 +8,6 @@
 
 
 #include <linux/module.h>
-#include <linux/init.h>
 #include <linux/if.h>
 #include <linux/skbuff.h>
 #include <linux/netdevice.h>
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
index f8ab193009cd..3aba49259ef1 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
@@ -1930,10 +1930,10 @@ static int ipw2100_wdev_init(struct net_device *dev)
 			bg_band->channels[i].max_power = geo->bg[i].max_power;
 			if (geo->bg[i].flags & LIBIPW_CH_PASSIVE_ONLY)
 				bg_band->channels[i].flags |=
-					IEEE80211_CHAN_PASSIVE_SCAN;
+					IEEE80211_CHAN_NO_IR;
 			if (geo->bg[i].flags & LIBIPW_CH_NO_IBSS)
 				bg_band->channels[i].flags |=
-					IEEE80211_CHAN_NO_IBSS;
+					IEEE80211_CHAN_NO_IR;
 			if (geo->bg[i].flags & LIBIPW_CH_RADAR_DETECT)
 				bg_band->channels[i].flags |=
 					IEEE80211_CHAN_RADAR;
@@ -6362,7 +6362,6 @@ out:
 				   &ipw2100_attribute_group);
 
 		free_libipw(dev, 0);
-		pci_set_drvdata(pci_dev, NULL);
 	}
 
 	pci_iounmap(pci_dev, ioaddr);
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index 81903e33d5b1..139326065bd9 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -3012,7 +3012,7 @@ static void ipw_remove_current_network(struct ipw_priv *priv)
 	spin_lock_irqsave(&priv->ieee->lock, flags);
 	list_for_each_safe(element, safe, &priv->ieee->network_list) {
 		network = list_entry(element, struct libipw_network, list);
-		if (!memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
+		if (ether_addr_equal(network->bssid, priv->bssid)) {
 			list_del(element);
 			list_add_tail(&network->list,
 				      &priv->ieee->network_free_list);
@@ -3921,7 +3921,7 @@ static u8 ipw_add_station(struct ipw_priv *priv, u8 * bssid)
 	int i;
 
 	for (i = 0; i < priv->num_stations; i++) {
-		if (!memcmp(priv->stations[i], bssid, ETH_ALEN)) {
+		if (ether_addr_equal(priv->stations[i], bssid)) {
 			/* Another node is active in network */
 			priv->missed_adhoc_beacons = 0;
 			if (!(priv->config & CFG_STATIC_CHANNEL))
@@ -3953,7 +3953,7 @@ static u8 ipw_find_station(struct ipw_priv *priv, u8 * bssid)
 	int i;
 
 	for (i = 0; i < priv->num_stations; i++)
-		if (!memcmp(priv->stations[i], bssid, ETH_ALEN))
+		if (ether_addr_equal(priv->stations[i], bssid))
 			return i;
 
 	return IPW_INVALID_STATION;
@@ -5622,7 +5622,7 @@ static int ipw_find_adhoc_network(struct ipw_priv *priv,
 		return 0;
 	}
 
-	if (!memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
+	if (ether_addr_equal(network->bssid, priv->bssid)) {
 		IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
 				"because of the same BSSID match: %pM"
 				".\n", print_ssid(ssid, network->ssid,
@@ -5849,7 +5849,7 @@ static int ipw_best_network(struct ipw_priv *priv,
 	}
 
 	if ((priv->config & CFG_STATIC_BSSID) &&
-	    memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
+	    !ether_addr_equal(network->bssid, priv->bssid)) {
 		IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
 				"because of BSSID mismatch: %pM.\n",
 				print_ssid(ssid, network->ssid,
@@ -6988,7 +6988,7 @@ static int ipw_qos_handle_probe_response(struct ipw_priv *priv,
 	}
 	if ((priv->status & STATUS_ASSOCIATED) &&
 	    (priv->ieee->iw_mode == IW_MODE_ADHOC) && (active_network == 0)) {
-		if (memcmp(network->bssid, priv->bssid, ETH_ALEN))
+		if (!ether_addr_equal(network->bssid, priv->bssid))
 			if (network->capability & WLAN_CAPABILITY_IBSS)
 				if ((network->ssid_len ==
 				     priv->assoc_network->ssid_len) &&
@@ -8210,29 +8210,29 @@ static int is_network_packet(struct ipw_priv *priv,
 	switch (priv->ieee->iw_mode) {
 	case IW_MODE_ADHOC:	/* Header: Dest. | Source    | BSSID */
 		/* packets from our adapter are dropped (echo) */
-		if (!memcmp(header->addr2, priv->net_dev->dev_addr, ETH_ALEN))
+		if (ether_addr_equal(header->addr2, priv->net_dev->dev_addr))
 			return 0;
 
 		/* {broad,multi}cast packets to our BSSID go through */
 		if (is_multicast_ether_addr(header->addr1))
-			return !memcmp(header->addr3, priv->bssid, ETH_ALEN);
+			return ether_addr_equal(header->addr3, priv->bssid);
 
 		/* packets to our adapter go through */
-		return !memcmp(header->addr1, priv->net_dev->dev_addr,
-			       ETH_ALEN);
+		return ether_addr_equal(header->addr1,
+					priv->net_dev->dev_addr);
 
 	case IW_MODE_INFRA:	/* Header: Dest. | BSSID | Source */
 		/* packets from our adapter are dropped (echo) */
-		if (!memcmp(header->addr3, priv->net_dev->dev_addr, ETH_ALEN))
+		if (ether_addr_equal(header->addr3, priv->net_dev->dev_addr))
 			return 0;
 
 		/* {broad,multi}cast packets to our BSS go through */
 		if (is_multicast_ether_addr(header->addr1))
-			return !memcmp(header->addr2, priv->bssid, ETH_ALEN);
+			return ether_addr_equal(header->addr2, priv->bssid);
 
 		/* packets to our adapter go through */
-		return !memcmp(header->addr1, priv->net_dev->dev_addr,
-			       ETH_ALEN);
+		return ether_addr_equal(header->addr1,
+					priv->net_dev->dev_addr);
 	}
 
 	return 1;
@@ -8260,7 +8260,7 @@ static  int is_duplicate_packet(struct ipw_priv *priv,
 			list_for_each(p, &priv->ibss_mac_hash[index]) {
 				entry =
 				    list_entry(p, struct ipw_ibss_seq, list);
-				if (!memcmp(entry->mac, mac, ETH_ALEN))
+				if (ether_addr_equal(entry->mac, mac))
 					break;
 			}
 			if (p == &priv->ibss_mac_hash[index]) {
@@ -8329,7 +8329,7 @@ static void ipw_handle_mgmt_packet(struct ipw_priv *priv,
 	      IEEE80211_STYPE_PROBE_RESP) ||
 	     (WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) ==
 	      IEEE80211_STYPE_BEACON))) {
-		if (!memcmp(header->addr3, priv->bssid, ETH_ALEN))
+		if (ether_addr_equal(header->addr3, priv->bssid))
 			ipw_add_station(priv, header->addr2);
 	}
 
@@ -9045,7 +9045,7 @@ static int ipw_wx_set_wap(struct net_device *dev,
 	}
 
 	priv->config |= CFG_STATIC_BSSID;
-	if (!memcmp(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN)) {
+	if (ether_addr_equal(priv->bssid, wrqu->ap_addr.sa_data)) {
 		IPW_DEBUG_WX("BSSID set to current BSSID.\n");
 		mutex_unlock(&priv->mutex);
 		return 0;
@@ -11472,10 +11472,10 @@ static int ipw_wdev_init(struct net_device *dev)
 			bg_band->channels[i].max_power = geo->bg[i].max_power;
 			if (geo->bg[i].flags & LIBIPW_CH_PASSIVE_ONLY)
 				bg_band->channels[i].flags |=
-					IEEE80211_CHAN_PASSIVE_SCAN;
+					IEEE80211_CHAN_NO_IR;
 			if (geo->bg[i].flags & LIBIPW_CH_NO_IBSS)
 				bg_band->channels[i].flags |=
-					IEEE80211_CHAN_NO_IBSS;
+					IEEE80211_CHAN_NO_IR;
 			if (geo->bg[i].flags & LIBIPW_CH_RADAR_DETECT)
 				bg_band->channels[i].flags |=
 					IEEE80211_CHAN_RADAR;
@@ -11511,10 +11511,10 @@ static int ipw_wdev_init(struct net_device *dev)
 			a_band->channels[i].max_power = geo->a[i].max_power;
 			if (geo->a[i].flags & LIBIPW_CH_PASSIVE_ONLY)
 				a_band->channels[i].flags |=
-					IEEE80211_CHAN_PASSIVE_SCAN;
+					IEEE80211_CHAN_NO_IR;
 			if (geo->a[i].flags & LIBIPW_CH_NO_IBSS)
 				a_band->channels[i].flags |=
-					IEEE80211_CHAN_NO_IBSS;
+					IEEE80211_CHAN_NO_IR;
 			if (geo->a[i].flags & LIBIPW_CH_RADAR_DETECT)
 				a_band->channels[i].flags |=
 					IEEE80211_CHAN_RADAR;
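
Both ipw2x00 wdev-init hunks collapse the two legacy per-channel restrictions onto the single NO_IR flag. The mapping they apply, written out as a small sketch (hypothetical helper name):

static u32 example_geo_to_chan_flags(u32 geo_flags)
{
	u32 flags = 0;

	if (geo_flags & (LIBIPW_CH_PASSIVE_ONLY | LIBIPW_CH_NO_IBSS))
		flags |= IEEE80211_CHAN_NO_IR;
	if (geo_flags & LIBIPW_CH_RADAR_DETECT)
		flags |= IEEE80211_CHAN_RADAR;
	return flags;
}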
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.h b/drivers/net/wireless/ipw2x00/ipw2200.h
index 570d6fb88967..aa301d1eee3c 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.h
+++ b/drivers/net/wireless/ipw2x00/ipw2200.h
@@ -29,7 +29,6 @@
 
 #include <linux/module.h>
 #include <linux/moduleparam.h>
-#include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/mutex.h>
 
diff --git a/drivers/net/wireless/ipw2x00/libipw_rx.c b/drivers/net/wireless/ipw2x00/libipw_rx.c
index 9ffe65931b29..a586a85bfcfe 100644
--- a/drivers/net/wireless/ipw2x00/libipw_rx.c
+++ b/drivers/net/wireless/ipw2x00/libipw_rx.c
@@ -874,13 +874,13 @@ void libipw_rx_any(struct libipw_device *ieee,
 	switch (ieee->iw_mode) {
 	case IW_MODE_ADHOC:
 		/* our BSS and not from/to DS */
-		if (memcmp(hdr->addr3, ieee->bssid, ETH_ALEN) == 0)
+		if (ether_addr_equal(hdr->addr3, ieee->bssid))
 		if ((fc & (IEEE80211_FCTL_TODS+IEEE80211_FCTL_FROMDS)) == 0) {
 			/* promisc: get all */
 			if (ieee->dev->flags & IFF_PROMISC)
 				is_packet_for_us = 1;
 			/* to us */
-			else if (memcmp(hdr->addr1, ieee->dev->dev_addr, ETH_ALEN) == 0)
+			else if (ether_addr_equal(hdr->addr1, ieee->dev->dev_addr))
 				is_packet_for_us = 1;
 			/* mcast */
 			else if (is_multicast_ether_addr(hdr->addr1))
@@ -889,18 +889,18 @@ void libipw_rx_any(struct libipw_device *ieee,
 		break;
 	case IW_MODE_INFRA:
 		/* our BSS (== from our AP) and from DS */
-		if (memcmp(hdr->addr2, ieee->bssid, ETH_ALEN) == 0)
+		if (ether_addr_equal(hdr->addr2, ieee->bssid))
 		if ((fc & (IEEE80211_FCTL_TODS+IEEE80211_FCTL_FROMDS)) == IEEE80211_FCTL_FROMDS) {
 			/* promisc: get all */
 			if (ieee->dev->flags & IFF_PROMISC)
 				is_packet_for_us = 1;
 			/* to us */
-			else if (memcmp(hdr->addr1, ieee->dev->dev_addr, ETH_ALEN) == 0)
+			else if (ether_addr_equal(hdr->addr1, ieee->dev->dev_addr))
 				is_packet_for_us = 1;
 			/* mcast */
 			else if (is_multicast_ether_addr(hdr->addr1)) {
 				/* not our own packet bcasted from AP */
-				if (memcmp(hdr->addr3, ieee->dev->dev_addr, ETH_ALEN))
+				if (!ether_addr_equal(hdr->addr3, ieee->dev->dev_addr))
 					is_packet_for_us = 1;
 			}
 		}
@@ -1468,7 +1468,7 @@ static inline int is_same_network(struct libipw_network *src,
 	 * as one network */
 	return ((src->ssid_len == dst->ssid_len) &&
 		(src->channel == dst->channel) &&
-		ether_addr_equal(src->bssid, dst->bssid) &&
+		ether_addr_equal_64bits(src->bssid, dst->bssid) &&
 		!memcmp(src->ssid, dst->ssid, src->ssid_len));
 }
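
ether_addr_equal_64bits() may read the two addresses as 8-byte words, so it is only valid when each 6-byte address is followed by at least two more bytes inside its containing structure; that holds for the BSSID fields of the network structs compared here, which is why only this call was upgraded. A sketch of the constraint (example struct, not the driver's):

struct example_entry {
	u8 bssid[ETH_ALEN];
	__le16 capability;	/* trailing bytes that make the 64-bit
				 * over-read safe */
};

static bool example_same_bss(const struct example_entry *a,
			     const struct example_entry *b)
{
	return ether_addr_equal_64bits(a->bssid, b->bssid);
}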
 
diff --git a/drivers/net/wireless/iwlegacy/3945-debug.c b/drivers/net/wireless/iwlegacy/3945-debug.c
index f767dd106b09..c1b4441fb8b2 100644
--- a/drivers/net/wireless/iwlegacy/3945-debug.c
+++ b/drivers/net/wireless/iwlegacy/3945-debug.c
@@ -48,7 +48,7 @@ il3945_stats_flag(struct il_priv *il, char *buf, int bufsz)
 	return p;
 }
 
-ssize_t
+static ssize_t
 il3945_ucode_rx_stats_read(struct file *file, char __user *user_buf,
 			   size_t count, loff_t *ppos)
 {
@@ -313,7 +313,7 @@ il3945_ucode_rx_stats_read(struct file *file, char __user *user_buf,
 	return ret;
 }
 
-ssize_t
+static ssize_t
 il3945_ucode_tx_stats_read(struct file *file, char __user *user_buf,
 			   size_t count, loff_t *ppos)
 {
@@ -403,7 +403,7 @@ il3945_ucode_tx_stats_read(struct file *file, char __user *user_buf,
 	return ret;
 }
 
-ssize_t
+static ssize_t
 il3945_ucode_general_stats_read(struct file *file, char __user *user_buf,
 				size_t count, loff_t *ppos)
 {
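
These readers are referenced only through file_operations tables in the same file, so internal linkage is all they need, and marking them static lets the compiler warn if they ever become unused. Generic shape of such a debugfs reader (names hypothetical, not the iwlegacy code):

static ssize_t example_read(struct file *file, char __user *buf,
			    size_t count, loff_t *ppos)
{
	static const char msg[] = "example\n";

	return simple_read_from_buffer(buf, count, ppos, msg, sizeof(msg) - 1);
}

static const struct file_operations example_fops = {
	.read	= example_read,
	.open	= simple_open,
	.llseek	= default_llseek,
};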
diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
index dea3b50d68b9..0487461ae4da 100644
--- a/drivers/net/wireless/iwlegacy/3945-mac.c
+++ b/drivers/net/wireless/iwlegacy/3945-mac.c
@@ -1595,7 +1595,7 @@ il3945_get_channels_for_scan(struct il_priv *il, enum ieee80211_band band,
 		 *  and use long active_dwell time.
 		 */
 		if (!is_active || il_is_channel_passive(ch_info) ||
-		    (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN)) {
+		    (chan->flags & IEEE80211_CHAN_NO_IR)) {
 			scan_ch->type = 0;	/* passive */
 			if (IL_UCODE_API(il->ucode_ver) == 1)
 				scan_ch->active_dwell =
@@ -2396,8 +2396,7 @@ __il3945_up(struct il_priv *il)
 		clear_bit(S_RFKILL, &il->status);
 	else {
 		set_bit(S_RFKILL, &il->status);
-		IL_WARN("Radio disabled by HW RF Kill switch\n");
-		return -ENODEV;
+		return -ERFKILL;
 	}
 
 	_il_wr(il, CSR_INT, 0xFFFFFFFF);
@@ -3575,9 +3574,9 @@ il3945_setup_mac(struct il_priv *il)
 	hw->wiphy->interface_modes =
 	    BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_ADHOC);
 
-	hw->wiphy->flags |=
-	    WIPHY_FLAG_CUSTOM_REGULATORY | WIPHY_FLAG_DISABLE_BEACON_HINTS |
-	    WIPHY_FLAG_IBSS_RSN;
+	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
+	hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG |
+				       REGULATORY_DISABLE_BEACON_HINTS;
 
 	hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
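
The regulatory-related bits move out of wiphy->flags into the dedicated wiphy->regulatory_flags word (REGULATORY_CUSTOM_REG, REGULATORY_STRICT_REG, REGULATORY_DISABLE_BEACON_HINTS), while non-regulatory flags such as WIPHY_FLAG_IBSS_RSN stay where they were. Typical driver setup after the change (sketch):

static void example_setup_regulatory(struct wiphy *wiphy,
				     const struct ieee80211_regdomain *rd)
{
	wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG |
				   REGULATORY_DISABLE_BEACON_HINTS;
	wiphy_apply_custom_regulatory(wiphy, rd);	/* use rd, not reg.c's world domain */
}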
 
diff --git a/drivers/net/wireless/iwlegacy/3945-rs.c b/drivers/net/wireless/iwlegacy/3945-rs.c
index aea667b430c3..9a45f6f626f6 100644
--- a/drivers/net/wireless/iwlegacy/3945-rs.c
+++ b/drivers/net/wireless/iwlegacy/3945-rs.c
@@ -25,7 +25,6 @@
  *****************************************************************************/
 
 #include <linux/kernel.h>
-#include <linux/init.h>
 #include <linux/skbuff.h>
 #include <linux/slab.h>
 #include <net/mac80211.h>
diff --git a/drivers/net/wireless/iwlegacy/3945.c b/drivers/net/wireless/iwlegacy/3945.c
index f09e257759d5..d37a6fd90d40 100644
--- a/drivers/net/wireless/iwlegacy/3945.c
+++ b/drivers/net/wireless/iwlegacy/3945.c
@@ -26,7 +26,6 @@
 
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/init.h>
 #include <linux/slab.h>
 #include <linux/pci.h>
 #include <linux/dma-mapping.h>
@@ -466,10 +465,10 @@ il3945_is_network_packet(struct il_priv *il, struct ieee80211_hdr *header)
 	switch (il->iw_mode) {
 	case NL80211_IFTYPE_ADHOC:	/* Header: Dest. | Source    | BSSID */
 		/* packets to our IBSS update information */
-		return ether_addr_equal(header->addr3, il->bssid);
+		return ether_addr_equal_64bits(header->addr3, il->bssid);
 	case NL80211_IFTYPE_STATION:	/* Header: Dest. | AP{BSSID} | Source */
 		/* packets to our IBSS update information */
-		return ether_addr_equal(header->addr2, il->bssid);
+		return ether_addr_equal_64bits(header->addr2, il->bssid);
 	default:
 		return 1;
 	}
diff --git a/drivers/net/wireless/iwlegacy/4965-debug.c b/drivers/net/wireless/iwlegacy/4965-debug.c
index c8153fc64f74..e0597bfdddb8 100644
--- a/drivers/net/wireless/iwlegacy/4965-debug.c
+++ b/drivers/net/wireless/iwlegacy/4965-debug.c
@@ -55,7 +55,7 @@ il4965_stats_flag(struct il_priv *il, char *buf, int bufsz)
 	return p;
 }
 
-ssize_t
+static ssize_t
 il4965_ucode_rx_stats_read(struct file *file, char __user *user_buf,
 			   size_t count, loff_t *ppos)
 {
@@ -467,7 +467,7 @@ il4965_ucode_rx_stats_read(struct file *file, char __user *user_buf,
 	return ret;
 }
 
-ssize_t
+static ssize_t
 il4965_ucode_tx_stats_read(struct file *file, char __user *user_buf,
 			   size_t count, loff_t *ppos)
 {
@@ -633,7 +633,7 @@ il4965_ucode_tx_stats_read(struct file *file, char __user *user_buf,
 	return ret;
 }
 
-ssize_t
+static ssize_t
 il4965_ucode_general_stats_read(struct file *file, char __user *user_buf,
 				size_t count, loff_t *ppos)
 {
diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c
index 3982ab76f375..43f488a8cda2 100644
--- a/drivers/net/wireless/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/iwlegacy/4965-mac.c
@@ -805,7 +805,7 @@ il4965_get_channels_for_scan(struct il_priv *il, struct ieee80211_vif *vif,
 		}
 
 		if (!is_active || il_is_channel_passive(ch_info) ||
-		    (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN))
+		    (chan->flags & IEEE80211_CHAN_NO_IR))
 			scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
 		else
 			scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE;
@@ -5778,9 +5778,9 @@ il4965_mac_setup_register(struct il_priv *il, u32 max_probe_length)
 	hw->wiphy->interface_modes =
 	    BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_ADHOC);
 
-	hw->wiphy->flags |=
-	    WIPHY_FLAG_CUSTOM_REGULATORY | WIPHY_FLAG_DISABLE_BEACON_HINTS |
-	    WIPHY_FLAG_IBSS_RSN;
+	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
+	hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG |
+				       REGULATORY_DISABLE_BEACON_HINTS;
 
 	/*
 	 * For now, disable PS by default because it affects
diff --git a/drivers/net/wireless/iwlegacy/4965-rs.c b/drivers/net/wireless/iwlegacy/4965-rs.c
index 3ccbaf791b48..4d5e33259ca8 100644
--- a/drivers/net/wireless/iwlegacy/4965-rs.c
+++ b/drivers/net/wireless/iwlegacy/4965-rs.c
@@ -24,7 +24,6 @@
  *
  *****************************************************************************/
 #include <linux/kernel.h>
-#include <linux/init.h>
 #include <linux/skbuff.h>
 #include <linux/slab.h>
 #include <net/mac80211.h>
diff --git a/drivers/net/wireless/iwlegacy/4965.c b/drivers/net/wireless/iwlegacy/4965.c
index 777a578294bd..fe47db9c20cd 100644
--- a/drivers/net/wireless/iwlegacy/4965.c
+++ b/drivers/net/wireless/iwlegacy/4965.c
@@ -26,7 +26,6 @@
 
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/init.h>
 #include <linux/pci.h>
 #include <linux/dma-mapping.h>
 #include <linux/delay.h>
diff --git a/drivers/net/wireless/iwlegacy/common.c b/drivers/net/wireless/iwlegacy/common.c
index b03e22ef5462..02e8233ccf29 100644
--- a/drivers/net/wireless/iwlegacy/common.c
+++ b/drivers/net/wireless/iwlegacy/common.c
@@ -33,7 +33,6 @@
 #include <linux/slab.h>
 #include <linux/types.h>
 #include <linux/lockdep.h>
-#include <linux/init.h>
 #include <linux/pci.h>
 #include <linux/dma-mapping.h>
 #include <linux/delay.h>
@@ -3445,10 +3444,10 @@ il_init_geos(struct il_priv *il)
 
 		if (il_is_channel_valid(ch)) {
 			if (!(ch->flags & EEPROM_CHANNEL_IBSS))
-				geo_ch->flags |= IEEE80211_CHAN_NO_IBSS;
+				geo_ch->flags |= IEEE80211_CHAN_NO_IR;
 
 			if (!(ch->flags & EEPROM_CHANNEL_ACTIVE))
-				geo_ch->flags |= IEEE80211_CHAN_PASSIVE_SCAN;
+				geo_ch->flags |= IEEE80211_CHAN_NO_IR;
 
 			if (ch->flags & EEPROM_CHANNEL_RADAR)
 				geo_ch->flags |= IEEE80211_CHAN_RADAR;
@@ -3746,10 +3745,10 @@ il_full_rxon_required(struct il_priv *il)
 
 	/* These items are only settable from the full RXON command */
 	CHK(!il_is_associated(il));
-	CHK(!ether_addr_equal(staging->bssid_addr, active->bssid_addr));
-	CHK(!ether_addr_equal(staging->node_addr, active->node_addr));
-	CHK(!ether_addr_equal(staging->wlap_bssid_addr,
-			      active->wlap_bssid_addr));
+	CHK(!ether_addr_equal_64bits(staging->bssid_addr, active->bssid_addr));
+	CHK(!ether_addr_equal_64bits(staging->node_addr, active->node_addr));
+	CHK(!ether_addr_equal_64bits(staging->wlap_bssid_addr,
+				     active->wlap_bssid_addr));
 	CHK_NEQ(staging->dev_type, active->dev_type);
 	CHK_NEQ(staging->channel, active->channel);
 	CHK_NEQ(staging->air_propagation, active->air_propagation);
diff --git a/drivers/net/wireless/iwlegacy/debug.c b/drivers/net/wireless/iwlegacy/debug.c
index eff26501d60a..344010153196 100644
--- a/drivers/net/wireless/iwlegacy/debug.c
+++ b/drivers/net/wireless/iwlegacy/debug.c
@@ -31,7 +31,7 @@
 
 #include "common.h"
 
-void
+static void
 il_clear_traffic_stats(struct il_priv *il)
 {
 	memset(&il->tx_stats, 0, sizeof(struct traffic_stats));
@@ -567,12 +567,12 @@ il_dbgfs_channels_read(struct file *file, char __user *user_buf, size_t count,
 				      flags & IEEE80211_CHAN_RADAR ?
 				      " (IEEE 802.11h required)" : "",
 				      ((channels[i].
-					flags & IEEE80211_CHAN_NO_IBSS) ||
+					flags & IEEE80211_CHAN_NO_IR) ||
 				       (channels[i].
 					flags & IEEE80211_CHAN_RADAR)) ? "" :
 				      ", IBSS",
 				      channels[i].
-				      flags & IEEE80211_CHAN_PASSIVE_SCAN ?
+				      flags & IEEE80211_CHAN_NO_IR ?
 				      "passive only" : "active/passive");
 	}
 	supp_band = il_get_hw_mode(il, IEEE80211_BAND_5GHZ);
@@ -594,12 +594,12 @@ il_dbgfs_channels_read(struct file *file, char __user *user_buf, size_t count,
 				      flags & IEEE80211_CHAN_RADAR ?
 				      " (IEEE 802.11h required)" : "",
 				      ((channels[i].
-					flags & IEEE80211_CHAN_NO_IBSS) ||
+					flags & IEEE80211_CHAN_NO_IR) ||
 				       (channels[i].
 					flags & IEEE80211_CHAN_RADAR)) ? "" :
 				      ", IBSS",
 				      channels[i].
-				      flags & IEEE80211_CHAN_PASSIVE_SCAN ?
+				      flags & IEEE80211_CHAN_NO_IR ?
 				      "passive only" : "active/passive");
 	}
 	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
diff --git a/drivers/net/wireless/iwlwifi/dvm/agn.h b/drivers/net/wireless/iwlwifi/dvm/agn.h
index 23d5f0275ce9..562772d85102 100644
--- a/drivers/net/wireless/iwlwifi/dvm/agn.h
+++ b/drivers/net/wireless/iwlwifi/dvm/agn.h
@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/dvm/calib.c b/drivers/net/wireless/iwlwifi/dvm/calib.c
index 1b0f0d502568..be1086c87157 100644
--- a/drivers/net/wireless/iwlwifi/dvm/calib.c
+++ b/drivers/net/wireless/iwlwifi/dvm/calib.c
@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/dvm/calib.h b/drivers/net/wireless/iwlwifi/dvm/calib.h
index cfddde194940..aeae4e80ea40 100644
--- a/drivers/net/wireless/iwlwifi/dvm/calib.h
+++ b/drivers/net/wireless/iwlwifi/dvm/calib.h
@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/dvm/commands.h b/drivers/net/wireless/iwlwifi/dvm/commands.h
index ebdac909f0cd..751ae1d10b7f 100644
--- a/drivers/net/wireless/iwlwifi/dvm/commands.h
+++ b/drivers/net/wireless/iwlwifi/dvm/commands.h
@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
index d94f8ab15004..d2fe2596d54e 100644
--- a/drivers/net/wireless/iwlwifi/dvm/debugfs.c
+++ b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
@@ -2,7 +2,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -352,12 +352,12 @@ static ssize_t iwl_dbgfs_channels_read(struct file *file, char __user *user_buf,
 					channels[i].max_power,
 					channels[i].flags & IEEE80211_CHAN_RADAR ?
 					" (IEEE 802.11h required)" : "",
-					((channels[i].flags & IEEE80211_CHAN_NO_IBSS)
+					((channels[i].flags & IEEE80211_CHAN_NO_IR)
 					|| (channels[i].flags &
 					IEEE80211_CHAN_RADAR)) ? "" :
 					", IBSS",
 					channels[i].flags &
-					IEEE80211_CHAN_PASSIVE_SCAN ?
+					IEEE80211_CHAN_NO_IR ?
 					"passive only" : "active/passive");
 	}
 	supp_band = iwl_get_hw_mode(priv, IEEE80211_BAND_5GHZ);
@@ -375,12 +375,12 @@ static ssize_t iwl_dbgfs_channels_read(struct file *file, char __user *user_buf,
 					channels[i].max_power,
 					channels[i].flags & IEEE80211_CHAN_RADAR ?
 					" (IEEE 802.11h required)" : "",
-					((channels[i].flags & IEEE80211_CHAN_NO_IBSS)
+					((channels[i].flags & IEEE80211_CHAN_NO_IR)
 					|| (channels[i].flags &
 					IEEE80211_CHAN_RADAR)) ? "" :
 					", IBSS",
 					channels[i].flags &
-					IEEE80211_CHAN_PASSIVE_SCAN ?
+					IEEE80211_CHAN_NO_IR ?
 					"passive only" : "active/passive");
 	}
 	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
diff --git a/drivers/net/wireless/iwlwifi/dvm/dev.h b/drivers/net/wireless/iwlwifi/dvm/dev.h
index 7434d9edf3b7..3441f70d0ff9 100644
--- a/drivers/net/wireless/iwlwifi/dvm/dev.h
+++ b/drivers/net/wireless/iwlwifi/dvm/dev.h
@@ -1,6 +1,6 @@
 /******************************************************************************
  *
- * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/dvm/devices.c b/drivers/net/wireless/iwlwifi/dvm/devices.c
index 352c6cb7b4f1..7b140e487deb 100644
--- a/drivers/net/wireless/iwlwifi/dvm/devices.c
+++ b/drivers/net/wireless/iwlwifi/dvm/devices.c
@@ -1,6 +1,6 @@
 /******************************************************************************
  *
- * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/dvm/led.c b/drivers/net/wireless/iwlwifi/dvm/led.c
index 33c7e15d24f5..ca4d6692cc4e 100644
--- a/drivers/net/wireless/iwlwifi/dvm/led.c
+++ b/drivers/net/wireless/iwlwifi/dvm/led.c
@@ -1,6 +1,6 @@
 /******************************************************************************
  *
- * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of version 2 of the GNU General Public License as
@@ -27,7 +27,6 @@
 
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/init.h>
 #include <linux/delay.h>
 #include <linux/skbuff.h>
 #include <linux/netdevice.h>
diff --git a/drivers/net/wireless/iwlwifi/dvm/led.h b/drivers/net/wireless/iwlwifi/dvm/led.h
index 8749dcfe695f..6a0817d9c4fa 100644
--- a/drivers/net/wireless/iwlwifi/dvm/led.h
+++ b/drivers/net/wireless/iwlwifi/dvm/led.h
@@ -1,6 +1,6 @@
 /******************************************************************************
  *
- * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/dvm/lib.c b/drivers/net/wireless/iwlwifi/dvm/lib.c
index 3d5bdc4217a8..576f7ee38ca5 100644
--- a/drivers/net/wireless/iwlwifi/dvm/lib.c
+++ b/drivers/net/wireless/iwlwifi/dvm/lib.c
@@ -2,7 +2,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -29,7 +29,6 @@
 #include <linux/etherdevice.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/init.h>
 #include <linux/sched.h>
 #include <net/mac80211.h>
 
diff --git a/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
index cae4d3182e33..c24d1d3d55f6 100644
--- a/drivers/net/wireless/iwlwifi/dvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
@@ -1,6 +1,6 @@
 /******************************************************************************
  *
- * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
  *
  * Portions of this file are derived from the ipw3945 project, as well
  * as portions of the ieee80211 subsystem header files.
@@ -28,7 +28,6 @@
  *****************************************************************************/
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/init.h>
 #include <linux/slab.h>
 #include <linux/dma-mapping.h>
 #include <linux/delay.h>
@@ -155,9 +154,9 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
 			ARRAY_SIZE(iwlagn_iface_combinations_dualmode);
 	}
 
-	hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
-			    WIPHY_FLAG_DISABLE_BEACON_HINTS |
-			    WIPHY_FLAG_IBSS_RSN;
+	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
+	hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG |
+				       REGULATORY_DISABLE_BEACON_HINTS;
 
 #ifdef CONFIG_PM_SLEEP
 	if (priv->fw->img[IWL_UCODE_WOWLAN].sec[0].len &&
@@ -322,12 +321,6 @@ static void iwlagn_mac_stop(struct ieee80211_hw *hw)
 
 	flush_workqueue(priv->workqueue);
 
-	/* User space software may expect getting rfkill changes
-	 * even if interface is down, trans->down will leave the RF
-	 * kill interrupt enabled
-	 */
-	iwl_trans_stop_hw(priv->trans, false);
-
 	IWL_DEBUG_MAC80211(priv, "leave\n");
 }
 
@@ -413,9 +406,8 @@ static bool iwl_resume_status_fn(struct iwl_notif_wait_data *notif_wait,
 {
 	struct iwl_resume_data *resume_data = data;
 	struct iwl_priv *priv = resume_data->priv;
-	u32 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
 
-	if (len - 4 != sizeof(*resume_data->cmd)) {
+	if (iwl_rx_packet_payload_len(pkt) != sizeof(*resume_data->cmd)) {
 		IWL_ERR(priv, "rx wrong size data\n");
 		return true;
 	}
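
This and the later hunks in rx.c and ucode.c replace open-coded "le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK" arithmetic with the iwl_rx_packet_len()/iwl_rx_packet_payload_len() accessors. A sketch of what those accessors compute (matching their definitions in iwl-trans.h): the payload length is the frame size minus the command header, which is exactly what the removed "len - 4" and "len -= sizeof(struct iwl_cmd_header)" code did by hand.

static inline u32 rx_packet_len_sketch(const struct iwl_rx_packet *pkt)
{
	return le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
}

static inline u32 rx_packet_payload_len_sketch(const struct iwl_rx_packet *pkt)
{
	return rx_packet_len_sketch(pkt) - sizeof(pkt->hdr);	/* hdr is 4 bytes */
}
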
diff --git a/drivers/net/wireless/iwlwifi/dvm/main.c b/drivers/net/wireless/iwlwifi/dvm/main.c
index 7aad766865cf..ba1b1ea54252 100644
--- a/drivers/net/wireless/iwlwifi/dvm/main.c
+++ b/drivers/net/wireless/iwlwifi/dvm/main.c
@@ -1,6 +1,6 @@
 /******************************************************************************
  *
- * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
  *
  * Portions of this file are derived from the ipw3945 project, as well
  * as portions of the ieee80211 subsystem header files.
@@ -1313,7 +1313,7 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
 	}
 
 	/* Reset chip to save power until we load uCode during "up". */
-	iwl_trans_stop_hw(priv->trans, false);
+	iwl_trans_stop_device(priv->trans);
 
 	priv->nvm_data = iwl_parse_eeprom_data(priv->trans->dev, priv->cfg,
 						  priv->eeprom_blob,
@@ -1458,7 +1458,7 @@ static void iwl_op_mode_dvm_stop(struct iwl_op_mode *op_mode)
 
 	dev_kfree_skb(priv->beacon_skb);
 
-	iwl_trans_stop_hw(priv->trans, true);
+	iwl_trans_op_mode_leave(priv->trans);
 	ieee80211_free_hw(priv->hw);
 }
 
diff --git a/drivers/net/wireless/iwlwifi/dvm/power.c b/drivers/net/wireless/iwlwifi/dvm/power.c
index 77cb59712235..b4e61417013a 100644
--- a/drivers/net/wireless/iwlwifi/dvm/power.c
+++ b/drivers/net/wireless/iwlwifi/dvm/power.c
@@ -1,6 +1,6 @@
 /******************************************************************************
  *
- * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
  *
  * Portions of this file are derived from the ipw3945 project, as well
  * as portions of the ieee80211 subsystem header files.
@@ -30,7 +30,6 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/slab.h>
-#include <linux/init.h>
 #include <net/mac80211.h>
 #include "iwl-io.h"
 #include "iwl-debug.h"
diff --git a/drivers/net/wireless/iwlwifi/dvm/power.h b/drivers/net/wireless/iwlwifi/dvm/power.h
index 7b03e1342d47..570d3a5e4670 100644
--- a/drivers/net/wireless/iwlwifi/dvm/power.h
+++ b/drivers/net/wireless/iwlwifi/dvm/power.h
@@ -1,6 +1,6 @@
 /******************************************************************************
  *
- * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
  *
  * Portions of this file are derived from the ipw3945 project, as well
  * as portions of the ieee80211 subsystem header files.
diff --git a/drivers/net/wireless/iwlwifi/dvm/rs.c b/drivers/net/wireless/iwlwifi/dvm/rs.c
index b647e506564c..0977d93b529d 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rs.c
@@ -1,6 +1,6 @@
 /******************************************************************************
  *
- * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of version 2 of the GNU General Public License as
@@ -24,7 +24,6 @@
  *
  *****************************************************************************/
 #include <linux/kernel.h>
-#include <linux/init.h>
 #include <linux/skbuff.h>
 #include <linux/slab.h>
 #include <net/mac80211.h>
diff --git a/drivers/net/wireless/iwlwifi/dvm/rs.h b/drivers/net/wireless/iwlwifi/dvm/rs.h
index 26fc550cd68c..bdd5644a400b 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rs.h
+++ b/drivers/net/wireless/iwlwifi/dvm/rs.h
@@ -1,6 +1,6 @@
 /******************************************************************************
  *
- * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of version 2 of the GNU General Public License as
@@ -389,13 +389,6 @@ struct iwl_lq_sta {
 	u8 last_bt_traffic;
 };
 
-static inline u8 num_of_ant(u8 mask)
-{
-	return  !!((mask) & ANT_A) +
-		!!((mask) & ANT_B) +
-		!!((mask) & ANT_C);
-}
-
 static inline u8 first_antenna(u8 mask)
 {
 	if (mask & ANT_A)
diff --git a/drivers/net/wireless/iwlwifi/dvm/rx.c b/drivers/net/wireless/iwlwifi/dvm/rx.c
index d71776dd1e6a..7a1bc1c547e1 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rx.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rx.c
@@ -1,6 +1,6 @@
 /******************************************************************************
  *
- * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
  *
  * Portions of this file are derived from the ipw3945 project, as well
  * as portions of the ieee80211 subsystem header files.

@@ -205,8 +205,7 @@ static int iwlagn_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
 					     struct iwl_device_cmd *cmd)
 {
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
-	u32 __maybe_unused len =
-		le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
+	u32 __maybe_unused len = iwl_rx_packet_len(pkt);
 	IWL_DEBUG_RADIO(priv, "Dumping %d bytes of unhandled "
 			"notification for PM_DEBUG_STATISTIC_NOTIFIC:\n", len);
 	iwl_print_hex_dump(priv, IWL_DL_RADIO, pkt->data, len);
@@ -457,7 +456,7 @@ static int iwlagn_rx_statistics(struct iwl_priv *priv,
 	const int reg_recalib_period = 60;
 	int change;
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
-	u32 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
+	u32 len = iwl_rx_packet_payload_len(pkt);
 	__le32 *flag;
 	struct statistics_general_common *common;
 	struct statistics_rx_non_phy *rx_non_phy;
@@ -467,8 +466,6 @@ static int iwlagn_rx_statistics(struct iwl_priv *priv,
 	struct statistics_tx *tx;
 	struct statistics_bt_activity *bt_activity;
 
-	len -= sizeof(struct iwl_cmd_header); /* skip header */
-
 	IWL_DEBUG_RX(priv, "Statistics notification received (%d bytes).\n",
 		     len);
 
diff --git a/drivers/net/wireless/iwlwifi/dvm/rxon.c b/drivers/net/wireless/iwlwifi/dvm/rxon.c
index d7ce2f12a907..503a81e58185 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rxon.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rxon.c
@@ -1,6 +1,6 @@
 /******************************************************************************
  *
- * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/dvm/scan.c b/drivers/net/wireless/iwlwifi/dvm/scan.c
index 35e0ee8b4e5b..be98b913ed58 100644
--- a/drivers/net/wireless/iwlwifi/dvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/dvm/scan.c
@@ -2,7 +2,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -544,7 +544,7 @@ static int iwl_get_channels_for_scan(struct iwl_priv *priv,
 		channel = chan->hw_value;
 		scan_ch->channel = cpu_to_le16(channel);
 
-		if (!is_active || (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN))
+		if (!is_active || (chan->flags & IEEE80211_CHAN_NO_IR))
 			scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
 		else
 			scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE;
diff --git a/drivers/net/wireless/iwlwifi/dvm/sta.c b/drivers/net/wireless/iwlwifi/dvm/sta.c
index c3c13ce96eb0..c0d070c5df5e 100644
--- a/drivers/net/wireless/iwlwifi/dvm/sta.c
+++ b/drivers/net/wireless/iwlwifi/dvm/sta.c
@@ -1,6 +1,6 @@
 /******************************************************************************
  *
- * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
  *
  * Portions of this file are derived from the ipw3945 project, as well
  * as portions of the ieee80211 subsystem header files.
diff --git a/drivers/net/wireless/iwlwifi/dvm/tt.c b/drivers/net/wireless/iwlwifi/dvm/tt.c
index fbeee081ee2f..058c5892c427 100644
--- a/drivers/net/wireless/iwlwifi/dvm/tt.c
+++ b/drivers/net/wireless/iwlwifi/dvm/tt.c
@@ -1,6 +1,6 @@
 /******************************************************************************
  *
- * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
  *
  * Portions of this file are derived from the ipw3945 project, as well
  * as portions of the ieee80211 subsystem header files.
@@ -30,7 +30,6 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/slab.h>
-#include <linux/init.h>
 #include <net/mac80211.h>
 #include "iwl-io.h"
 #include "iwl-modparams.h"
diff --git a/drivers/net/wireless/iwlwifi/dvm/tt.h b/drivers/net/wireless/iwlwifi/dvm/tt.h
index 9356c4b908ca..507726534b84 100644
--- a/drivers/net/wireless/iwlwifi/dvm/tt.h
+++ b/drivers/net/wireless/iwlwifi/dvm/tt.h
@@ -1,6 +1,6 @@
 /******************************************************************************
  *
- * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
  *
  * Portions of this file are derived from the ipw3945 project, as well
  * as portions of the ieee80211 subsystem header files.
diff --git a/drivers/net/wireless/iwlwifi/dvm/tx.c b/drivers/net/wireless/iwlwifi/dvm/tx.c
index 1fef5240e6ad..a6839dfcb82d 100644
--- a/drivers/net/wireless/iwlwifi/dvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/dvm/tx.c
@@ -2,7 +2,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -29,7 +29,6 @@
 
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/init.h>
 #include <linux/sched.h>
 #include <linux/ieee80211.h>
 #include "iwl-io.h"
@@ -368,6 +367,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv,
 		goto drop_unlock_priv;
 
 	memset(dev_cmd, 0, sizeof(*dev_cmd));
+	dev_cmd->hdr.cmd = REPLY_TX;
 	tx_cmd = (struct iwl_tx_cmd *) dev_cmd->payload;
 
 	/* Total # bytes to be transmitted */
diff --git a/drivers/net/wireless/iwlwifi/dvm/ucode.c b/drivers/net/wireless/iwlwifi/dvm/ucode.c
index 63637949a146..cf03ef5619d9 100644
--- a/drivers/net/wireless/iwlwifi/dvm/ucode.c
+++ b/drivers/net/wireless/iwlwifi/dvm/ucode.c
@@ -2,7 +2,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -28,7 +28,6 @@
  *****************************************************************************/
 
 #include <linux/kernel.h>
-#include <linux/init.h>
 
 #include "iwl-io.h"
 #include "iwl-agn-hw.h"
@@ -389,7 +388,6 @@ static bool iwlagn_wait_calib(struct iwl_notif_wait_data *notif_wait,
 {
 	struct iwl_priv *priv = data;
 	struct iwl_calib_hdr *hdr;
-	int len;
 
 	if (pkt->hdr.cmd != CALIBRATION_RES_NOTIFICATION) {
 		WARN_ON(pkt->hdr.cmd != CALIBRATION_COMPLETE_NOTIFICATION);
@@ -397,12 +395,8 @@ static bool iwlagn_wait_calib(struct iwl_notif_wait_data *notif_wait,
 	}
 
 	hdr = (struct iwl_calib_hdr *)pkt->data;
-	len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
 
-	/* reduce the size by the length field itself */
-	len -= sizeof(__le32);
-
-	if (iwl_calib_set(priv, hdr, len))
+	if (iwl_calib_set(priv, hdr, iwl_rx_packet_payload_len(pkt)))
 		IWL_ERR(priv, "Failed to record calibration data %d\n",
 			hdr->op_code);
 
diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
index 0d2afe098afc..854ba84ccb73 100644
--- a/drivers/net/wireless/iwlwifi/iwl-1000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
@@ -1,6 +1,6 @@
 /******************************************************************************
  *
- * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/iwl-2000.c b/drivers/net/wireless/iwlwifi/iwl-2000.c
index c727ec7c90a6..3e63323637f3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-2000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-2000.c
@@ -1,6 +1,6 @@
 /******************************************************************************
  *
- * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index ecc01e1a61a1..6674f2c4541c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -1,6 +1,6 @@
 /******************************************************************************
  *
- * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index 8ac305be68f4..8048de90233f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -1,6 +1,6 @@
 /******************************************************************************
  *
- * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/iwl-7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c
index 3c34a72a5d64..2a59da2ff87a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-7000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-7000.c
@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -108,7 +108,7 @@ static const struct iwl_base_params iwl7000_base_params = {
 };
 
 static const struct iwl_ht_params iwl7000_ht_params = {
-	.use_rts_for_aggregation = true, /* use rts/cts protection */
+	.stbc = true,
 	.ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ),
 };
 
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-hw.h b/drivers/net/wireless/iwlwifi/iwl-agn-hw.h
index 6d73f943cefa..7f37fb86837b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-hw.h
@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-config.h b/drivers/net/wireless/iwlwifi/iwl-config.h
index 03fd9aa8bfda..1ced525157dc 100644
--- a/drivers/net/wireless/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/iwlwifi/iwl-config.h
@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -129,6 +129,12 @@ enum iwl_led_mode {
 #define ANT_BC		(ANT_B | ANT_C)
 #define ANT_ABC		(ANT_A | ANT_B | ANT_C)
 
+static inline u8 num_of_ant(u8 mask)
+{
+	return  !!((mask) & ANT_A) +
+		!!((mask) & ANT_B) +
+		!!((mask) & ANT_C);
+}
 
 /*
  * @max_ll_items: max number of OTP blocks
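
num_of_ant() moves out of the DVM-private rs.h (removed above) into iwl-config.h next to the ANT_* masks, so that common code such as the VHT capability setup in iwl-nvm-parse.c further down can use it. It is simply a popcount over the three antenna bits; a small illustrative check:

/* Sketch only: expected values for a few masks. */
static void __maybe_unused num_of_ant_examples(void)
{
	WARN_ON(num_of_ant(ANT_A) != 1);
	WARN_ON(num_of_ant(ANT_AB) != 2);
	WARN_ON(num_of_ant(ANT_ABC) != 3);
}
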
@@ -156,12 +162,14 @@ struct iwl_base_params {
 };
 
 /*
+ * @stbc: support Tx STBC and 1*SS Rx STBC
  * @use_rts_for_aggregation: use rts/cts protection for HT traffic
  * @ht40_bands: bitmap of bands (using %IEEE80211_BAND_*) that support HT40
  */
 struct iwl_ht_params {
 	enum ieee80211_smps_mode smps_mode;
 	const bool ht_greenfield_support; /* if used set to true */
+	const bool stbc;
 	bool use_rts_for_aggregation;
 	u8 ht40_bands;
 };
diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h
index da4eca8b3007..9d325516c42d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/iwlwifi/iwl-csr.h
@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -198,7 +198,8 @@
 				 CSR_INT_BIT_RF_KILL | \
 				 CSR_INT_BIT_SW_RX   | \
 				 CSR_INT_BIT_WAKEUP  | \
-				 CSR_INT_BIT_ALIVE)
+				 CSR_INT_BIT_ALIVE   | \
+				 CSR_INT_BIT_RX_PERIODIC)
 
 /* interrupt flags in FH (flow handler) (PCI busmaster DMA) */
 #define CSR_FH_INT_BIT_ERR       (1 << 31) /* Error */
diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
index b2bb32a781dd..a75aac986a23 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debug.h
+++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
@@ -1,6 +1,6 @@
 /******************************************************************************
  *
- * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
  *
  * Portions of this file are derived from the ipw3945 project.
  *
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.c b/drivers/net/wireless/iwlwifi/iwl-devtrace.c
index 8f61c717f619..23e7351e02de 100644
--- a/drivers/net/wireless/iwlwifi/iwl-devtrace.c
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.c
@@ -1,6 +1,6 @@
 /******************************************************************************
  *
- * Copyright(c) 2009 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.h b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
index 684c416d3493..78bd41bf34b0 100644
--- a/drivers/net/wireless/iwlwifi/iwl-devtrace.h
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
@@ -1,6 +1,6 @@
 /******************************************************************************
  *
- * Copyright(c) 2009 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c
index ff570027e9dd..c3728163be46 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.c
@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -322,6 +322,41 @@ static void set_sec_offset(struct iwl_firmware_pieces *pieces,
 	pieces->img[type].sec[sec].offset = offset;
 }
 
+static int iwl_store_cscheme(struct iwl_fw *fw, const u8 *data, const u32 len)
+{
+	int i, j;
+	struct iwl_fw_cscheme_list *l = (struct iwl_fw_cscheme_list *)data;
+	struct iwl_fw_cipher_scheme *fwcs;
+	struct ieee80211_cipher_scheme *cs;
+	u32 cipher;
+
+	if (len < sizeof(*l) ||
+	    len < sizeof(l->size) + l->size * sizeof(l->cs[0]))
+		return -EINVAL;
+
+	for (i = 0, j = 0; i < IWL_UCODE_MAX_CS && i < l->size; i++) {
+		fwcs = &l->cs[j];
+		cipher = le32_to_cpu(fwcs->cipher);
+
+		/* we skip schemes with zero cipher suite selector */
+		if (!cipher)
+			continue;
+
+		cs = &fw->cs[j++];
+		cs->cipher = cipher;
+		cs->iftype = BIT(NL80211_IFTYPE_STATION);
+		cs->hdr_len = fwcs->hdr_len;
+		cs->pn_len = fwcs->pn_len;
+		cs->pn_off = fwcs->pn_off;
+		cs->key_idx_off = fwcs->key_idx_off;
+		cs->key_idx_mask = fwcs->key_idx_mask;
+		cs->key_idx_shift = fwcs->key_idx_shift;
+		cs->mic_len = fwcs->mic_len;
+	}
+
+	return 0;
+}
+
 /*
  * Gets uCode section from tlv.
  */
@@ -729,6 +764,10 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
 				return -EINVAL;
 			}
 			break;
+		case IWL_UCODE_TLV_CSCHEME:
+			if (iwl_store_cscheme(&drv->fw, tlv_data, tlv_len))
+				goto invalid_tlv_len;
+			break;
 		default:
 			IWL_DEBUG_INFO(drv, "unknown TLV: %d\n", tlv_type);
 			break;
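
The new IWL_UCODE_TLV_CSCHEME TLV carries a packed struct iwl_fw_cscheme_list (added to iwl-fw.h below): one size byte followed by "size" packed struct iwl_fw_cipher_scheme entries, which iwl_store_cscheme() copies into fw->cs after validating the length. A sketch of the minimum TLV payload length implied by that check, for a given entry count (helper name is illustrative):

/* Matches the "len < sizeof(l->size) + l->size * sizeof(l->cs[0])" check. */
static u32 cscheme_tlv_min_len_sketch(u8 n_entries)
{
	return sizeof(u8) + n_entries * sizeof(struct iwl_fw_cipher_scheme);
}
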
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.h b/drivers/net/wireless/iwlwifi/iwl-drv.h
index 429337a2b9a1..592c01e11013 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.h
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.h
@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -67,7 +67,7 @@
 /* for all modules */
 #define DRV_NAME        "iwlwifi"
 #define IWLWIFI_VERSION "in-tree:"
-#define DRV_COPYRIGHT	"Copyright(c) 2003-2013 Intel Corporation"
+#define DRV_COPYRIGHT	"Copyright(c) 2003 - 2014 Intel Corporation"
 #define DRV_AUTHOR     "<ilw@linux.intel.com>"
 
 
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c
index 4c887f365908..c44cf1149648 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c
@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -614,10 +614,10 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
 			channel->flags = IEEE80211_CHAN_NO_HT40;
 
 			if (!(eeprom_ch->flags & EEPROM_CHANNEL_IBSS))
-				channel->flags |= IEEE80211_CHAN_NO_IBSS;
+				channel->flags |= IEEE80211_CHAN_NO_IR;
 
 			if (!(eeprom_ch->flags & EEPROM_CHANNEL_ACTIVE))
-				channel->flags |= IEEE80211_CHAN_PASSIVE_SCAN;
+				channel->flags |= IEEE80211_CHAN_NO_IR;
 
 			if (eeprom_ch->flags & EEPROM_CHANNEL_RADAR)
 				channel->flags |= IEEE80211_CHAN_RADAR;
@@ -751,6 +751,13 @@ void iwl_init_ht_hw_capab(const struct iwl_cfg *cfg,
 	ht_info->ht_supported = true;
 	ht_info->cap = IEEE80211_HT_CAP_DSSSCCK40;
 
+	if (cfg->ht_params->stbc) {
+		ht_info->cap |= (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
+
+		if (tx_chains > 1)
+			ht_info->cap |= IEEE80211_HT_CAP_TX_STBC;
+	}
+
 	if (iwlwifi_mod_params.amsdu_size_8K)
 		ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;
 
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h
index d73304a23ec2..e3c7deafabe6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h
@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom-read.c b/drivers/net/wireless/iwlwifi/iwl-eeprom-read.c
index e5f2e362ab0b..25d0105741db 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom-read.c
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom-read.c
@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom-read.h b/drivers/net/wireless/iwlwifi/iwl-eeprom-read.h
index 8e941f8bd7d6..a6d3bdf82cdd 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom-read.h
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom-read.h
@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-fh.h b/drivers/net/wireless/iwlwifi/iwl-fh.h
index 484d318245fb..9564ae173d06 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fh.h
@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/iwlwifi/iwl-fw-file.h
index 6c6c35c5228c..88e2d6eb569f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw-file.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw-file.h
@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -125,6 +125,7 @@ enum iwl_ucode_tlv_type {
 	IWL_UCODE_TLV_SECURE_SEC_INIT	= 25,
 	IWL_UCODE_TLV_SECURE_SEC_WOWLAN	= 26,
 	IWL_UCODE_TLV_NUM_OF_CPU	= 27,
+	IWL_UCODE_TLV_CSCHEME		= 28,
 };
 
 struct iwl_ucode_tlv {
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw.h b/drivers/net/wireless/iwlwifi/iwl-fw.h
index 75db087120c3..5f1493c44097 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw.h
@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -92,6 +92,9 @@
  * @IWL_UCODE_TLV_FLAGS_STA_KEY_CMD: new ADD_STA and ADD_STA_KEY command API
  * @IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD: support device wide power command
  *	containing CAM (Continuous Active Mode) indication.
+ * @IWL_UCODE_TLV_FLAGS_P2P_PS: P2P client power save is supported (only on a
+ *	single bound interface).
+ * @IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD: P2P client supports uAPSD power save
  */
 enum iwl_ucode_tlv_flag {
 	IWL_UCODE_TLV_FLAGS_PAN			= BIT(0),
@@ -113,7 +116,9 @@ enum iwl_ucode_tlv_flag {
 	IWL_UCODE_TLV_FLAGS_SCHED_SCAN		= BIT(17),
 	IWL_UCODE_TLV_FLAGS_STA_KEY_CMD		= BIT(19),
 	IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD	= BIT(20),
+	IWL_UCODE_TLV_FLAGS_P2P_PS		= BIT(21),
 	IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT	= BIT(24),
+	IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD	= BIT(26),
 };
 
 /* The default calibrate table size if not specified by firmware file */
@@ -209,6 +214,44 @@ enum iwl_fw_phy_cfg {
 	FW_PHY_CFG_RX_CHAIN = 0xf << FW_PHY_CFG_RX_CHAIN_POS,
 };
 
+#define IWL_UCODE_MAX_CS		1
+
+/**
+ * struct iwl_fw_cipher_scheme - a cipher scheme supported by FW.
+ * @cipher: a cipher suite selector
+ * @flags: cipher scheme flags (currently reserved for a future use)
+ * @hdr_len: a size of MPDU security header
+ * @pn_len: a size of PN
+ * @pn_off: an offset of pn from the beginning of the security header
+ * @key_idx_off: an offset of key index byte in the security header
+ * @key_idx_mask: a bit mask of key_idx bits
+ * @key_idx_shift: bit shift needed to get key_idx
+ * @mic_len: mic length in bytes
+ * @hw_cipher: a HW cipher index used in host commands
+ */
+struct iwl_fw_cipher_scheme {
+	__le32 cipher;
+	u8 flags;
+	u8 hdr_len;
+	u8 pn_len;
+	u8 pn_off;
+	u8 key_idx_off;
+	u8 key_idx_mask;
+	u8 key_idx_shift;
+	u8 mic_len;
+	u8 hw_cipher;
+} __packed;
+
+/**
+ * struct iwl_fw_cscheme_list - a cipher scheme list
+ * @size: a number of entries
+ * @cs: cipher scheme entries
+ */
+struct iwl_fw_cscheme_list {
+	u8 size;
+	struct iwl_fw_cipher_scheme cs[];
+} __packed;
+
 /**
  * struct iwl_fw - variables associated with the firmware
  *
@@ -224,6 +267,7 @@ enum iwl_fw_phy_cfg {
  * @inst_evtlog_size: event log size for runtime ucode.
  * @inst_errlog_ptr: error log offfset for runtime ucode.
  * @mvm_fw: indicates this is MVM firmware
+ * @cipher_scheme: optional external cipher scheme.
  */
 struct iwl_fw {
 	u32 ucode_ver;
@@ -243,6 +287,8 @@ struct iwl_fw {
 	u32 phy_config;
 
 	bool mvm_fw;
+
+	struct ieee80211_cipher_scheme cs[IWL_UCODE_MAX_CS];
 };
 
 static inline u8 iwl_fw_valid_tx_ant(const struct iwl_fw *fw)
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.c b/drivers/net/wireless/iwlwifi/iwl-io.c
index ad8e19a56eca..f98175a0d35b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.c
+++ b/drivers/net/wireless/iwlwifi/iwl-io.c
@@ -1,6 +1,6 @@
 /******************************************************************************
  *
- * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
  *
  * Portions of this file are derived from the ipw3945 project.
  *
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.h b/drivers/net/wireless/iwlwifi/iwl-io.h
index 63d10ec08dbc..c339c1bed080 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.h
+++ b/drivers/net/wireless/iwlwifi/iwl-io.h
@@ -1,6 +1,6 @@
 /******************************************************************************
  *
- * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
  *
  * Portions of this file are derived from the ipw3945 project.
  *
diff --git a/drivers/net/wireless/iwlwifi/iwl-modparams.h b/drivers/net/wireless/iwlwifi/iwl-modparams.h
index a1f580c0c6c6..0a84ade7edac 100644
--- a/drivers/net/wireless/iwlwifi/iwl-modparams.h
+++ b/drivers/net/wireless/iwlwifi/iwl-modparams.h
@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-notif-wait.c b/drivers/net/wireless/iwlwifi/iwl-notif-wait.c
index 940b8a9d5285..b5bc959b1dfe 100644
--- a/drivers/net/wireless/iwlwifi/iwl-notif-wait.c
+++ b/drivers/net/wireless/iwlwifi/iwl-notif-wait.c
@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-notif-wait.h b/drivers/net/wireless/iwlwifi/iwl-notif-wait.h
index 2e2f1c8c99f9..95af97a6c2cf 100644
--- a/drivers/net/wireless/iwlwifi/iwl-notif-wait.h
+++ b/drivers/net/wireless/iwlwifi/iwl-notif-wait.h
@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
index b76a9a8fc0b3..f06f4cbe1317 100644
--- a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -223,10 +223,10 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
 			channel->flags |= IEEE80211_CHAN_NO_160MHZ;
 
 		if (!(ch_flags & NVM_CHANNEL_IBSS))
-			channel->flags |= IEEE80211_CHAN_NO_IBSS;
+			channel->flags |= IEEE80211_CHAN_NO_IR;
 
 		if (!(ch_flags & NVM_CHANNEL_ACTIVE))
-			channel->flags |= IEEE80211_CHAN_PASSIVE_SCAN;
+			channel->flags |= IEEE80211_CHAN_NO_IR;
 
 		if (ch_flags & NVM_CHANNEL_RADAR)
 			channel->flags |= IEEE80211_CHAN_RADAR;
@@ -263,13 +263,19 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg,
 				  struct iwl_nvm_data *data,
 				  struct ieee80211_sta_vht_cap *vht_cap)
 {
+	int num_ants = num_of_ant(data->valid_rx_ant);
+
 	vht_cap->vht_supported = true;
 
 	vht_cap->cap = IEEE80211_VHT_CAP_SHORT_GI_80 |
 		       IEEE80211_VHT_CAP_RXSTBC_1 |
 		       IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
+		       3 << IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT |
 		       7 << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
 
+	if (num_ants > 1)
+		vht_cap->cap |= IEEE80211_VHT_CAP_TXSTBC;
+
 	if (iwlwifi_mod_params.amsdu_size_8K)
 		vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991;
 
@@ -283,7 +289,8 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg,
 			    IEEE80211_VHT_MCS_NOT_SUPPORTED << 12 |
 			    IEEE80211_VHT_MCS_NOT_SUPPORTED << 14);
 
-	if (data->valid_rx_ant == 1 || cfg->rx_with_siso_diversity) {
+	if (num_ants == 1 ||
+	    cfg->rx_with_siso_diversity) {
 		vht_cap->cap |= IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN |
 				IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN;
 		/* this works because NOT_SUPPORTED == 3 */
diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.h b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.h
index 3325059c52d4..0c4399aba8c6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.h
+++ b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.h
@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-op-mode.h b/drivers/net/wireless/iwlwifi/iwl-op-mode.h
index 976448a57d02..b5be51f3cd3d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-op-mode.h
+++ b/drivers/net/wireless/iwlwifi/iwl-op-mode.h
@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -155,14 +155,12 @@ void iwl_opmode_deregister(const char *name);
 
 /**
  * struct iwl_op_mode - operational mode
+ * @ops - pointer to its own ops
  *
  * This holds an implementation of the mac80211 / fw API.
- *
- * @ops - pointer to its own ops
  */
 struct iwl_op_mode {
 	const struct iwl_op_mode_ops *ops;
-	const struct iwl_trans *trans;
 
 	char op_mode_specific[0] __aligned(sizeof(void *));
 };
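Note: with the cached trans pointer gone, struct iwl_op_mode is reduced to the ops vtable plus the pointer-aligned op_mode_specific[] tail that the concrete op mode overlays with its own state. A sketch of that overlay pattern, with hypothetical names (the real accessor used by the op modes is not part of this hunk):

	struct my_op_mode_priv {
		struct iwl_trans *trans;	/* the op mode keeps its own copy now */
		/* ... */
	};

	static inline struct my_op_mode_priv *
	op_mode_to_priv(struct iwl_op_mode *op_mode)
	{
		/* valid because op_mode_specific is __aligned(sizeof(void *)) */
		return (void *)op_mode->op_mode_specific;
	}
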
diff --git a/drivers/net/wireless/iwlwifi/iwl-phy-db.c b/drivers/net/wireless/iwlwifi/iwl-phy-db.c
index 1a405ae6a9c5..fa77d63a277a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-phy-db.c
+++ b/drivers/net/wireless/iwlwifi/iwl-phy-db.c
@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-phy-db.h b/drivers/net/wireless/iwlwifi/iwl-phy-db.h
index ce983af79644..9ee18d0d2d01 100644
--- a/drivers/net/wireless/iwlwifi/iwl-phy-db.h
+++ b/drivers/net/wireless/iwlwifi/iwl-phy-db.h
@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h
index a70c7b9d9bad..100bd0d79681 100644
--- a/drivers/net/wireless/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/iwlwifi/iwl-prph.h
@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -102,6 +102,9 @@
 /* Device system time */
 #define DEVICE_SYSTEM_TIME_REG 0xA0206C
 
+/* Device NMI register */
+#define DEVICE_SET_NMI_REG 0x00a01c30
+
 /*****************************************************************************
  *                        7000/3000 series SHR DTS addresses                 *
  *****************************************************************************/
@@ -274,4 +277,8 @@ static inline unsigned int SCD_QUEUE_STATUS_BITS(unsigned int chnl)
 
 /*********************** END TX SCHEDULER *************************************/
 
+/* Oscillator clock */
+#define OSC_CLK				(0xa04068)
+#define OSC_CLK_FORCE_CONTROL		(0x8)
+
 #endif				/* __iwl_prph_h__ */
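Note: DEVICE_SET_NMI_REG gives software a way to raise an NMI in the firmware; the new fw_nmi debugfs hook later in this patch uses it exactly like this (periphery write helper from iwl-io.h):

	/* force a firmware NMI, e.g. to exercise error-recovery paths */
	iwl_write_prph(trans, DEVICE_SET_NMI_REG, 1);
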
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h
index 143292b4dbbf..1f065cf4a4ba 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans.h
@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -70,6 +70,7 @@
 #include "iwl-debug.h"
 #include "iwl-config.h"
 #include "iwl-fw.h"
+#include "iwl-op-mode.h"
 
 /**
  * DOC: Transport layer - what is it ?
@@ -100,8 +101,7 @@
  *	   start_fw
  *
  *	5) Then when finished (or reset):
- *	   stop_fw (a.k.a. stop device for the moment)
- *	   stop_hw
+ *	   stop_device
  *
  *	6) Eventually, the free function will be called.
  */
@@ -176,6 +176,16 @@ struct iwl_rx_packet {
 	u8 data[];
 } __packed;
 
+static inline u32 iwl_rx_packet_len(const struct iwl_rx_packet *pkt)
+{
+	return le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
+}
+
+static inline u32 iwl_rx_packet_payload_len(const struct iwl_rx_packet *pkt)
+{
+	return iwl_rx_packet_len(pkt) - sizeof(pkt->hdr);
+}
+
 /**
  * enum CMD_MODE - how to send the host commands ?
  *
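Note: iwl_rx_packet_len()/iwl_rx_packet_payload_len() replace the open-coded "len_n_flags & FH_RSCSR_FRAME_SIZE_MSK minus header" computations scattered through the driver; the d3.c hunks below are converted to them. Typical use when validating a synchronous command response (sketch based on those hunks):

	struct iwl_rx_packet *pkt = cmd.resp_pkt;

	if (iwl_rx_packet_payload_len(pkt) < sizeof(__le16))
		return -EINVAL;	/* response too short for the expected payload */
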
@@ -318,6 +328,24 @@ enum iwl_d3_status {
 };
 
 /**
+ * enum iwl_trans_status: transport status flags
+ * @STATUS_SYNC_HCMD_ACTIVE: a SYNC command is being processed
+ * @STATUS_DEVICE_ENABLED: APM is enabled
+ * @STATUS_TPOWER_PMI: the device might be asleep (need to wake it up)
+ * @STATUS_INT_ENABLED: interrupts are enabled
+ * @STATUS_RFKILL: the HW RFkill switch is in KILL position
+ * @STATUS_FW_ERROR: the fw is in error state
+ */
+enum iwl_trans_status {
+	STATUS_SYNC_HCMD_ACTIVE,
+	STATUS_DEVICE_ENABLED,
+	STATUS_TPOWER_PMI,
+	STATUS_INT_ENABLED,
+	STATUS_RFKILL,
+	STATUS_FW_ERROR,
+};
+
+/**
  * struct iwl_trans_config - transport configuration
  *
  * @op_mode: pointer to the upper layer.
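Note: trans->status is an unsigned long manipulated with the atomic bit helpers; the inline wrappers below test it (RFKILL, FW_ERROR) before sending commands or frames. The transport backend is expected to keep the bits current, along these lines (illustrative, not taken from the PCIe code):

	static void example_rfkill_notify(struct iwl_trans *trans, bool hw_rfkill)
	{
		if (hw_rfkill)
			set_bit(STATUS_RFKILL, &trans->status);
		else
			clear_bit(STATUS_RFKILL, &trans->status);
	}
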
@@ -361,9 +389,7 @@ struct iwl_trans;
  *
  * @start_hw: starts the HW- from that point on, the HW can send interrupts
  *	May sleep
- * @stop_hw: stops the HW- from that point on, the HW will be in low power but
- *	will still issue interrupt if the HW RF kill is triggered unless
- *	op_mode_leaving is true.
+ * @op_mode_leave: Turn off the HW RF kill indication if on
  *	May sleep
  * @start_fw: allocates and inits all the resources for the transport
  *	layer. Also kick a fw image.
@@ -371,8 +397,11 @@ struct iwl_trans;
  * @fw_alive: called when the fw sends alive notification. If the fw provides
  *	the SCD base address in SRAM, then provide it here, or 0 otherwise.
  *	May sleep
- * @stop_device:stops the whole device (embedded CPU put to reset)
- *	May sleep
+ * @stop_device: stops the whole device (embedded CPU put to reset) and stops
+ *	the HW. From that point on, the HW will be in low power but will still
+ *	issue interrupt if the HW RF kill is triggered. This callback must do
+ *	the right thing and not crash even if start_hw() was called but not
+ *	start_fw(). May sleep
  * @d3_suspend: put the device into the correct mode for WoWLAN during
  *	suspend. This is optional, if not implemented WoWLAN will not be
  *	supported. This callback may sleep.
@@ -418,7 +447,7 @@ struct iwl_trans;
 struct iwl_trans_ops {
 
 	int (*start_hw)(struct iwl_trans *iwl_trans);
-	void (*stop_hw)(struct iwl_trans *iwl_trans, bool op_mode_leaving);
+	void (*op_mode_leave)(struct iwl_trans *iwl_trans);
 	int (*start_fw)(struct iwl_trans *trans, const struct fw_img *fw,
 			bool run_in_rfkill);
 	void (*fw_alive)(struct iwl_trans *trans, u32 scd_addr);
@@ -479,6 +508,7 @@ enum iwl_trans_state {
  * @ops - pointer to iwl_trans_ops
  * @op_mode - pointer to the op_mode
  * @cfg - pointer to the configuration
+ * @status: a bit-mask of transport status flags
  * @dev - pointer to struct device * that represents the device
  * @hw_id: a u32 with the ID of the device / subdevice.
  *	Set during transport allocation.
@@ -499,6 +529,7 @@ struct iwl_trans {
 	struct iwl_op_mode *op_mode;
 	const struct iwl_cfg *cfg;
 	enum iwl_trans_state state;
+	unsigned long status;
 
 	struct device *dev;
 	u32 hw_rev;
@@ -540,15 +571,14 @@ static inline int iwl_trans_start_hw(struct iwl_trans *trans)
 	return trans->ops->start_hw(trans);
 }
 
-static inline void iwl_trans_stop_hw(struct iwl_trans *trans,
-				     bool op_mode_leaving)
+static inline void iwl_trans_op_mode_leave(struct iwl_trans *trans)
 {
 	might_sleep();
 
-	trans->ops->stop_hw(trans, op_mode_leaving);
+	if (trans->ops->op_mode_leave)
+		trans->ops->op_mode_leave(trans);
 
-	if (op_mode_leaving)
-		trans->op_mode = NULL;
+	trans->op_mode = NULL;
 
 	trans->state = IWL_TRANS_NO_FW;
 }
@@ -570,6 +600,7 @@ static inline int iwl_trans_start_fw(struct iwl_trans *trans,
 
 	WARN_ON_ONCE(!trans->rx_mpdu_cmd);
 
+	clear_bit(STATUS_FW_ERROR, &trans->status);
 	return trans->ops->start_fw(trans, fw, run_in_rfkill);
 }
 
@@ -601,6 +632,13 @@ static inline int iwl_trans_send_cmd(struct iwl_trans *trans,
 {
 	int ret;
 
+	if (unlikely(!(cmd->flags & CMD_SEND_IN_RFKILL) &&
+		     test_bit(STATUS_RFKILL, &trans->status)))
+		return -ERFKILL;
+
+	if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status)))
+		return -EIO;
+
 	if (unlikely(trans->state != IWL_TRANS_FW_ALIVE)) {
 		IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
 		return -EIO;
@@ -640,6 +678,9 @@ static inline void iwl_trans_free_tx_cmd(struct iwl_trans *trans,
 static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
 			       struct iwl_device_cmd *dev_cmd, int queue)
 {
+	if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status)))
+		return -EIO;
+
 	if (unlikely(trans->state != IWL_TRANS_FW_ALIVE))
 		IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
 
@@ -657,9 +698,6 @@ static inline void iwl_trans_reclaim(struct iwl_trans *trans, int queue,
 
 static inline void iwl_trans_txq_disable(struct iwl_trans *trans, int queue)
 {
-	if (unlikely(trans->state != IWL_TRANS_FW_ALIVE))
-		IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
-
 	trans->ops->txq_disable(trans, queue);
 }
 
@@ -760,7 +798,8 @@ static inline u32 iwl_trans_write_mem32(struct iwl_trans *trans, u32 addr,
 
 static inline void iwl_trans_set_pmi(struct iwl_trans *trans, bool state)
 {
-	trans->ops->set_pmi(trans, state);
+	if (trans->ops->set_pmi)
+		trans->ops->set_pmi(trans, state);
 }
 
 static inline void
@@ -780,6 +819,16 @@ iwl_trans_release_nic_access(struct iwl_trans *trans, unsigned long *flags)
 	__release(nic_access);
 }
 
+static inline void iwl_trans_fw_error(struct iwl_trans *trans)
+{
+	if (WARN_ON_ONCE(!trans->op_mode))
+		return;
+
+	/* prevent double restarts due to the same erroneous FW */
+	if (!test_and_set_bit(STATUS_FW_ERROR, &trans->status))
+		iwl_op_mode_nic_error(trans->op_mode);
+}
+
 /*****************************************************
 * driver (transport) register/unregister functions
 ******************************************************/
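Note: iwl_trans_fw_error() is now the single entry point for reporting a dead firmware: test_and_set_bit() makes sure iwl_op_mode_nic_error() runs only once per error, and iwl_trans_start_fw() clears STATUS_FW_ERROR again before the next firmware load. An illustrative call site (the actual transport-side caller is not part of this excerpt):

	/* e.g. from the transport interrupt handler on a HW/SW error */
	IWL_ERR(trans, "firmware error detected\n");
	iwl_trans_fw_error(trans);
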
diff --git a/drivers/net/wireless/iwlwifi/mvm/Makefile b/drivers/net/wireless/iwlwifi/mvm/Makefile
index 6d73817850ce..f98ec2b23898 100644
--- a/drivers/net/wireless/iwlwifi/mvm/Makefile
+++ b/drivers/net/wireless/iwlwifi/mvm/Makefile
@@ -1,10 +1,10 @@
 obj-$(CONFIG_IWLMVM)   += iwlmvm.o
 iwlmvm-y += fw.o mac80211.o nvm.o ops.o phy-ctxt.o mac-ctxt.o
-iwlmvm-y += utils.o rx.o tx.o binding.o quota.o sta.o
+iwlmvm-y += utils.o rx.o tx.o binding.o quota.o sta.o sf.o
 iwlmvm-y += scan.o time-event.o rs.o
 iwlmvm-y += power.o power_legacy.o bt-coex.o
 iwlmvm-y += led.o tt.o
-iwlmvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o
+iwlmvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o debugfs-vif.o
 iwlmvm-$(CONFIG_PM_SLEEP) += d3.o
 
 ccflags-y += -D__CHECK_ENDIAN__ -I$(src)/../
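Note: two new objects here: sf.o adds smart-FIFO (SF) support, which the binding.c changes below call via iwl_mvm_sf_update(), and debugfs-vif.o holds the per-interface debugfs handlers split out of debugfs.c (the new debugfs-vif.c appears later in this patch).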
diff --git a/drivers/net/wireless/iwlwifi/mvm/binding.c b/drivers/net/wireless/iwlwifi/mvm/binding.c
index 93fd1457954b..a1376539d2dc 100644
--- a/drivers/net/wireless/iwlwifi/mvm/binding.c
+++ b/drivers/net/wireless/iwlwifi/mvm/binding.c
@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -183,15 +183,29 @@ int iwl_mvm_binding_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 	if (WARN_ON_ONCE(!mvmvif->phy_ctxt))
 		return -EINVAL;
 
+	/*
+	 * Update SF - Disable if needed. if this fails, SF might still be on
+	 * while many macs are bound, which is forbidden - so fail the binding.
+	 */
+	if (iwl_mvm_sf_update(mvm, vif, false))
+		return -EINVAL;
+
 	return iwl_mvm_binding_update(mvm, vif, mvmvif->phy_ctxt, true);
 }
 
 int iwl_mvm_binding_remove_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 {
 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	int ret;
 
 	if (WARN_ON_ONCE(!mvmvif->phy_ctxt))
 		return -EINVAL;
 
-	return iwl_mvm_binding_update(mvm, vif, mvmvif->phy_ctxt, false);
+	ret = iwl_mvm_binding_update(mvm, vif, mvmvif->phy_ctxt, false);
+
+	if (!ret)
+		if (iwl_mvm_sf_update(mvm, vif, true))
+			IWL_ERR(mvm, "Failed to update SF state\n");
+
+	return ret;
 }
diff --git a/drivers/net/wireless/iwlwifi/mvm/bt-coex.c b/drivers/net/wireless/iwlwifi/mvm/bt-coex.c
index 75b72a956552..76cde6ce6551 100644
--- a/drivers/net/wireless/iwlwifi/mvm/bt-coex.c
+++ b/drivers/net/wireless/iwlwifi/mvm/bt-coex.c
@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -294,9 +294,9 @@ static const __le64 iwl_ci_mask[][3] = {
 		cpu_to_le64(0x0)
 	},
 	{
-		cpu_to_le64(0xFE00000000ULL),
+		cpu_to_le64(0xFFC0000000ULL),
 		cpu_to_le64(0x0ULL),
-		cpu_to_le64(0x0)
+		cpu_to_le64(0x0ULL)
 	},
 };
 
@@ -396,7 +396,8 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
 					    BT_VALID_ANT_ISOLATION |
 					    BT_VALID_ANT_ISOLATION_THRS |
 					    BT_VALID_TXTX_DELTA_FREQ_THRS |
-					    BT_VALID_TXRX_MAX_FREQ_0);
+					    BT_VALID_TXRX_MAX_FREQ_0 |
+					    BT_VALID_SYNC_TO_SCO);
 
 	if (mvm->cfg->bt_shared_single_ant)
 		memcpy(&bt_cmd->decision_lut, iwl_single_shared_ant,
@@ -514,7 +515,7 @@ static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id,
 	if (IS_ERR_OR_NULL(sta))
 		return 0;
 
-	mvmsta = (void *)sta->drv_priv;
+	mvmsta = iwl_mvm_sta_from_mac80211(sta);
 
 	/* nothing to do */
 	if (mvmsta->bt_reduced_txpower == enable)
@@ -846,7 +847,7 @@ static void iwl_mvm_bt_rssi_iterator(void *_data, u8 *mac,
 	if (IS_ERR_OR_NULL(sta))
 		return;
 
-	mvmsta = (void *)sta->drv_priv;
+	mvmsta = iwl_mvm_sta_from_mac80211(sta);
 
 	data->num_bss_ifaces++;
 
@@ -917,11 +918,11 @@ void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 u16 iwl_mvm_bt_coex_agg_time_limit(struct iwl_mvm *mvm,
 				   struct ieee80211_sta *sta)
 {
-	struct iwl_mvm_sta *mvmsta = (void *)sta->drv_priv;
+	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
 	enum iwl_bt_coex_lut_type lut_type;
 
 	if (le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) <
-	    BT_LOW_TRAFFIC)
+	    BT_HIGH_TRAFFIC)
 		return LINK_QUAL_AGG_TIME_LIMIT_DEF;
 
 	lut_type = iwl_get_coex_type(mvm, mvmsta->vif);
@@ -936,7 +937,7 @@ u16 iwl_mvm_bt_coex_agg_time_limit(struct iwl_mvm *mvm,
 bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm,
 				     struct ieee80211_sta *sta)
 {
-	struct iwl_mvm_sta *mvmsta = (void *)sta->drv_priv;
+	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
 
 	if (le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) <
 	    BT_HIGH_TRAFFIC)
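Note: the open-coded (void *)sta->drv_priv casts give way to iwl_mvm_sta_from_mac80211(). The accessor itself is not part of this hunk; it is assumed to be the thin wrapper below (defined in sta.h):

	static inline struct iwl_mvm_sta *
	iwl_mvm_sta_from_mac80211(struct ieee80211_sta *sta)
	{
		return (void *)sta->drv_priv;	/* mvm station state lives in drv_priv */
	}
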
diff --git a/drivers/net/wireless/iwlwifi/mvm/constants.h b/drivers/net/wireless/iwlwifi/mvm/constants.h
index 4b6d670c3509..036857698565 100644
--- a/drivers/net/wireless/iwlwifi/mvm/constants.h
+++ b/drivers/net/wireless/iwlwifi/mvm/constants.h
@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/mvm/d3.c b/drivers/net/wireless/iwlwifi/mvm/d3.c
index b9b81e881dd0..f36a7ee0267f 100644
--- a/drivers/net/wireless/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/iwlwifi/mvm/d3.c
@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -886,8 +886,7 @@ static int iwl_mvm_get_last_nonqos_seq(struct iwl_mvm *mvm,
 	if (err)
 		return err;
 
-	size = le32_to_cpu(cmd.resp_pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
-	size -= sizeof(cmd.resp_pkt->hdr);
+	size = iwl_rx_packet_payload_len(cmd.resp_pkt);
 	if (size < sizeof(__le16)) {
 		err = -EINVAL;
 	} else {
@@ -1211,15 +1210,10 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
 	if (ret)
 		goto out;
 #ifdef CONFIG_IWLWIFI_DEBUGFS
-	len = le32_to_cpu(d3_cfg_cmd.resp_pkt->len_n_flags) &
-		FH_RSCSR_FRAME_SIZE_MSK;
-	if (len >= sizeof(u32) * 2) {
+	len = iwl_rx_packet_payload_len(d3_cfg_cmd.resp_pkt);
+	if (len >= sizeof(u32)) {
 		mvm->d3_test_pme_ptr =
 			le32_to_cpup((__le32 *)d3_cfg_cmd.resp_pkt->data);
-	} else if (test) {
-		/* in test mode we require the pointer */
-		ret = -EIO;
-		goto out;
 	}
 #endif
 	iwl_free_resp(&d3_cfg_cmd);
@@ -1231,10 +1225,11 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
 	mvm->aux_sta.sta_id = old_aux_sta_id;
 	mvm_ap_sta->sta_id = old_ap_sta_id;
 	mvmvif->ap_sta_id = old_ap_sta_id;
- out_noreset:
-	kfree(key_data.rsc_tsc);
+
 	if (ret < 0)
 		ieee80211_restart_hw(mvm->hw);
+ out_noreset:
+	kfree(key_data.rsc_tsc);
 
 	mutex_unlock(&mvm->mutex);
 
@@ -1537,10 +1532,16 @@ static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm,
 	struct iwl_mvm_d3_gtk_iter_data gtkdata = {
 		.status = status,
 	};
+	u32 disconnection_reasons =
+		IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON |
+		IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH;
 
 	if (!status || !vif->bss_conf.bssid)
 		return false;
 
+	if (le32_to_cpu(status->wakeup_reasons) & disconnection_reasons)
+		return false;
+
 	/* find last GTK that we used initially, if any */
 	gtkdata.find_phase = true;
 	ieee80211_iter_keys(mvm->hw, vif,
@@ -1665,8 +1666,8 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
 	else
 		status_size = sizeof(struct iwl_wowlan_status_v4);
 
-	len = le32_to_cpu(cmd.resp_pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
-	if (len - sizeof(struct iwl_cmd_header) < status_size) {
+	len = iwl_rx_packet_payload_len(cmd.resp_pkt);
+	if (len < status_size) {
 		IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
 		goto out_free_resp;
 	}
@@ -1701,8 +1702,7 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
 		status.wake_packet = status_v4->wake_packet;
 	}
 
-	if (len - sizeof(struct iwl_cmd_header) !=
-	    status_size + ALIGN(status.wake_packet_bufsize, 4)) {
+	if (len != status_size + ALIGN(status.wake_packet_bufsize, 4)) {
 		IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
 		goto out_free_resp;
 	}
@@ -1805,6 +1805,10 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
 	iwl_mvm_read_d3_sram(mvm);
 
 	keep = iwl_mvm_query_wakeup_reasons(mvm, vif);
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+	if (keep)
+		mvm->keep_vif = vif;
+#endif
 	/* has unlocked the mutex, so skip that */
 	goto out;
 
@@ -1861,6 +1865,7 @@ static int iwl_mvm_d3_test_open(struct inode *inode, struct file *file)
 		return err;
 	}
 	mvm->d3_test_active = true;
+	mvm->keep_vif = NULL;
 	return 0;
 }
 
@@ -1871,10 +1876,14 @@ static ssize_t iwl_mvm_d3_test_read(struct file *file, char __user *user_buf,
 	u32 pme_asserted;
 
 	while (true) {
-		pme_asserted = iwl_trans_read_mem32(mvm->trans,
-						    mvm->d3_test_pme_ptr);
-		if (pme_asserted)
-			break;
+		/* read pme_ptr if available */
+		if (mvm->d3_test_pme_ptr) {
+			pme_asserted = iwl_trans_read_mem32(mvm->trans,
+						mvm->d3_test_pme_ptr);
+			if (pme_asserted)
+				break;
+		}
+
 		if (msleep_interruptible(100))
 			break;
 	}
@@ -1885,6 +1894,10 @@ static ssize_t iwl_mvm_d3_test_read(struct file *file, char __user *user_buf,
 static void iwl_mvm_d3_test_disconn_work_iter(void *_data, u8 *mac,
 					      struct ieee80211_vif *vif)
 {
+	/* skip the one we keep connection on */
+	if (_data == vif)
+		return;
+
 	if (vif->type == NL80211_IFTYPE_STATION)
 		ieee80211_connection_loss(vif);
 }
@@ -1911,7 +1924,7 @@ static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file)
 
 	ieee80211_iterate_active_interfaces_atomic(
 		mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
-		iwl_mvm_d3_test_disconn_work_iter, NULL);
+		iwl_mvm_d3_test_disconn_work_iter, mvm->keep_vif);
 
 	ieee80211_wake_queues(mvm->hw);
 
diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c
new file mode 100644
index 000000000000..0e29cd83a06a
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c
@@ -0,0 +1,546 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include "mvm.h"
+#include "debugfs.h"
+
+static void iwl_dbgfs_update_pm(struct iwl_mvm *mvm,
+				 struct ieee80211_vif *vif,
+				 enum iwl_dbgfs_pm_mask param, int val)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct iwl_dbgfs_pm *dbgfs_pm = &mvmvif->dbgfs_pm;
+
+	dbgfs_pm->mask |= param;
+
+	switch (param) {
+	case MVM_DEBUGFS_PM_KEEP_ALIVE: {
+		struct ieee80211_hw *hw = mvm->hw;
+		int dtimper = hw->conf.ps_dtim_period ?: 1;
+		int dtimper_msec = dtimper * vif->bss_conf.beacon_int;
+
+		IWL_DEBUG_POWER(mvm, "debugfs: set keep_alive= %d sec\n", val);
+		if (val * MSEC_PER_SEC < 3 * dtimper_msec)
+			IWL_WARN(mvm,
+				 "debugfs: keep alive period (%ld msec) is less than minimum required (%d msec)\n",
+				 val * MSEC_PER_SEC, 3 * dtimper_msec);
+		dbgfs_pm->keep_alive_seconds = val;
+		break;
+	}
+	case MVM_DEBUGFS_PM_SKIP_OVER_DTIM:
+		IWL_DEBUG_POWER(mvm, "skip_over_dtim %s\n",
+				val ? "enabled" : "disabled");
+		dbgfs_pm->skip_over_dtim = val;
+		break;
+	case MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS:
+		IWL_DEBUG_POWER(mvm, "skip_dtim_periods=%d\n", val);
+		dbgfs_pm->skip_dtim_periods = val;
+		break;
+	case MVM_DEBUGFS_PM_RX_DATA_TIMEOUT:
+		IWL_DEBUG_POWER(mvm, "rx_data_timeout=%d\n", val);
+		dbgfs_pm->rx_data_timeout = val;
+		break;
+	case MVM_DEBUGFS_PM_TX_DATA_TIMEOUT:
+		IWL_DEBUG_POWER(mvm, "tx_data_timeout=%d\n", val);
+		dbgfs_pm->tx_data_timeout = val;
+		break;
+	case MVM_DEBUGFS_PM_DISABLE_POWER_OFF:
+		IWL_DEBUG_POWER(mvm, "disable_power_off=%d\n", val);
+		dbgfs_pm->disable_power_off = val;
+		break;
+	case MVM_DEBUGFS_PM_LPRX_ENA:
+		IWL_DEBUG_POWER(mvm, "lprx %s\n", val ? "enabled" : "disabled");
+		dbgfs_pm->lprx_ena = val;
+		break;
+	case MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD:
+		IWL_DEBUG_POWER(mvm, "lprx_rssi_threshold=%d\n", val);
+		dbgfs_pm->lprx_rssi_threshold = val;
+		break;
+	case MVM_DEBUGFS_PM_SNOOZE_ENABLE:
+		IWL_DEBUG_POWER(mvm, "snooze_enable=%d\n", val);
+		dbgfs_pm->snooze_ena = val;
+		break;
+	case MVM_DEBUGFS_PM_UAPSD_MISBEHAVING:
+		IWL_DEBUG_POWER(mvm, "uapsd_misbehaving_enable=%d\n", val);
+		dbgfs_pm->uapsd_misbehaving = val;
+		break;
+	}
+}
+
+static ssize_t iwl_dbgfs_pm_params_write(struct ieee80211_vif *vif, char *buf,
+					 size_t count, loff_t *ppos)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct iwl_mvm *mvm = mvmvif->mvm;
+	enum iwl_dbgfs_pm_mask param;
+	int val, ret;
+
+	if (!strncmp("keep_alive=", buf, 11)) {
+		if (sscanf(buf + 11, "%d", &val) != 1)
+			return -EINVAL;
+		param = MVM_DEBUGFS_PM_KEEP_ALIVE;
+	} else if (!strncmp("skip_over_dtim=", buf, 15)) {
+		if (sscanf(buf + 15, "%d", &val) != 1)
+			return -EINVAL;
+		param = MVM_DEBUGFS_PM_SKIP_OVER_DTIM;
+	} else if (!strncmp("skip_dtim_periods=", buf, 18)) {
+		if (sscanf(buf + 18, "%d", &val) != 1)
+			return -EINVAL;
+		param = MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS;
+	} else if (!strncmp("rx_data_timeout=", buf, 16)) {
+		if (sscanf(buf + 16, "%d", &val) != 1)
+			return -EINVAL;
+		param = MVM_DEBUGFS_PM_RX_DATA_TIMEOUT;
+	} else if (!strncmp("tx_data_timeout=", buf, 16)) {
+		if (sscanf(buf + 16, "%d", &val) != 1)
+			return -EINVAL;
+		param = MVM_DEBUGFS_PM_TX_DATA_TIMEOUT;
+	} else if (!strncmp("disable_power_off=", buf, 18) &&
+		   !(mvm->fw->ucode_capa.flags &
+		     IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD)) {
+		if (sscanf(buf + 18, "%d", &val) != 1)
+			return -EINVAL;
+		param = MVM_DEBUGFS_PM_DISABLE_POWER_OFF;
+	} else if (!strncmp("lprx=", buf, 5)) {
+		if (sscanf(buf + 5, "%d", &val) != 1)
+			return -EINVAL;
+		param = MVM_DEBUGFS_PM_LPRX_ENA;
+	} else if (!strncmp("lprx_rssi_threshold=", buf, 20)) {
+		if (sscanf(buf + 20, "%d", &val) != 1)
+			return -EINVAL;
+		if (val > POWER_LPRX_RSSI_THRESHOLD_MAX || val <
+		    POWER_LPRX_RSSI_THRESHOLD_MIN)
+			return -EINVAL;
+		param = MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD;
+	} else if (!strncmp("snooze_enable=", buf, 14)) {
+		if (sscanf(buf + 14, "%d", &val) != 1)
+			return -EINVAL;
+		param = MVM_DEBUGFS_PM_SNOOZE_ENABLE;
+	} else if (!strncmp("uapsd_misbehaving=", buf, 18)) {
+		if (sscanf(buf + 18, "%d", &val) != 1)
+			return -EINVAL;
+		param = MVM_DEBUGFS_PM_UAPSD_MISBEHAVING;
+	} else {
+		return -EINVAL;
+	}
+
+	mutex_lock(&mvm->mutex);
+	iwl_dbgfs_update_pm(mvm, vif, param, val);
+	ret = iwl_mvm_power_update_mode(mvm, vif);
+	mutex_unlock(&mvm->mutex);
+
+	return ret ?: count;
+}
+
+static ssize_t iwl_dbgfs_pm_params_read(struct file *file,
+					char __user *user_buf,
+					size_t count, loff_t *ppos)
+{
+	struct ieee80211_vif *vif = file->private_data;
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct iwl_mvm *mvm = mvmvif->mvm;
+	char buf[512];
+	int bufsz = sizeof(buf);
+	int pos;
+
+	pos = iwl_mvm_power_dbgfs_read(mvm, vif, buf, bufsz);
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_mac_params_read(struct file *file,
+					 char __user *user_buf,
+					 size_t count, loff_t *ppos)
+{
+	struct ieee80211_vif *vif = file->private_data;
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct iwl_mvm *mvm = mvmvif->mvm;
+	u8 ap_sta_id;
+	struct ieee80211_chanctx_conf *chanctx_conf;
+	char buf[512];
+	int bufsz = sizeof(buf);
+	int pos = 0;
+	int i;
+
+	mutex_lock(&mvm->mutex);
+
+	ap_sta_id = mvmvif->ap_sta_id;
+
+	pos += scnprintf(buf+pos, bufsz-pos, "mac id/color: %d / %d\n",
+			 mvmvif->id, mvmvif->color);
+	pos += scnprintf(buf+pos, bufsz-pos, "bssid: %pM\n",
+			 vif->bss_conf.bssid);
+	pos += scnprintf(buf+pos, bufsz-pos, "QoS:\n");
+	for (i = 0; i < ARRAY_SIZE(mvmvif->queue_params); i++)
+		pos += scnprintf(buf+pos, bufsz-pos,
+				 "\t%d: txop:%d - cw_min:%d - cw_max = %d - aifs = %d upasd = %d\n",
+				 i, mvmvif->queue_params[i].txop,
+				 mvmvif->queue_params[i].cw_min,
+				 mvmvif->queue_params[i].cw_max,
+				 mvmvif->queue_params[i].aifs,
+				 mvmvif->queue_params[i].uapsd);
+
+	if (vif->type == NL80211_IFTYPE_STATION &&
+	    ap_sta_id != IWL_MVM_STATION_COUNT) {
+		struct ieee80211_sta *sta;
+
+		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[ap_sta_id],
+						lockdep_is_held(&mvm->mutex));
+		if (!IS_ERR_OR_NULL(sta)) {
+			struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
+
+			pos += scnprintf(buf+pos, bufsz-pos,
+					 "ap_sta_id %d - reduced Tx power %d\n",
+					 ap_sta_id,
+					 mvm_sta->bt_reduced_txpower);
+		}
+	}
+
+	rcu_read_lock();
+	chanctx_conf = rcu_dereference(vif->chanctx_conf);
+	if (chanctx_conf)
+		pos += scnprintf(buf+pos, bufsz-pos,
+				 "idle rx chains %d, active rx chains: %d\n",
+				 chanctx_conf->rx_chains_static,
+				 chanctx_conf->rx_chains_dynamic);
+	rcu_read_unlock();
+
+	mutex_unlock(&mvm->mutex);
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static void iwl_dbgfs_update_bf(struct ieee80211_vif *vif,
+				enum iwl_dbgfs_bf_mask param, int value)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct iwl_dbgfs_bf *dbgfs_bf = &mvmvif->dbgfs_bf;
+
+	dbgfs_bf->mask |= param;
+
+	switch (param) {
+	case MVM_DEBUGFS_BF_ENERGY_DELTA:
+		dbgfs_bf->bf_energy_delta = value;
+		break;
+	case MVM_DEBUGFS_BF_ROAMING_ENERGY_DELTA:
+		dbgfs_bf->bf_roaming_energy_delta = value;
+		break;
+	case MVM_DEBUGFS_BF_ROAMING_STATE:
+		dbgfs_bf->bf_roaming_state = value;
+		break;
+	case MVM_DEBUGFS_BF_TEMP_THRESHOLD:
+		dbgfs_bf->bf_temp_threshold = value;
+		break;
+	case MVM_DEBUGFS_BF_TEMP_FAST_FILTER:
+		dbgfs_bf->bf_temp_fast_filter = value;
+		break;
+	case MVM_DEBUGFS_BF_TEMP_SLOW_FILTER:
+		dbgfs_bf->bf_temp_slow_filter = value;
+		break;
+	case MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER:
+		dbgfs_bf->bf_enable_beacon_filter = value;
+		break;
+	case MVM_DEBUGFS_BF_DEBUG_FLAG:
+		dbgfs_bf->bf_debug_flag = value;
+		break;
+	case MVM_DEBUGFS_BF_ESCAPE_TIMER:
+		dbgfs_bf->bf_escape_timer = value;
+		break;
+	case MVM_DEBUGFS_BA_ENABLE_BEACON_ABORT:
+		dbgfs_bf->ba_enable_beacon_abort = value;
+		break;
+	case MVM_DEBUGFS_BA_ESCAPE_TIMER:
+		dbgfs_bf->ba_escape_timer = value;
+		break;
+	}
+}
+
+static ssize_t iwl_dbgfs_bf_params_write(struct ieee80211_vif *vif, char *buf,
+					 size_t count, loff_t *ppos)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct iwl_mvm *mvm = mvmvif->mvm;
+	enum iwl_dbgfs_bf_mask param;
+	int value, ret = 0;
+
+	if (!strncmp("bf_energy_delta=", buf, 16)) {
+		if (sscanf(buf+16, "%d", &value) != 1)
+			return -EINVAL;
+		if (value < IWL_BF_ENERGY_DELTA_MIN ||
+		    value > IWL_BF_ENERGY_DELTA_MAX)
+			return -EINVAL;
+		param = MVM_DEBUGFS_BF_ENERGY_DELTA;
+	} else if (!strncmp("bf_roaming_energy_delta=", buf, 24)) {
+		if (sscanf(buf+24, "%d", &value) != 1)
+			return -EINVAL;
+		if (value < IWL_BF_ROAMING_ENERGY_DELTA_MIN ||
+		    value > IWL_BF_ROAMING_ENERGY_DELTA_MAX)
+			return -EINVAL;
+		param = MVM_DEBUGFS_BF_ROAMING_ENERGY_DELTA;
+	} else if (!strncmp("bf_roaming_state=", buf, 17)) {
+		if (sscanf(buf+17, "%d", &value) != 1)
+			return -EINVAL;
+		if (value < IWL_BF_ROAMING_STATE_MIN ||
+		    value > IWL_BF_ROAMING_STATE_MAX)
+			return -EINVAL;
+		param = MVM_DEBUGFS_BF_ROAMING_STATE;
+	} else if (!strncmp("bf_temp_threshold=", buf, 18)) {
+		if (sscanf(buf+18, "%d", &value) != 1)
+			return -EINVAL;
+		if (value < IWL_BF_TEMP_THRESHOLD_MIN ||
+		    value > IWL_BF_TEMP_THRESHOLD_MAX)
+			return -EINVAL;
+		param = MVM_DEBUGFS_BF_TEMP_THRESHOLD;
+	} else if (!strncmp("bf_temp_fast_filter=", buf, 20)) {
+		if (sscanf(buf+20, "%d", &value) != 1)
+			return -EINVAL;
+		if (value < IWL_BF_TEMP_FAST_FILTER_MIN ||
+		    value > IWL_BF_TEMP_FAST_FILTER_MAX)
+			return -EINVAL;
+		param = MVM_DEBUGFS_BF_TEMP_FAST_FILTER;
+	} else if (!strncmp("bf_temp_slow_filter=", buf, 20)) {
+		if (sscanf(buf+20, "%d", &value) != 1)
+			return -EINVAL;
+		if (value < IWL_BF_TEMP_SLOW_FILTER_MIN ||
+		    value > IWL_BF_TEMP_SLOW_FILTER_MAX)
+			return -EINVAL;
+		param = MVM_DEBUGFS_BF_TEMP_SLOW_FILTER;
+	} else if (!strncmp("bf_enable_beacon_filter=", buf, 24)) {
+		if (sscanf(buf+24, "%d", &value) != 1)
+			return -EINVAL;
+		if (value < 0 || value > 1)
+			return -EINVAL;
+		param = MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER;
+	} else if (!strncmp("bf_debug_flag=", buf, 14)) {
+		if (sscanf(buf+14, "%d", &value) != 1)
+			return -EINVAL;
+		if (value < 0 || value > 1)
+			return -EINVAL;
+		param = MVM_DEBUGFS_BF_DEBUG_FLAG;
+	} else if (!strncmp("bf_escape_timer=", buf, 16)) {
+		if (sscanf(buf+16, "%d", &value) != 1)
+			return -EINVAL;
+		if (value < IWL_BF_ESCAPE_TIMER_MIN ||
+		    value > IWL_BF_ESCAPE_TIMER_MAX)
+			return -EINVAL;
+		param = MVM_DEBUGFS_BF_ESCAPE_TIMER;
+	} else if (!strncmp("ba_escape_timer=", buf, 16)) {
+		if (sscanf(buf+16, "%d", &value) != 1)
+			return -EINVAL;
+		if (value < IWL_BA_ESCAPE_TIMER_MIN ||
+		    value > IWL_BA_ESCAPE_TIMER_MAX)
+			return -EINVAL;
+		param = MVM_DEBUGFS_BA_ESCAPE_TIMER;
+	} else if (!strncmp("ba_enable_beacon_abort=", buf, 23)) {
+		if (sscanf(buf+23, "%d", &value) != 1)
+			return -EINVAL;
+		if (value < 0 || value > 1)
+			return -EINVAL;
+		param = MVM_DEBUGFS_BA_ENABLE_BEACON_ABORT;
+	} else {
+		return -EINVAL;
+	}
+
+	mutex_lock(&mvm->mutex);
+	iwl_dbgfs_update_bf(vif, param, value);
+	if (param == MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER && !value)
+		ret = iwl_mvm_disable_beacon_filter(mvm, vif);
+	else
+		ret = iwl_mvm_enable_beacon_filter(mvm, vif);
+	mutex_unlock(&mvm->mutex);
+
+	return ret ?: count;
+}
+
+static ssize_t iwl_dbgfs_bf_params_read(struct file *file,
+					char __user *user_buf,
+					size_t count, loff_t *ppos)
+{
+	struct ieee80211_vif *vif = file->private_data;
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	char buf[256];
+	int pos = 0;
+	const size_t bufsz = sizeof(buf);
+	struct iwl_beacon_filter_cmd cmd = {
+		IWL_BF_CMD_CONFIG_DEFAULTS,
+		.bf_enable_beacon_filter =
+			cpu_to_le32(IWL_BF_ENABLE_BEACON_FILTER_DEFAULT),
+		.ba_enable_beacon_abort =
+			cpu_to_le32(IWL_BA_ENABLE_BEACON_ABORT_DEFAULT),
+	};
+
+	iwl_mvm_beacon_filter_debugfs_parameters(vif, &cmd);
+	if (mvmvif->bf_data.bf_enabled)
+		cmd.bf_enable_beacon_filter = cpu_to_le32(1);
+	else
+		cmd.bf_enable_beacon_filter = 0;
+
+	pos += scnprintf(buf+pos, bufsz-pos, "bf_energy_delta = %d\n",
+			 le32_to_cpu(cmd.bf_energy_delta));
+	pos += scnprintf(buf+pos, bufsz-pos, "bf_roaming_energy_delta = %d\n",
+			 le32_to_cpu(cmd.bf_roaming_energy_delta));
+	pos += scnprintf(buf+pos, bufsz-pos, "bf_roaming_state = %d\n",
+			 le32_to_cpu(cmd.bf_roaming_state));
+	pos += scnprintf(buf+pos, bufsz-pos, "bf_temp_threshold = %d\n",
+			 le32_to_cpu(cmd.bf_temp_threshold));
+	pos += scnprintf(buf+pos, bufsz-pos, "bf_temp_fast_filter = %d\n",
+			 le32_to_cpu(cmd.bf_temp_fast_filter));
+	pos += scnprintf(buf+pos, bufsz-pos, "bf_temp_slow_filter = %d\n",
+			 le32_to_cpu(cmd.bf_temp_slow_filter));
+	pos += scnprintf(buf+pos, bufsz-pos, "bf_enable_beacon_filter = %d\n",
+			 le32_to_cpu(cmd.bf_enable_beacon_filter));
+	pos += scnprintf(buf+pos, bufsz-pos, "bf_debug_flag = %d\n",
+			 le32_to_cpu(cmd.bf_debug_flag));
+	pos += scnprintf(buf+pos, bufsz-pos, "bf_escape_timer = %d\n",
+			 le32_to_cpu(cmd.bf_escape_timer));
+	pos += scnprintf(buf+pos, bufsz-pos, "ba_escape_timer = %d\n",
+			 le32_to_cpu(cmd.ba_escape_timer));
+	pos += scnprintf(buf+pos, bufsz-pos, "ba_enable_beacon_abort = %d\n",
+			 le32_to_cpu(cmd.ba_enable_beacon_abort));
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+#define MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \
+	_MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct ieee80211_vif)
+#define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \
+	_MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz, struct ieee80211_vif)
+#define MVM_DEBUGFS_ADD_FILE_VIF(name, parent, mode) do {		\
+		if (!debugfs_create_file(#name, mode, parent, vif,	\
+					 &iwl_dbgfs_##name##_ops))	\
+			goto err;					\
+	} while (0)
+
+MVM_DEBUGFS_READ_FILE_OPS(mac_params);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(pm_params, 32);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(bf_params, 256);
+
+void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+	struct dentry *dbgfs_dir = vif->debugfs_dir;
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	char buf[100];
+
+	/*
+	 * Check if debugfs directory already exist before creating it.
+	 * This may happen when, for example, resetting hw or suspend-resume
+	 */
+	if (!dbgfs_dir || mvmvif->dbgfs_dir)
+		return;
+
+	mvmvif->dbgfs_dir = debugfs_create_dir("iwlmvm", dbgfs_dir);
+	mvmvif->mvm = mvm;
+
+	if (!mvmvif->dbgfs_dir) {
+		IWL_ERR(mvm, "Failed to create debugfs directory under %s\n",
+			dbgfs_dir->d_name.name);
+		return;
+	}
+
+	if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM &&
+	    ((vif->type == NL80211_IFTYPE_STATION && !vif->p2p) ||
+	     (vif->type == NL80211_IFTYPE_STATION && vif->p2p &&
+	      mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_P2P_PS)))
+		MVM_DEBUGFS_ADD_FILE_VIF(pm_params, mvmvif->dbgfs_dir, S_IWUSR |
+					 S_IRUSR);
+
+	MVM_DEBUGFS_ADD_FILE_VIF(mac_params, mvmvif->dbgfs_dir,
+				 S_IRUSR);
+
+	if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p &&
+	    mvmvif == mvm->bf_allowed_vif)
+		MVM_DEBUGFS_ADD_FILE_VIF(bf_params, mvmvif->dbgfs_dir,
+					 S_IRUSR | S_IWUSR);
+
+	/*
+	 * Create symlink for convenience pointing to interface specific
+	 * debugfs entries for the driver. For example, under
+	 * /sys/kernel/debug/iwlwifi/0000\:02\:00.0/iwlmvm/
+	 * find
+	 * netdev:wlan0 -> ../../../ieee80211/phy0/netdev:wlan0/iwlmvm/
+	 */
+	snprintf(buf, 100, "../../../%s/%s/%s/%s",
+		 dbgfs_dir->d_parent->d_parent->d_name.name,
+		 dbgfs_dir->d_parent->d_name.name,
+		 dbgfs_dir->d_name.name,
+		 mvmvif->dbgfs_dir->d_name.name);
+
+	mvmvif->dbgfs_slink = debugfs_create_symlink(dbgfs_dir->d_name.name,
+						     mvm->debugfs_dir, buf);
+	if (!mvmvif->dbgfs_slink)
+		IWL_ERR(mvm, "Can't create debugfs symbolic link under %s\n",
+			dbgfs_dir->d_name.name);
+	return;
+err:
+	IWL_ERR(mvm, "Can't create debugfs entity\n");
+}
+
+void iwl_mvm_vif_dbgfs_clean(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+	debugfs_remove(mvmvif->dbgfs_slink);
+	mvmvif->dbgfs_slink = NULL;
+
+	debugfs_remove_recursive(mvmvif->dbgfs_dir);
+	mvmvif->dbgfs_dir = NULL;
+}
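Note: the new file creates three per-interface entries under the vif's debugfs directory in iwlmvm/ -- pm_params (read/write), mac_params (read only) and bf_params (read/write, only for the vif allowed to use beacon filtering) -- plus a netdev:<name> convenience symlink under the device's own iwlmvm directory. Writes take a single "key=value" token matching the parsers above; for example, writing "keep_alive=30" (the value here is arbitrary) to pm_params sets the keep-alive period and triggers iwl_mvm_power_update_mode() under mvm->mutex.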
diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
index a8fe6b41f9a3..369d4c90e669 100644
--- a/drivers/net/wireless/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -63,30 +63,18 @@
 #include "mvm.h"
 #include "sta.h"
 #include "iwl-io.h"
+#include "iwl-prph.h"
+#include "debugfs.h"
 
-struct iwl_dbgfs_mvm_ctx {
-	struct iwl_mvm *mvm;
-	struct ieee80211_vif *vif;
-};
-
-static ssize_t iwl_dbgfs_tx_flush_write(struct file *file,
-					const char __user *user_buf,
+static ssize_t iwl_dbgfs_tx_flush_write(struct iwl_mvm *mvm, char *buf,
 					size_t count, loff_t *ppos)
 {
-	struct iwl_mvm *mvm = file->private_data;
-
-	char buf[16];
-	int buf_size, ret;
+	int ret;
 	u32 scd_q_msk;
 
 	if (!mvm->ucode_loaded || mvm->cur_ucode != IWL_UCODE_REGULAR)
 		return -EIO;
 
-	memset(buf, 0, sizeof(buf));
-	buf_size = min(count, sizeof(buf) - 1);
-	if (copy_from_user(buf, user_buf, buf_size))
-		return -EFAULT;
-
 	if (sscanf(buf, "%x", &scd_q_msk) != 1)
 		return -EINVAL;
 
@@ -99,24 +87,15 @@ static ssize_t iwl_dbgfs_tx_flush_write(struct file *file,
 	return ret;
 }
 
-static ssize_t iwl_dbgfs_sta_drain_write(struct file *file,
-					 const char __user *user_buf,
+static ssize_t iwl_dbgfs_sta_drain_write(struct iwl_mvm *mvm, char *buf,
 					 size_t count, loff_t *ppos)
 {
-	struct iwl_mvm *mvm = file->private_data;
 	struct ieee80211_sta *sta;
-
-	char buf[8];
-	int buf_size, sta_id, drain, ret;
+	int sta_id, drain, ret;
 
 	if (!mvm->ucode_loaded || mvm->cur_ucode != IWL_UCODE_REGULAR)
 		return -EIO;
 
-	memset(buf, 0, sizeof(buf));
-	buf_size = min(count, sizeof(buf) - 1);
-	if (copy_from_user(buf, user_buf, buf_size))
-		return -EFAULT;
-
 	if (sscanf(buf, "%d %d", &sta_id, &drain) != 2)
 		return -EINVAL;
 	if (sta_id < 0 || sta_id >= IWL_MVM_STATION_COUNT)
@@ -144,73 +123,57 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file, char __user *user_buf,
 {
 	struct iwl_mvm *mvm = file->private_data;
 	const struct fw_img *img;
-	int ofs, len, pos = 0;
-	size_t bufsz, ret;
-	char *buf;
+	unsigned int ofs, len;
+	size_t ret;
 	u8 *ptr;
 
 	if (!mvm->ucode_loaded)
 		return -EINVAL;
 
 	/* default is to dump the entire data segment */
-	if (!mvm->dbgfs_sram_offset && !mvm->dbgfs_sram_len) {
-		img = &mvm->fw->img[mvm->cur_ucode];
-		ofs = img->sec[IWL_UCODE_SECTION_DATA].offset;
-		len = img->sec[IWL_UCODE_SECTION_DATA].len;
-	} else {
+	img = &mvm->fw->img[mvm->cur_ucode];
+	ofs = img->sec[IWL_UCODE_SECTION_DATA].offset;
+	len = img->sec[IWL_UCODE_SECTION_DATA].len;
+
+	if (mvm->dbgfs_sram_len) {
 		ofs = mvm->dbgfs_sram_offset;
 		len = mvm->dbgfs_sram_len;
 	}
 
-	bufsz = len * 4 + 256;
-	buf = kzalloc(bufsz, GFP_KERNEL);
-	if (!buf)
-		return -ENOMEM;
-
 	ptr = kzalloc(len, GFP_KERNEL);
-	if (!ptr) {
-		kfree(buf);
+	if (!ptr)
 		return -ENOMEM;
-	}
-
-	pos += scnprintf(buf + pos, bufsz - pos, "sram_len: 0x%x\n", len);
-	pos += scnprintf(buf + pos, bufsz - pos, "sram_offset: 0x%x\n", ofs);
 
 	iwl_trans_read_mem_bytes(mvm->trans, ofs, ptr, len);
-	for (ofs = 0; ofs < len; ofs += 16) {
-		pos += scnprintf(buf + pos, bufsz - pos, "0x%.4x ", ofs);
-		hex_dump_to_buffer(ptr + ofs, 16, 16, 1, buf + pos,
-				   bufsz - pos, false);
-		pos += strlen(buf + pos);
-		if (bufsz - pos > 0)
-			buf[pos++] = '\n';
-	}
 
-	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+	ret = simple_read_from_buffer(user_buf, count, ppos, ptr, len);
 
-	kfree(buf);
 	kfree(ptr);
 
 	return ret;
 }
 
-static ssize_t iwl_dbgfs_sram_write(struct file *file,
-				    const char __user *user_buf, size_t count,
-				    loff_t *ppos)
+static ssize_t iwl_dbgfs_sram_write(struct iwl_mvm *mvm, char *buf,
+				    size_t count, loff_t *ppos)
 {
-	struct iwl_mvm *mvm = file->private_data;
-	char buf[64];
-	int buf_size;
+	const struct fw_img *img;
 	u32 offset, len;
+	u32 img_offset, img_len;
+
+	if (!mvm->ucode_loaded)
+		return -EINVAL;
 
-	memset(buf, 0, sizeof(buf));
-	buf_size = min(count, sizeof(buf) -  1);
-	if (copy_from_user(buf, user_buf, buf_size))
-		return -EFAULT;
+	img = &mvm->fw->img[mvm->cur_ucode];
+	img_offset = img->sec[IWL_UCODE_SECTION_DATA].offset;
+	img_len = img->sec[IWL_UCODE_SECTION_DATA].len;
 
 	if (sscanf(buf, "%x,%x", &offset, &len) == 2) {
 		if ((offset & 0x3) || (len & 0x3))
 			return -EINVAL;
+
+		if (offset + len > img_offset + img_len)
+			return -EINVAL;
+
 		mvm->dbgfs_sram_offset = offset;
 		mvm->dbgfs_sram_len = len;
 	} else {
@@ -267,22 +230,14 @@ static ssize_t iwl_dbgfs_disable_power_off_read(struct file *file,
 	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
 }
 
-static ssize_t iwl_dbgfs_disable_power_off_write(struct file *file,
-						 const char __user *user_buf,
+static ssize_t iwl_dbgfs_disable_power_off_write(struct iwl_mvm *mvm, char *buf,
 						 size_t count, loff_t *ppos)
 {
-	struct iwl_mvm *mvm = file->private_data;
-	char buf[64] = {};
-	int ret;
-	int val;
+	int ret, val;
 
 	if (!mvm->ucode_loaded)
 		return -EIO;
 
-	count = min_t(size_t, count, sizeof(buf) - 1);
-	if (copy_from_user(buf, user_buf, count))
-		return -EFAULT;
-
 	if (!strncmp("disable_power_off_d0=", buf, 21)) {
 		if (sscanf(buf + 21, "%d", &val) != 1)
 			return -EINVAL;
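Note: from here on, most debugfs write handlers lose their file*/copy_from_user boilerplate and take a pre-copied, NUL-terminated buffer instead; the wrapping is centralized in the new debugfs.h macros (_MVM_DEBUGFS_WRITE_FILE_OPS and friends, included above). Those macros are not shown in this patch; the generated wrapper is assumed to look roughly like this per-handler expansion (names and buffer size illustrative):

	static ssize_t _iwl_dbgfs_tx_flush_write(struct file *file,
						 const char __user *user_buf,
						 size_t count, loff_t *ppos)
	{
		struct iwl_mvm *mvm = file->private_data;
		char buf[16] = {};
		size_t buf_size = min(count, sizeof(buf) - 1);

		if (copy_from_user(buf, user_buf, buf_size))
			return -EFAULT;

		/* hand the bounded, NUL-terminated buffer to the real handler */
		return iwl_dbgfs_tx_flush_write(mvm, buf, buf_size, ppos);
	}
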
@@ -302,212 +257,6 @@ static ssize_t iwl_dbgfs_disable_power_off_write(struct file *file,
 	return ret ?: count;
 }
 
-static void iwl_dbgfs_update_pm(struct iwl_mvm *mvm,
-				 struct ieee80211_vif *vif,
-				 enum iwl_dbgfs_pm_mask param, int val)
-{
-	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
-	struct iwl_dbgfs_pm *dbgfs_pm = &mvmvif->dbgfs_pm;
-
-	dbgfs_pm->mask |= param;
-
-	switch (param) {
-	case MVM_DEBUGFS_PM_KEEP_ALIVE: {
-		struct ieee80211_hw *hw = mvm->hw;
-		int dtimper = hw->conf.ps_dtim_period ?: 1;
-		int dtimper_msec = dtimper * vif->bss_conf.beacon_int;
-
-		IWL_DEBUG_POWER(mvm, "debugfs: set keep_alive= %d sec\n", val);
-		if (val * MSEC_PER_SEC < 3 * dtimper_msec) {
-			IWL_WARN(mvm,
-				 "debugfs: keep alive period (%ld msec) is less than minimum required (%d msec)\n",
-				 val * MSEC_PER_SEC, 3 * dtimper_msec);
-		}
-		dbgfs_pm->keep_alive_seconds = val;
-		break;
-	}
-	case MVM_DEBUGFS_PM_SKIP_OVER_DTIM:
-		IWL_DEBUG_POWER(mvm, "skip_over_dtim %s\n",
-				val ? "enabled" : "disabled");
-		dbgfs_pm->skip_over_dtim = val;
-		break;
-	case MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS:
-		IWL_DEBUG_POWER(mvm, "skip_dtim_periods=%d\n", val);
-		dbgfs_pm->skip_dtim_periods = val;
-		break;
-	case MVM_DEBUGFS_PM_RX_DATA_TIMEOUT:
-		IWL_DEBUG_POWER(mvm, "rx_data_timeout=%d\n", val);
-		dbgfs_pm->rx_data_timeout = val;
-		break;
-	case MVM_DEBUGFS_PM_TX_DATA_TIMEOUT:
-		IWL_DEBUG_POWER(mvm, "tx_data_timeout=%d\n", val);
-		dbgfs_pm->tx_data_timeout = val;
-		break;
-	case MVM_DEBUGFS_PM_DISABLE_POWER_OFF:
-		IWL_DEBUG_POWER(mvm, "disable_power_off=%d\n", val);
-		dbgfs_pm->disable_power_off = val;
-		break;
-	case MVM_DEBUGFS_PM_LPRX_ENA:
-		IWL_DEBUG_POWER(mvm, "lprx %s\n", val ? "enabled" : "disabled");
-		dbgfs_pm->lprx_ena = val;
-		break;
-	case MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD:
-		IWL_DEBUG_POWER(mvm, "lprx_rssi_threshold=%d\n", val);
-		dbgfs_pm->lprx_rssi_threshold = val;
-		break;
-	case MVM_DEBUGFS_PM_SNOOZE_ENABLE:
-		IWL_DEBUG_POWER(mvm, "snooze_enable=%d\n", val);
-		dbgfs_pm->snooze_ena = val;
-		break;
-	}
-}
-
-static ssize_t iwl_dbgfs_pm_params_write(struct file *file,
-					 const char __user *user_buf,
-					 size_t count, loff_t *ppos)
-{
-	struct ieee80211_vif *vif = file->private_data;
-	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
-	struct iwl_mvm *mvm = mvmvif->dbgfs_data;
-	enum iwl_dbgfs_pm_mask param;
-	char buf[32] = {};
-	int val;
-	int ret;
-
-	count = min_t(size_t, count, sizeof(buf) - 1);
-	if (copy_from_user(buf, user_buf, count))
-		return -EFAULT;
-
-	if (!strncmp("keep_alive=", buf, 11)) {
-		if (sscanf(buf + 11, "%d", &val) != 1)
-			return -EINVAL;
-		param = MVM_DEBUGFS_PM_KEEP_ALIVE;
-	} else if (!strncmp("skip_over_dtim=", buf, 15)) {
-		if (sscanf(buf + 15, "%d", &val) != 1)
-			return -EINVAL;
-		param = MVM_DEBUGFS_PM_SKIP_OVER_DTIM;
-	} else if (!strncmp("skip_dtim_periods=", buf, 18)) {
-		if (sscanf(buf + 18, "%d", &val) != 1)
-			return -EINVAL;
-		param = MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS;
-	} else if (!strncmp("rx_data_timeout=", buf, 16)) {
-		if (sscanf(buf + 16, "%d", &val) != 1)
-			return -EINVAL;
-		param = MVM_DEBUGFS_PM_RX_DATA_TIMEOUT;
-	} else if (!strncmp("tx_data_timeout=", buf, 16)) {
-		if (sscanf(buf + 16, "%d", &val) != 1)
-			return -EINVAL;
-		param = MVM_DEBUGFS_PM_TX_DATA_TIMEOUT;
-	} else if (!strncmp("disable_power_off=", buf, 18) &&
-		   !(mvm->fw->ucode_capa.flags &
-		     IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD)) {
-		if (sscanf(buf + 18, "%d", &val) != 1)
-			return -EINVAL;
-		param = MVM_DEBUGFS_PM_DISABLE_POWER_OFF;
-	} else if (!strncmp("lprx=", buf, 5)) {
-		if (sscanf(buf + 5, "%d", &val) != 1)
-			return -EINVAL;
-		param = MVM_DEBUGFS_PM_LPRX_ENA;
-	} else if (!strncmp("lprx_rssi_threshold=", buf, 20)) {
-		if (sscanf(buf + 20, "%d", &val) != 1)
-			return -EINVAL;
-		if (val > POWER_LPRX_RSSI_THRESHOLD_MAX || val <
-		    POWER_LPRX_RSSI_THRESHOLD_MIN)
-			return -EINVAL;
-		param = MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD;
-	} else if (!strncmp("snooze_enable=", buf, 14)) {
-		if (sscanf(buf + 14, "%d", &val) != 1)
-			return -EINVAL;
-		param = MVM_DEBUGFS_PM_SNOOZE_ENABLE;
-	} else {
-		return -EINVAL;
-	}
-
-	mutex_lock(&mvm->mutex);
-	iwl_dbgfs_update_pm(mvm, vif, param, val);
-	ret = iwl_mvm_power_update_mode(mvm, vif);
-	mutex_unlock(&mvm->mutex);
-
-	return ret ?: count;
-}
-
-static ssize_t iwl_dbgfs_pm_params_read(struct file *file,
-					char __user *user_buf,
-					size_t count, loff_t *ppos)
-{
-	struct ieee80211_vif *vif = file->private_data;
-	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
-	struct iwl_mvm *mvm = mvmvif->dbgfs_data;
-	char buf[512];
-	int bufsz = sizeof(buf);
-	int pos;
-
-	pos = iwl_mvm_power_dbgfs_read(mvm, vif, buf, bufsz);
-
-	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-}
-
-static ssize_t iwl_dbgfs_mac_params_read(struct file *file,
-					 char __user *user_buf,
-					 size_t count, loff_t *ppos)
-{
-	struct ieee80211_vif *vif = file->private_data;
-	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
-	struct iwl_mvm *mvm = mvmvif->dbgfs_data;
-	u8 ap_sta_id;
-	struct ieee80211_chanctx_conf *chanctx_conf;
-	char buf[512];
-	int bufsz = sizeof(buf);
-	int pos = 0;
-	int i;
-
-	mutex_lock(&mvm->mutex);
-
-	ap_sta_id = mvmvif->ap_sta_id;
-
-	pos += scnprintf(buf+pos, bufsz-pos, "mac id/color: %d / %d\n",
-			 mvmvif->id, mvmvif->color);
-	pos += scnprintf(buf+pos, bufsz-pos, "bssid: %pM\n",
-			 vif->bss_conf.bssid);
-	pos += scnprintf(buf+pos, bufsz-pos, "QoS:\n");
-	for (i = 0; i < ARRAY_SIZE(mvmvif->queue_params); i++) {
-		pos += scnprintf(buf+pos, bufsz-pos,
-				 "\t%d: txop:%d - cw_min:%d - cw_max = %d - aifs = %d upasd = %d\n",
-				 i, mvmvif->queue_params[i].txop,
-				 mvmvif->queue_params[i].cw_min,
-				 mvmvif->queue_params[i].cw_max,
-				 mvmvif->queue_params[i].aifs,
-				 mvmvif->queue_params[i].uapsd);
-	}
-
-	if (vif->type == NL80211_IFTYPE_STATION &&
-	    ap_sta_id != IWL_MVM_STATION_COUNT) {
-		struct ieee80211_sta *sta;
-		struct iwl_mvm_sta *mvm_sta;
-
-		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[ap_sta_id],
-						lockdep_is_held(&mvm->mutex));
-		mvm_sta = (void *)sta->drv_priv;
-		pos += scnprintf(buf+pos, bufsz-pos,
-				 "ap_sta_id %d - reduced Tx power %d\n",
-				 ap_sta_id, mvm_sta->bt_reduced_txpower);
-	}
-
-	rcu_read_lock();
-	chanctx_conf = rcu_dereference(vif->chanctx_conf);
-	if (chanctx_conf) {
-		pos += scnprintf(buf+pos, bufsz-pos,
-				 "idle rx chains %d, active rx chains: %d\n",
-				 chanctx_conf->rx_chains_static,
-				 chanctx_conf->rx_chains_dynamic);
-	}
-	rcu_read_unlock();
-
-	mutex_unlock(&mvm->mutex);
-
-	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-}
-
 #define BT_MBOX_MSG(_notif, _num, _field)				     \
 	((le32_to_cpu((_notif)->mbox_msg[(_num)]) & BT_MBOX##_num##_##_field)\
 	>> BT_MBOX##_num##_##_field##_POS)
@@ -783,11 +532,9 @@ static ssize_t iwl_dbgfs_fw_rx_stats_read(struct file *file,
 }
 #undef PRINT_STAT_LE32
 
-static ssize_t iwl_dbgfs_fw_restart_write(struct file *file,
-					  const char __user *user_buf,
+static ssize_t iwl_dbgfs_fw_restart_write(struct iwl_mvm *mvm, char *buf,
 					  size_t count, loff_t *ppos)
 {
-	struct iwl_mvm *mvm = file->private_data;
 	int ret;
 
 	mutex_lock(&mvm->mutex);
@@ -804,6 +551,14 @@ static ssize_t iwl_dbgfs_fw_restart_write(struct file *file,
 	return count;
 }
 
+static ssize_t iwl_dbgfs_fw_nmi_write(struct iwl_mvm *mvm, char *buf,
+				      size_t count, loff_t *ppos)
+{
+	iwl_write_prph(mvm->trans, DEVICE_SET_NMI_REG, 1);
+
+	return count;
+}
+
 static ssize_t
 iwl_dbgfs_scan_ant_rxchain_read(struct file *file,
 				char __user *user_buf,
@@ -828,21 +583,11 @@ iwl_dbgfs_scan_ant_rxchain_read(struct file *file,
 }
 
 static ssize_t
-iwl_dbgfs_scan_ant_rxchain_write(struct file *file,
-				 const char __user *user_buf,
+iwl_dbgfs_scan_ant_rxchain_write(struct iwl_mvm *mvm, char *buf,
 				 size_t count, loff_t *ppos)
 {
-	struct iwl_mvm *mvm = file->private_data;
-	char buf[8];
-	int buf_size;
 	u8 scan_rx_ant;
 
-	memset(buf, 0, sizeof(buf));
-	buf_size = min(count, sizeof(buf) - 1);
-
-	/* get the argument from the user and check if it is valid */
-	if (copy_from_user(buf, user_buf, buf_size))
-		return -EFAULT;
 	if (sscanf(buf, "%hhx", &scan_rx_ant) != 1)
 		return -EINVAL;
 	if (scan_rx_ant > ANT_ABC)
@@ -850,228 +595,17 @@ iwl_dbgfs_scan_ant_rxchain_write(struct file *file,
 	if (scan_rx_ant & ~iwl_fw_valid_rx_ant(mvm->fw))
 		return -EINVAL;
 
-	/* change the rx antennas for scan command */
 	mvm->scan_rx_ant = scan_rx_ant;
 
 	return count;
 }
 
-
-static void iwl_dbgfs_update_bf(struct ieee80211_vif *vif,
-				enum iwl_dbgfs_bf_mask param, int value)
-{
-	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
-	struct iwl_dbgfs_bf *dbgfs_bf = &mvmvif->dbgfs_bf;
-
-	dbgfs_bf->mask |= param;
-
-	switch (param) {
-	case MVM_DEBUGFS_BF_ENERGY_DELTA:
-		dbgfs_bf->bf_energy_delta = value;
-		break;
-	case MVM_DEBUGFS_BF_ROAMING_ENERGY_DELTA:
-		dbgfs_bf->bf_roaming_energy_delta = value;
-		break;
-	case MVM_DEBUGFS_BF_ROAMING_STATE:
-		dbgfs_bf->bf_roaming_state = value;
-		break;
-	case MVM_DEBUGFS_BF_TEMP_THRESHOLD:
-		dbgfs_bf->bf_temp_threshold = value;
-		break;
-	case MVM_DEBUGFS_BF_TEMP_FAST_FILTER:
-		dbgfs_bf->bf_temp_fast_filter = value;
-		break;
-	case MVM_DEBUGFS_BF_TEMP_SLOW_FILTER:
-		dbgfs_bf->bf_temp_slow_filter = value;
-		break;
-	case MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER:
-		dbgfs_bf->bf_enable_beacon_filter = value;
-		break;
-	case MVM_DEBUGFS_BF_DEBUG_FLAG:
-		dbgfs_bf->bf_debug_flag = value;
-		break;
-	case MVM_DEBUGFS_BF_ESCAPE_TIMER:
-		dbgfs_bf->bf_escape_timer = value;
-		break;
-	case MVM_DEBUGFS_BA_ENABLE_BEACON_ABORT:
-		dbgfs_bf->ba_enable_beacon_abort = value;
-		break;
-	case MVM_DEBUGFS_BA_ESCAPE_TIMER:
-		dbgfs_bf->ba_escape_timer = value;
-		break;
-	}
-}
-
-static ssize_t iwl_dbgfs_bf_params_write(struct file *file,
-					 const char __user *user_buf,
-					 size_t count, loff_t *ppos)
-{
-	struct ieee80211_vif *vif = file->private_data;
-	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
-	struct iwl_mvm *mvm = mvmvif->dbgfs_data;
-	enum iwl_dbgfs_bf_mask param;
-	char buf[256];
-	int buf_size;
-	int value;
-	int ret = 0;
-
-	memset(buf, 0, sizeof(buf));
-	buf_size = min(count, sizeof(buf) - 1);
-	if (copy_from_user(buf, user_buf, buf_size))
-		return -EFAULT;
-
-	if (!strncmp("bf_energy_delta=", buf, 16)) {
-		if (sscanf(buf+16, "%d", &value) != 1)
-			return -EINVAL;
-		if (value < IWL_BF_ENERGY_DELTA_MIN ||
-		    value > IWL_BF_ENERGY_DELTA_MAX)
-			return -EINVAL;
-		param = MVM_DEBUGFS_BF_ENERGY_DELTA;
-	} else if (!strncmp("bf_roaming_energy_delta=", buf, 24)) {
-		if (sscanf(buf+24, "%d", &value) != 1)
-			return -EINVAL;
-		if (value < IWL_BF_ROAMING_ENERGY_DELTA_MIN ||
-		    value > IWL_BF_ROAMING_ENERGY_DELTA_MAX)
-			return -EINVAL;
-		param = MVM_DEBUGFS_BF_ROAMING_ENERGY_DELTA;
-	} else if (!strncmp("bf_roaming_state=", buf, 17)) {
-		if (sscanf(buf+17, "%d", &value) != 1)
-			return -EINVAL;
-		if (value < IWL_BF_ROAMING_STATE_MIN ||
-		    value > IWL_BF_ROAMING_STATE_MAX)
-			return -EINVAL;
-		param = MVM_DEBUGFS_BF_ROAMING_STATE;
-	} else if (!strncmp("bf_temp_threshold=", buf, 18)) {
-		if (sscanf(buf+18, "%d", &value) != 1)
-			return -EINVAL;
-		if (value < IWL_BF_TEMP_THRESHOLD_MIN ||
-		    value > IWL_BF_TEMP_THRESHOLD_MAX)
-			return -EINVAL;
-		param = MVM_DEBUGFS_BF_TEMP_THRESHOLD;
-	} else if (!strncmp("bf_temp_fast_filter=", buf, 20)) {
-		if (sscanf(buf+20, "%d", &value) != 1)
-			return -EINVAL;
-		if (value < IWL_BF_TEMP_FAST_FILTER_MIN ||
-		    value > IWL_BF_TEMP_FAST_FILTER_MAX)
-			return -EINVAL;
-		param = MVM_DEBUGFS_BF_TEMP_FAST_FILTER;
-	} else if (!strncmp("bf_temp_slow_filter=", buf, 20)) {
-		if (sscanf(buf+20, "%d", &value) != 1)
-			return -EINVAL;
-		if (value < IWL_BF_TEMP_SLOW_FILTER_MIN ||
-		    value > IWL_BF_TEMP_SLOW_FILTER_MAX)
-			return -EINVAL;
-		param = MVM_DEBUGFS_BF_TEMP_SLOW_FILTER;
-	} else if (!strncmp("bf_enable_beacon_filter=", buf, 24)) {
-		if (sscanf(buf+24, "%d", &value) != 1)
-			return -EINVAL;
-		if (value < 0 || value > 1)
-			return -EINVAL;
-		param = MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER;
-	} else if (!strncmp("bf_debug_flag=", buf, 14)) {
-		if (sscanf(buf+14, "%d", &value) != 1)
-			return -EINVAL;
-		if (value < 0 || value > 1)
-			return -EINVAL;
-		param = MVM_DEBUGFS_BF_DEBUG_FLAG;
-	} else if (!strncmp("bf_escape_timer=", buf, 16)) {
-		if (sscanf(buf+16, "%d", &value) != 1)
-			return -EINVAL;
-		if (value < IWL_BF_ESCAPE_TIMER_MIN ||
-		    value > IWL_BF_ESCAPE_TIMER_MAX)
-			return -EINVAL;
-		param = MVM_DEBUGFS_BF_ESCAPE_TIMER;
-	} else if (!strncmp("ba_escape_timer=", buf, 16)) {
-		if (sscanf(buf+16, "%d", &value) != 1)
-			return -EINVAL;
-		if (value < IWL_BA_ESCAPE_TIMER_MIN ||
-		    value > IWL_BA_ESCAPE_TIMER_MAX)
-			return -EINVAL;
-		param = MVM_DEBUGFS_BA_ESCAPE_TIMER;
-	} else if (!strncmp("ba_enable_beacon_abort=", buf, 23)) {
-		if (sscanf(buf+23, "%d", &value) != 1)
-			return -EINVAL;
-		if (value < 0 || value > 1)
-			return -EINVAL;
-		param = MVM_DEBUGFS_BA_ENABLE_BEACON_ABORT;
-	} else {
-		return -EINVAL;
-	}
-
-	mutex_lock(&mvm->mutex);
-	iwl_dbgfs_update_bf(vif, param, value);
-	if (param == MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER && !value) {
-		ret = iwl_mvm_disable_beacon_filter(mvm, vif);
-	} else {
-		ret = iwl_mvm_enable_beacon_filter(mvm, vif);
-	}
-	mutex_unlock(&mvm->mutex);
-
-	return ret ?: count;
-}
-
-static ssize_t iwl_dbgfs_bf_params_read(struct file *file,
-					char __user *user_buf,
-					size_t count, loff_t *ppos)
-{
-	struct ieee80211_vif *vif = file->private_data;
-	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
-	char buf[256];
-	int pos = 0;
-	const size_t bufsz = sizeof(buf);
-	struct iwl_beacon_filter_cmd cmd = {
-		IWL_BF_CMD_CONFIG_DEFAULTS,
-		.bf_enable_beacon_filter =
-			cpu_to_le32(IWL_BF_ENABLE_BEACON_FILTER_DEFAULT),
-		.ba_enable_beacon_abort =
-			cpu_to_le32(IWL_BA_ENABLE_BEACON_ABORT_DEFAULT),
-	};
-
-	iwl_mvm_beacon_filter_debugfs_parameters(vif, &cmd);
-	if (mvmvif->bf_data.bf_enabled)
-		cmd.bf_enable_beacon_filter = cpu_to_le32(1);
-	else
-		cmd.bf_enable_beacon_filter = 0;
-
-	pos += scnprintf(buf+pos, bufsz-pos, "bf_energy_delta = %d\n",
-			 le32_to_cpu(cmd.bf_energy_delta));
-	pos += scnprintf(buf+pos, bufsz-pos, "bf_roaming_energy_delta = %d\n",
-			 le32_to_cpu(cmd.bf_roaming_energy_delta));
-	pos += scnprintf(buf+pos, bufsz-pos, "bf_roaming_state = %d\n",
-			 le32_to_cpu(cmd.bf_roaming_state));
-	pos += scnprintf(buf+pos, bufsz-pos, "bf_temp_threshold = %d\n",
-			 le32_to_cpu(cmd.bf_temp_threshold));
-	pos += scnprintf(buf+pos, bufsz-pos, "bf_temp_fast_filter = %d\n",
-			 le32_to_cpu(cmd.bf_temp_fast_filter));
-	pos += scnprintf(buf+pos, bufsz-pos, "bf_temp_slow_filter = %d\n",
-			 le32_to_cpu(cmd.bf_temp_slow_filter));
-	pos += scnprintf(buf+pos, bufsz-pos, "bf_enable_beacon_filter = %d\n",
-			 le32_to_cpu(cmd.bf_enable_beacon_filter));
-	pos += scnprintf(buf+pos, bufsz-pos, "bf_debug_flag = %d\n",
-			 le32_to_cpu(cmd.bf_debug_flag));
-	pos += scnprintf(buf+pos, bufsz-pos, "bf_escape_timer = %d\n",
-			 le32_to_cpu(cmd.bf_escape_timer));
-	pos += scnprintf(buf+pos, bufsz-pos, "ba_escape_timer = %d\n",
-			 le32_to_cpu(cmd.ba_escape_timer));
-	pos += scnprintf(buf+pos, bufsz-pos, "ba_enable_beacon_abort = %d\n",
-			 le32_to_cpu(cmd.ba_enable_beacon_abort));
-
-	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-}
-
 #ifdef CONFIG_PM_SLEEP
-static ssize_t iwl_dbgfs_d3_sram_write(struct file *file,
-				       const char __user *user_buf,
+static ssize_t iwl_dbgfs_d3_sram_write(struct iwl_mvm *mvm, char *buf,
 				       size_t count, loff_t *ppos)
 {
-	struct iwl_mvm *mvm = file->private_data;
-	char buf[8] = {};
 	int store;
 
-	count = min_t(size_t, count, sizeof(buf) - 1);
-	if (copy_from_user(buf, user_buf, count))
-		return -EFAULT;
-
 	if (sscanf(buf, "%d", &store) != 1)
 		return -EINVAL;
 
@@ -1124,61 +658,33 @@ static ssize_t iwl_dbgfs_d3_sram_read(struct file *file, char __user *user_buf,
 }
 #endif
 
-#define MVM_DEBUGFS_READ_FILE_OPS(name)					\
-static const struct file_operations iwl_dbgfs_##name##_ops = {	\
-	.read = iwl_dbgfs_##name##_read,				\
-	.open = simple_open,						\
-	.llseek = generic_file_llseek,					\
-}
-
-#define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name)				\
-static const struct file_operations iwl_dbgfs_##name##_ops = {	\
-	.write = iwl_dbgfs_##name##_write,				\
-	.read = iwl_dbgfs_##name##_read,				\
-	.open = simple_open,						\
-	.llseek = generic_file_llseek,					\
-};
-
-#define MVM_DEBUGFS_WRITE_FILE_OPS(name)				\
-static const struct file_operations iwl_dbgfs_##name##_ops = {	\
-	.write = iwl_dbgfs_##name##_write,				\
-	.open = simple_open,						\
-	.llseek = generic_file_llseek,					\
-};
-
+#define MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \
+	_MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct iwl_mvm)
+#define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \
+	_MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz, struct iwl_mvm)
 #define MVM_DEBUGFS_ADD_FILE(name, parent, mode) do {			\
 		if (!debugfs_create_file(#name, mode, parent, mvm,	\
 					 &iwl_dbgfs_##name##_ops))	\
 			goto err;					\
 	} while (0)
 
-#define MVM_DEBUGFS_ADD_FILE_VIF(name, parent, mode) do {		\
-		if (!debugfs_create_file(#name, mode, parent, vif,	\
-					 &iwl_dbgfs_##name##_ops))	\
-			goto err;					\
-	} while (0)
-
 /* Device wide debugfs entries */
-MVM_DEBUGFS_WRITE_FILE_OPS(tx_flush);
-MVM_DEBUGFS_WRITE_FILE_OPS(sta_drain);
-MVM_DEBUGFS_READ_WRITE_FILE_OPS(sram);
+MVM_DEBUGFS_WRITE_FILE_OPS(tx_flush, 16);
+MVM_DEBUGFS_WRITE_FILE_OPS(sta_drain, 8);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(sram, 64);
 MVM_DEBUGFS_READ_FILE_OPS(stations);
 MVM_DEBUGFS_READ_FILE_OPS(bt_notif);
 MVM_DEBUGFS_READ_FILE_OPS(bt_cmd);
-MVM_DEBUGFS_READ_WRITE_FILE_OPS(disable_power_off);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(disable_power_off, 64);
 MVM_DEBUGFS_READ_FILE_OPS(fw_rx_stats);
-MVM_DEBUGFS_WRITE_FILE_OPS(fw_restart);
-MVM_DEBUGFS_READ_WRITE_FILE_OPS(scan_ant_rxchain);
+MVM_DEBUGFS_WRITE_FILE_OPS(fw_restart, 10);
+MVM_DEBUGFS_WRITE_FILE_OPS(fw_nmi, 10);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(scan_ant_rxchain, 8);
 
 #ifdef CONFIG_PM_SLEEP
-MVM_DEBUGFS_READ_WRITE_FILE_OPS(d3_sram);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(d3_sram, 8);
 #endif
 
-/* Interface specific debugfs entries */
-MVM_DEBUGFS_READ_FILE_OPS(mac_params);
-MVM_DEBUGFS_READ_WRITE_FILE_OPS(pm_params);
-MVM_DEBUGFS_READ_WRITE_FILE_OPS(bf_params);
-
 int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
 {
 	char buf[100];
@@ -1196,6 +702,7 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
 				     S_IRUSR | S_IWUSR);
 	MVM_DEBUGFS_ADD_FILE(fw_rx_stats, mvm->debugfs_dir, S_IRUSR);
 	MVM_DEBUGFS_ADD_FILE(fw_restart, mvm->debugfs_dir, S_IWUSR);
+	MVM_DEBUGFS_ADD_FILE(fw_nmi, mvm->debugfs_dir, S_IWUSR);
 	MVM_DEBUGFS_ADD_FILE(scan_ant_rxchain, mvm->debugfs_dir,
 			     S_IWUSR | S_IRUSR);
 #ifdef CONFIG_PM_SLEEP
@@ -1206,6 +713,19 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
 		goto err;
 #endif
 
+	if (!debugfs_create_blob("nvm_hw", S_IRUSR,
+				  mvm->debugfs_dir, &mvm->nvm_hw_blob))
+		goto err;
+	if (!debugfs_create_blob("nvm_sw", S_IRUSR,
+				  mvm->debugfs_dir, &mvm->nvm_sw_blob))
+		goto err;
+	if (!debugfs_create_blob("nvm_calib", S_IRUSR,
+				  mvm->debugfs_dir, &mvm->nvm_calib_blob))
+		goto err;
+	if (!debugfs_create_blob("nvm_prod", S_IRUSR,
+				  mvm->debugfs_dir, &mvm->nvm_prod_blob))
+		goto err;
+
 	/*
 	 * Create a symlink with mac80211. It will be removed when mac80211
 	 * exits (before the opmode exits, which removes the target.)
@@ -1221,72 +741,3 @@ err:
 	IWL_ERR(mvm, "Can't create the mvm debugfs directory\n");
 	return -ENOMEM;
 }
-
-void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
-{
-	struct dentry *dbgfs_dir = vif->debugfs_dir;
-	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
-	char buf[100];
-
-	/*
-	 * Check if debugfs directory already exist before creating it.
-	 * This may happen when, for example, resetting hw or suspend-resume
-	 */
-	if (!dbgfs_dir || mvmvif->dbgfs_dir)
-		return;
-
-	mvmvif->dbgfs_dir = debugfs_create_dir("iwlmvm", dbgfs_dir);
-	mvmvif->dbgfs_data = mvm;
-
-	if (!mvmvif->dbgfs_dir) {
-		IWL_ERR(mvm, "Failed to create debugfs directory under %s\n",
-			dbgfs_dir->d_name.name);
-		return;
-	}
-
-	if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM &&
-	    vif->type == NL80211_IFTYPE_STATION && !vif->p2p)
-		MVM_DEBUGFS_ADD_FILE_VIF(pm_params, mvmvif->dbgfs_dir, S_IWUSR |
-					 S_IRUSR);
-
-	MVM_DEBUGFS_ADD_FILE_VIF(mac_params, mvmvif->dbgfs_dir,
-				 S_IRUSR);
-
-	if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p &&
-	    mvmvif == mvm->bf_allowed_vif)
-		MVM_DEBUGFS_ADD_FILE_VIF(bf_params, mvmvif->dbgfs_dir,
-					 S_IRUSR | S_IWUSR);
-
-	/*
-	 * Create symlink for convenience pointing to interface specific
-	 * debugfs entries for the driver. For example, under
-	 * /sys/kernel/debug/iwlwifi/0000\:02\:00.0/iwlmvm/
-	 * find
-	 * netdev:wlan0 -> ../../../ieee80211/phy0/netdev:wlan0/iwlmvm/
-	 */
-	snprintf(buf, 100, "../../../%s/%s/%s/%s",
-		 dbgfs_dir->d_parent->d_parent->d_name.name,
-		 dbgfs_dir->d_parent->d_name.name,
-		 dbgfs_dir->d_name.name,
-		 mvmvif->dbgfs_dir->d_name.name);
-
-	mvmvif->dbgfs_slink = debugfs_create_symlink(dbgfs_dir->d_name.name,
-						     mvm->debugfs_dir, buf);
-	if (!mvmvif->dbgfs_slink)
-		IWL_ERR(mvm, "Can't create debugfs symbolic link under %s\n",
-			dbgfs_dir->d_name.name);
-	return;
-err:
-	IWL_ERR(mvm, "Can't create debugfs entity\n");
-}
-
-void iwl_mvm_vif_dbgfs_clean(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
-{
-	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
-
-	debugfs_remove(mvmvif->dbgfs_slink);
-	mvmvif->dbgfs_slink = NULL;
-
-	debugfs_remove_recursive(mvmvif->dbgfs_dir);
-	mvmvif->dbgfs_dir = NULL;
-}
diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs.h b/drivers/net/wireless/iwlwifi/mvm/debugfs.h
new file mode 100644
index 000000000000..e3a9774af495
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/debugfs.h
@@ -0,0 +1,101 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#define MVM_DEBUGFS_READ_FILE_OPS(name)					\
+static const struct file_operations iwl_dbgfs_##name##_ops = {		\
+	.read = iwl_dbgfs_##name##_read,				\
+	.open = simple_open,						\
+	.llseek = generic_file_llseek,					\
+}
+
+#define MVM_DEBUGFS_WRITE_WRAPPER(name, buflen, argtype)		\
+static ssize_t _iwl_dbgfs_##name##_write(struct file *file,		\
+					 const char __user *user_buf,	\
+					 size_t count, loff_t *ppos)	\
+{									\
+	argtype *arg = file->private_data;				\
+	char buf[buflen] = {};						\
+	size_t buf_size = min(count, sizeof(buf) - 1);			\
+									\
+	if (copy_from_user(buf, user_buf, buf_size))			\
+		return -EFAULT;						\
+									\
+	return iwl_dbgfs_##name##_write(arg, buf, buf_size, ppos);	\
+}									\
+
+#define _MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, buflen, argtype)		\
+MVM_DEBUGFS_WRITE_WRAPPER(name, buflen, argtype)			\
+static const struct file_operations iwl_dbgfs_##name##_ops = {		\
+	.write = _iwl_dbgfs_##name##_write,				\
+	.read = iwl_dbgfs_##name##_read,				\
+	.open = simple_open,						\
+	.llseek = generic_file_llseek,					\
+};
+
+#define _MVM_DEBUGFS_WRITE_FILE_OPS(name, buflen, argtype)		\
+MVM_DEBUGFS_WRITE_WRAPPER(name, buflen, argtype)			\
+static const struct file_operations iwl_dbgfs_##name##_ops = {		\
+	.write = _iwl_dbgfs_##name##_write,				\
+	.open = simple_open,						\
+	.llseek = generic_file_llseek,					\
+};
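For orientation, the new wrapper macros let each debugfs write handler take an already-copied, NUL-terminated kernel buffer instead of a __user pointer. Below is a minimal sketch of how a handler pairs with them; the entry name "example" and its handler are hypothetical and only illustrate the macro plumbing added by this patch.

/* Hypothetical write-only entry: the wrapper generated by
 * MVM_DEBUGFS_WRITE_WRAPPER(example, 16, struct iwl_mvm) copies at most
 * 15 bytes from userspace into a zeroed 16-byte stack buffer and then
 * calls this handler with the kernel-side copy. */
static ssize_t iwl_dbgfs_example_write(struct iwl_mvm *mvm, char *buf,
				       size_t count, loff_t *ppos)
{
	u32 val;

	if (kstrtou32(buf, 0, &val))
		return -EINVAL;

	/* ... act on val, taking mvm->mutex if needed ... */
	return count;
}

MVM_DEBUGFS_WRITE_FILE_OPS(example, 16);

/* and in iwl_mvm_dbgfs_register(): */
MVM_DEBUGFS_ADD_FILE(example, mvm->debugfs_dir, S_IWUSR);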
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-bt-coex.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-bt-coex.h
index 4ea5e24ca92d..1b4e54d416b0 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-bt-coex.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-bt-coex.h
@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -127,6 +127,7 @@ enum iwl_bt_coex_valid_bit_msk {
 	BT_VALID_ANT_ISOLATION_THRS	= BIT(15),
 	BT_VALID_TXTX_DELTA_FREQ_THRS	= BIT(16),
 	BT_VALID_TXRX_MAX_FREQ_0	= BIT(17),
+	BT_VALID_SYNC_TO_SCO		= BIT(18),
 };
 
 /**
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
index 4e7dd8cf87dc..8415ff312d0e 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h
index 39c3148bdfa8..c405cda1025f 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h
@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
index 5cb93ae5cd2f..884c08725308 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -85,6 +85,8 @@
  *		PBW Snoozing enabled
  * @POWER_FLAGS_ADVANCE_PM_ENA_MSK: Advanced PM (uAPSD) enable mask
  * @POWER_FLAGS_LPRX_ENA_MSK: Low Power RX enable.
+ * @POWER_FLAGS_UAPSD_MISBEHAVING_ENA_MSK: AP/GO's uAPSD misbehaving
+ *		detection enablement
 */
 enum iwl_power_flags {
 	POWER_FLAGS_POWER_SAVE_ENA_MSK		= BIT(0),
@@ -94,6 +96,7 @@ enum iwl_power_flags {
 	POWER_FLAGS_BT_SCO_ENA			= BIT(8),
 	POWER_FLAGS_ADVANCE_PM_ENA_MSK		= BIT(9),
 	POWER_FLAGS_LPRX_ENA_MSK		= BIT(11),
+	POWER_FLAGS_UAPSD_MISBEHAVING_ENA_MSK	= BIT(12),
 };
 
 #define IWL_POWER_VEC_SIZE 5
@@ -228,6 +231,19 @@ struct iwl_mac_power_cmd {
 	u8 reserved;
 } __packed;
 
+/*
+ * struct iwl_uapsd_misbehaving_ap_notif - FW sends this notification when the
+ * associated AP is identified as improperly implementing the uAPSD protocol.
+ * PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION = 0x78
+ * @sta_id: index of station in uCode's station table - associated AP ID in
+ *	    this context.
+ */
+struct iwl_uapsd_misbehaving_ap_notif {
+	__le32 sta_id;
+	u8 mac_id;
+	u8 reserved[3];
+} __packed;
+
 /**
  * struct iwl_beacon_filter_cmd
  * REPLY_BEACON_FILTERING_CMD = 0xd2 (command)
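The new notification is consumed like the other MVM notifications touched by this patch (compare the missed-beacons handler in mac-ctxt.c further down). A sketch of a handler follows, with a hypothetical function name, under the assumption that PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION is dispatched through the usual RX handler table.

static int iwl_mvm_rx_uapsd_misbehaving_ap(struct iwl_mvm *mvm,
					   struct iwl_rx_cmd_buffer *rxb,
					   struct iwl_device_cmd *cmd)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_uapsd_misbehaving_ap_notif *notif = (void *)pkt->data;

	/* sta_id is the associated AP's index in the station table */
	IWL_DEBUG_INFO(mvm, "uAPSD misbehaving AP: sta_id=%d mac_id=%d\n",
		       le32_to_cpu(notif->sta_id), notif->mac_id);

	/* a real handler would fall back from uAPSD to legacy PS here */
	return 0;
}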
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h
index 538f1c7a5966..85057219cc43 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h
@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -281,8 +281,31 @@ enum {
 /* # entries in rate scale table to support Tx retries */
 #define  LQ_MAX_RETRY_NUM 16
 
-/* Link quality command flags, only this one is available */
-#define  LQ_FLAG_SET_STA_TLC_RTS_MSK	BIT(0)
+/* Link quality command flags bit fields */
+
+/* Bit 0: (0) Don't use RTS (1) Use RTS */
+#define LQ_FLAG_USE_RTS_POS             0
+#define LQ_FLAG_USE_RTS_MSK	        (1 << LQ_FLAG_USE_RTS_POS)
+
+/* Bit 1-3: LQ command color. Used to match responses to LQ commands */
+#define LQ_FLAG_COLOR_POS               1
+#define LQ_FLAG_COLOR_MSK               (7 << LQ_FLAG_COLOR_POS)
+
+/* Bit 4-5: Tx RTS BW Signalling
+ * (0) No RTS BW signalling
+ * (1) Static BW signalling
+ * (2) Dynamic BW signalling
+ */
+#define LQ_FLAG_RTS_BW_SIG_POS          4
+#define LQ_FLAG_RTS_BW_SIG_NONE         (0 << LQ_FLAG_RTS_BW_SIG_POS)
+#define LQ_FLAG_RTS_BW_SIG_STATIC       (1 << LQ_FLAG_RTS_BW_SIG_POS)
+#define LQ_FLAG_RTS_BW_SIG_DYNAMIC      (2 << LQ_FLAG_RTS_BW_SIG_POS)
+
+/* Bit 6: (0) No dynamic BW selection (1) Allow dynamic BW selection
+ * Dynamic BW selection allows Tx with a narrower BW than requested in rates
+ */
+#define LQ_FLAG_DYNAMIC_BW_POS          6
+#define LQ_FLAG_DYNAMIC_BW_MSK          (1 << LQ_FLAG_DYNAMIC_BW_POS)
 
 /**
  * struct iwl_lq_cmd - link quality command
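A minimal usage sketch of composing and decoding the new LQ flag bit fields; the color value below is an arbitrary example, not a value mandated by the patch.

/* build a flags value: use RTS, color 2, dynamic RTS BW signalling,
 * and allow dynamic bandwidth selection */
u8 lq_flags = LQ_FLAG_USE_RTS_MSK |
	      (2 << LQ_FLAG_COLOR_POS) |
	      LQ_FLAG_RTS_BW_SIG_DYNAMIC |
	      LQ_FLAG_DYNAMIC_BW_MSK;

/* recover the color to match a firmware response to its LQ command */
u8 color = (lq_flags & LQ_FLAG_COLOR_MSK) >> LQ_FLAG_COLOR_POS;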
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
index c3782b48ded1..73cbba7424f2 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -530,14 +530,13 @@ struct iwl_scan_offload_schedule {
 /*
  * iwl_scan_offload_flags
  *
- * IWL_SCAN_OFFLOAD_FLAG_FILTER_SSID: filter mode - upload every beacon or match
- *	ssid list.
+ * IWL_SCAN_OFFLOAD_FLAG_PASS_ALL: pass all results - no filtering.
  * IWL_SCAN_OFFLOAD_FLAG_CACHED_CHANNEL: add cached channels to partial scan.
  * IWL_SCAN_OFFLOAD_FLAG_ENERGY_SCAN: use energy based scan before partial scan
  *	on A band.
  */
 enum iwl_scan_offload_flags {
-	IWL_SCAN_OFFLOAD_FLAG_FILTER_SSID	= BIT(0),
+	IWL_SCAN_OFFLOAD_FLAG_PASS_ALL		= BIT(0),
 	IWL_SCAN_OFFLOAD_FLAG_CACHED_CHANNEL	= BIT(2),
 	IWL_SCAN_OFFLOAD_FLAG_ENERGY_SCAN	= BIT(3),
 };
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h
index 4aca5933a65d..1b60fdff6a56 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h
@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -97,9 +97,6 @@ enum iwl_sta_flags {
 					   STA_FLG_FLG_ANT_B),
 
 	STA_FLG_PS			= BIT(8),
-	STA_FLG_INVALID			= BIT(9),
-	STA_FLG_DLP_EN			= BIT(10),
-	STA_FLG_SET_ALL_KEYS		= BIT(11),
 	STA_FLG_DRAIN_FLOW		= BIT(12),
 	STA_FLG_PAN			= BIT(13),
 	STA_FLG_CLASS_AUTH		= BIT(14),
@@ -138,7 +135,14 @@ enum iwl_sta_flags {
 
 /**
  * enum iwl_sta_key_flag - key flags for the ADD_STA host command
- * @STA_KEY_FLG_EN_MSK: mask for encryption algorithm
+ * @STA_KEY_FLG_NO_ENC: no encryption
+ * @STA_KEY_FLG_WEP: WEP encryption algorithm
+ * @STA_KEY_FLG_CCM: CCMP encryption algorithm
+ * @STA_KEY_FLG_TKIP: TKIP encryption algorithm
+ * @STA_KEY_FLG_EXT: extended cipher algorithm (depends on FW support)
+ * @STA_KEY_FLG_CMAC: CMAC encryption algorithm
+ * @STA_KEY_FLG_ENC_UNKNOWN: unknown encryption algorithm
+ * @STA_KEY_FLG_EN_MSK: mask for the encryption algorithm value
  * @STA_KEY_FLG_WEP_KEY_MAP: wep is either a group key (0 - legacy WEP) or from
  *	station info array (1 - n 1X mode)
  * @STA_KEY_FLG_KEYID_MSK: the index of the key
@@ -152,6 +156,7 @@ enum iwl_sta_key_flag {
 	STA_KEY_FLG_WEP			= (1 << 0),
 	STA_KEY_FLG_CCM			= (2 << 0),
 	STA_KEY_FLG_TKIP		= (3 << 0),
+	STA_KEY_FLG_EXT			= (4 << 0),
 	STA_KEY_FLG_CMAC		= (6 << 0),
 	STA_KEY_FLG_ENC_UNKNOWN		= (7 << 0),
 	STA_KEY_FLG_EN_MSK		= (7 << 0),
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h
index d606197bde8f..b674c2a2b51c 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h
@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -132,6 +132,7 @@ enum iwl_tx_flags {
 #define TX_CMD_SEC_WEP			0x01
 #define TX_CMD_SEC_CCM			0x02
 #define TX_CMD_SEC_TKIP			0x03
+#define TX_CMD_SEC_EXT			0x04
 #define TX_CMD_SEC_MSK			0x07
 #define TX_CMD_SEC_WEP_KEY_IDX_POS	6
 #define TX_CMD_SEC_WEP_KEY_IDX_MSK	0xc0
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
index bad5a552dd8d..989d7dbdca6c 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -141,6 +141,7 @@ enum {
 
 	/* Power - legacy power table command */
 	POWER_TABLE_CMD = 0x77,
+	PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION = 0x78,
 
 	/* Thermal Throttling*/
 	REPLY_THERMAL_MNG_BACKOFF = 0x7e,
@@ -183,6 +184,7 @@ enum {
 	BT_PROFILE_NOTIFICATION = 0xce,
 	BT_COEX_CI = 0x5d,
 
+	REPLY_SF_CFG_CMD = 0xd1,
 	REPLY_BEACON_FILTERING_CMD = 0xd2,
 
 	REPLY_DEBUG_CMD = 0xf0,
@@ -1052,6 +1054,7 @@ enum iwl_mvm_rx_status {
 	RX_MPDU_RES_STATUS_SEC_WEP_ENC			= (1 << 8),
 	RX_MPDU_RES_STATUS_SEC_CCM_ENC			= (2 << 8),
 	RX_MPDU_RES_STATUS_SEC_TKIP_ENC			= (3 << 8),
+	RX_MPDU_RES_STATUS_SEC_EXT_ENC			= (4 << 8),
 	RX_MPDU_RES_STATUS_SEC_CCM_CMAC_ENC		= (6 << 8),
 	RX_MPDU_RES_STATUS_SEC_ENC_ERR			= (7 << 8),
 	RX_MPDU_RES_STATUS_SEC_ENC_MSK			= (7 << 8),
@@ -1131,6 +1134,7 @@ struct iwl_set_calib_default_cmd {
 } __packed; /* PHY_CALIB_OVERRIDE_VALUES_S */
 
 #define MAX_PORT_ID_NUM	2
+#define MAX_MCAST_FILTERING_ADDRESSES 256
 
 /**
  * struct iwl_mcast_filter_cmd - configure multicast filter.
@@ -1363,4 +1367,65 @@ struct iwl_notif_statistics { /* STATISTICS_NTFY_API_S_VER_8 */
 	struct mvm_statistics_general general;
 } __packed;
 
+/***********************************
+ * Smart Fifo API
+ ***********************************/
+/* Smart Fifo state */
+enum iwl_sf_state {
+	SF_LONG_DELAY_ON = 0, /* should never be called by driver */
+	SF_FULL_ON,
+	SF_UNINIT,
+	SF_INIT_OFF,
+	SF_HW_NUM_STATES
+};
+
+/* Smart Fifo possible scenario */
+enum iwl_sf_scenario {
+	SF_SCENARIO_SINGLE_UNICAST,
+	SF_SCENARIO_AGG_UNICAST,
+	SF_SCENARIO_MULTICAST,
+	SF_SCENARIO_BA_RESP,
+	SF_SCENARIO_TX_RESP,
+	SF_NUM_SCENARIO
+};
+
+#define SF_TRANSIENT_STATES_NUMBER 2	/* SF_LONG_DELAY_ON and SF_FULL_ON */
+#define SF_NUM_TIMEOUT_TYPES 2		/* Aging timer and Idle timer */
+
+/* smart FIFO default values */
+#define SF_W_MARK_SISO 4096
+#define SF_W_MARK_MIMO2 8192
+#define SF_W_MARK_MIMO3 6144
+#define SF_W_MARK_LEGACY 4096
+#define SF_W_MARK_SCAN 4096
+
+/* SF Scenarios timers for FULL_ON state (aligned to 32 uSec) */
+#define SF_SINGLE_UNICAST_IDLE_TIMER 320	/* 300 uSec  */
+#define SF_SINGLE_UNICAST_AGING_TIMER 2016	/* 2 mSec */
+#define SF_AGG_UNICAST_IDLE_TIMER 320		/* 300 uSec */
+#define SF_AGG_UNICAST_AGING_TIMER 2016		/* 2 mSec */
+#define SF_MCAST_IDLE_TIMER 2016		/* 2 mSec */
+#define SF_MCAST_AGING_TIMER 10016		/* 10 mSec */
+#define SF_BA_IDLE_TIMER 320			/* 300 uSec */
+#define SF_BA_AGING_TIMER 2016			/* 2 mSec */
+#define SF_TX_RE_IDLE_TIMER 320			/* 300 uSec */
+#define SF_TX_RE_AGING_TIMER 2016		/* 2 mSec */
+
+#define SF_LONG_DELAY_AGING_TIMER 1000000	/* 1 Sec */
+
+/**
+ * Smart Fifo configuration command.
+ * @state: smart fifo state, types listed in enum iwl_sf_state.
+ * @watermark: Minimum allowed available free space in RXF for transient state.
+ * @long_delay_timeouts: aging and idle timer values for each scenario
+ * in long delay state.
+ * @full_on_timeouts: timer values for each scenario in full on state.
+ */
+struct iwl_sf_cfg_cmd {
+	enum iwl_sf_state state;
+	__le32 watermark[SF_TRANSIENT_STATES_NUMBER];
+	__le32 long_delay_timeouts[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES];
+	__le32 full_on_timeouts[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES];
+} __packed; /* SF_CFG_API_S_VER_2 */
+
 #endif /* __fw_api_h__ */
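The fw.c hunk below calls iwl_mvm_sf_update(), which builds on this API elsewhere in the series. A rough sketch of filling and sending the command is shown here with a hypothetical helper name; the ordering of the aging/idle timeout indices is an assumption.

static int iwl_mvm_sf_full_on_sketch(struct iwl_mvm *mvm)
{
	struct iwl_sf_cfg_cmd cmd = {
		.state = SF_FULL_ON,
	};

	cmd.watermark[0] = cpu_to_le32(SF_W_MARK_LEGACY);
	cmd.full_on_timeouts[SF_SCENARIO_SINGLE_UNICAST][0] =
		cpu_to_le32(SF_SINGLE_UNICAST_AGING_TIMER);
	cmd.full_on_timeouts[SF_SCENARIO_SINGLE_UNICAST][1] =
		cpu_to_le32(SF_SINGLE_UNICAST_IDLE_TIMER);
	/* ... the remaining scenarios and long-delay timeouts likewise ... */

	return iwl_mvm_send_cmd_pdu(mvm, REPLY_SF_CFG_CMD, CMD_SYNC,
				    sizeof(cmd), &cmd);
}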
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw.c b/drivers/net/wireless/iwlwifi/mvm/fw.c
index 70e5297646b2..c03d39541f9e 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/iwlwifi/mvm/fw.c
@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -241,7 +241,7 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
 
 	lockdep_assert_held(&mvm->mutex);
 
-	if (mvm->init_ucode_complete)
+	if (WARN_ON_ONCE(mvm->init_ucode_complete))
 		return 0;
 
 	iwl_init_notification_wait(&mvm->notif_wait,
@@ -287,7 +287,8 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
 		IWL_DEBUG_RF_KILL(mvm,
 				  "jump over all phy activities due to RF kill\n");
 		iwl_remove_notification(&mvm->notif_wait, &calib_wait);
-		return 1;
+		ret = 1;
+		goto out;
 	}
 
 	/* Send TX valid antennas before triggering calibrations */
@@ -319,9 +320,7 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
 error:
 	iwl_remove_notification(&mvm->notif_wait, &calib_wait);
 out:
-	if (!iwlmvm_mod_params.init_dbg) {
-		iwl_trans_stop_device(mvm->trans);
-	} else if (!mvm->nvm_data) {
+	if (iwlmvm_mod_params.init_dbg && !mvm->nvm_data) {
 		/* we want to debug INIT and we have no NVM - fake */
 		mvm->nvm_data = kzalloc(sizeof(struct iwl_nvm_data) +
 					sizeof(struct ieee80211_channel) +
@@ -370,11 +369,16 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
 				ret = -ERFKILL;
 			goto error;
 		}
-		/* should stop & start HW since that INIT image just loaded */
-		iwl_trans_stop_hw(mvm->trans, false);
-		ret = iwl_trans_start_hw(mvm->trans);
-		if (ret)
-			return ret;
+		if (!iwlmvm_mod_params.init_dbg) {
+			/*
+			 * should stop and start HW since that INIT
+			 * image just loaded
+			 */
+			iwl_trans_stop_device(mvm->trans);
+			ret = iwl_trans_start_hw(mvm->trans);
+			if (ret)
+				return ret;
+		}
 	}
 
 	if (iwlmvm_mod_params.init_dbg)
@@ -386,6 +390,10 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
 		goto error;
 	}
 
+	ret = iwl_mvm_sf_update(mvm, NULL, false);
+	if (ret)
+		IWL_ERR(mvm, "Failed to initialize Smart Fifo\n");
+
 	ret = iwl_send_tx_ant_cfg(mvm, iwl_fw_valid_tx_ant(mvm->fw));
 	if (ret)
 		goto error;
diff --git a/drivers/net/wireless/iwlwifi/mvm/led.c b/drivers/net/wireless/iwlwifi/mvm/led.c
index 2269a9e5cc67..6b4ea6bf8ffe 100644
--- a/drivers/net/wireless/iwlwifi/mvm/led.c
+++ b/drivers/net/wireless/iwlwifi/mvm/led.c
@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -103,7 +103,7 @@ int iwl_mvm_leds_init(struct iwl_mvm *mvm)
 		return 0;
 	default:
 		return -EINVAL;
-	};
+	}
 
 	mvm->led.name = kasprintf(GFP_KERNEL, "%s-led",
 				   wiphy_name(mvm->hw->wiphy));
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
index f41f9b079831..ba723d50939a 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -69,10 +69,10 @@
 #include "mvm.h"
 
 const u8 iwl_mvm_ac_to_tx_fifo[] = {
-	IWL_MVM_TX_FIFO_BK,
-	IWL_MVM_TX_FIFO_BE,
-	IWL_MVM_TX_FIFO_VI,
 	IWL_MVM_TX_FIFO_VO,
+	IWL_MVM_TX_FIFO_VI,
+	IWL_MVM_TX_FIFO_BE,
+	IWL_MVM_TX_FIFO_BK,
 };
 
 struct iwl_mvm_mac_iface_iterator_data {
@@ -85,35 +85,15 @@ struct iwl_mvm_mac_iface_iterator_data {
 	bool found_vif;
 };
 
-static void iwl_mvm_mac_iface_iterator(void *_data, u8 *mac,
-				       struct ieee80211_vif *vif)
+static void iwl_mvm_mac_tsf_id_iter(void *_data, u8 *mac,
+				    struct ieee80211_vif *vif)
 {
 	struct iwl_mvm_mac_iface_iterator_data *data = _data;
 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
-	u32 ac;
 
-	/* Iterator may already find the interface being added -- skip it */
-	if (vif == data->vif) {
-		data->found_vif = true;
+	/* Skip the interface for which we are trying to assign a tsf_id */
+	if (vif == data->vif)
 		return;
-	}
-
-	/* Mark the queues used by the vif */
-	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
-		if (vif->hw_queue[ac] != IEEE80211_INVAL_HW_QUEUE)
-			__set_bit(vif->hw_queue[ac], data->used_hw_queues);
-
-	if (vif->cab_queue != IEEE80211_INVAL_HW_QUEUE)
-		__set_bit(vif->cab_queue, data->used_hw_queues);
-
-	/*
-	 * Mark MAC IDs as used by clearing the available bit, and
-	 * (below) mark TSFs as used if their existing use is not
-	 * compatible with the new interface type.
-	 * No locking or atomic bit operations are needed since the
-	 * data is on the stack of the caller function.
-	 */
-	__clear_bit(mvmvif->id, data->available_mac_ids);
 
 	/*
 	 * The TSF is a hardware/firmware resource, there are 4 and
@@ -135,21 +115,26 @@ static void iwl_mvm_mac_iface_iterator(void *_data, u8 *mac,
 	case NL80211_IFTYPE_STATION:
 		/*
 		 * The new interface is client, so if the existing one
-		 * we're iterating is an AP, the TSF should be used to
+		 * we're iterating is an AP, and both interfaces have the
+		 * same beacon interval, the same TSF should be used to
 		 * avoid drift between the new client and existing AP;
 		 * the existing AP will get drift updates from the new
 		 * client context in this case
 		 */
 		if (vif->type == NL80211_IFTYPE_AP) {
 			if (data->preferred_tsf == NUM_TSF_IDS &&
-			    test_bit(mvmvif->tsf_id, data->available_tsf_ids))
+			    test_bit(mvmvif->tsf_id, data->available_tsf_ids) &&
+			    (vif->bss_conf.beacon_int ==
+			     data->vif->bss_conf.beacon_int)) {
 				data->preferred_tsf = mvmvif->tsf_id;
-			return;
+				return;
+			}
 		}
 		break;
 	case NL80211_IFTYPE_AP:
 		/*
-		 * The new interface is AP/GO, so should get drift
+		 * The new interface is AP/GO, so if both interfaces
+		 * have the same beacon interval, it should get drift
 		 * updates from an existing client or use the same
 		 * TSF as an existing GO. There's no drift between
 		 * TSFs internally but if they used different TSFs
@@ -159,9 +144,12 @@ static void iwl_mvm_mac_iface_iterator(void *_data, u8 *mac,
 		if (vif->type == NL80211_IFTYPE_STATION ||
 		    vif->type == NL80211_IFTYPE_AP) {
 			if (data->preferred_tsf == NUM_TSF_IDS &&
-			    test_bit(mvmvif->tsf_id, data->available_tsf_ids))
+			    test_bit(mvmvif->tsf_id, data->available_tsf_ids) &&
+			    (vif->bss_conf.beacon_int ==
+			     data->vif->bss_conf.beacon_int)) {
 				data->preferred_tsf = mvmvif->tsf_id;
-			return;
+				return;
+			}
 		}
 		break;
 	default:
@@ -187,6 +175,39 @@ static void iwl_mvm_mac_iface_iterator(void *_data, u8 *mac,
 		data->preferred_tsf = NUM_TSF_IDS;
 }
 
+static void iwl_mvm_mac_iface_iterator(void *_data, u8 *mac,
+				       struct ieee80211_vif *vif)
+{
+	struct iwl_mvm_mac_iface_iterator_data *data = _data;
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	u32 ac;
+
+	/* Iterator may already find the interface being added -- skip it */
+	if (vif == data->vif) {
+		data->found_vif = true;
+		return;
+	}
+
+	/* Mark the queues used by the vif */
+	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
+		if (vif->hw_queue[ac] != IEEE80211_INVAL_HW_QUEUE)
+			__set_bit(vif->hw_queue[ac], data->used_hw_queues);
+
+	if (vif->cab_queue != IEEE80211_INVAL_HW_QUEUE)
+		__set_bit(vif->cab_queue, data->used_hw_queues);
+
+	/* Mark MAC IDs as used by clearing the available bit, and
+	 * (below) mark TSFs as used if their existing use is not
+	 * compatible with the new interface type.
+	 * No locking or atomic bit operations are needed since the
+	 * data is on the stack of the caller function.
+	 */
+	__clear_bit(mvmvif->id, data->available_mac_ids);
+
+	/* find a suitable tsf_id */
+	iwl_mvm_mac_tsf_id_iter(_data, mac, vif);
+}
+
 /*
  * Get the mask of the queues used by the vif
  */
@@ -205,6 +226,29 @@ u32 iwl_mvm_mac_get_queues_mask(struct iwl_mvm *mvm,
 	return qmask;
 }
 
+void iwl_mvm_mac_ctxt_recalc_tsf_id(struct iwl_mvm *mvm,
+				    struct ieee80211_vif *vif)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct iwl_mvm_mac_iface_iterator_data data = {
+		.mvm = mvm,
+		.vif = vif,
+		.available_tsf_ids = { (1 << NUM_TSF_IDS) - 1 },
+		/* no preference yet */
+		.preferred_tsf = NUM_TSF_IDS,
+	};
+
+	ieee80211_iterate_active_interfaces_atomic(
+		mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
+		iwl_mvm_mac_tsf_id_iter, &data);
+
+	if (data.preferred_tsf != NUM_TSF_IDS)
+		mvmvif->tsf_id = data.preferred_tsf;
+	else if (!test_bit(mvmvif->tsf_id, data.available_tsf_ids))
+		mvmvif->tsf_id = find_first_bit(data.available_tsf_ids,
+						NUM_TSF_IDS);
+}
+
 static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm,
 					       struct ieee80211_vif *vif)
 {
@@ -488,6 +532,40 @@ static void iwl_mvm_ack_rates(struct iwl_mvm *mvm,
 	*ofdm_rates = ofdm;
 }
 
+static void iwl_mvm_mac_ctxt_set_ht_flags(struct iwl_mvm *mvm,
+					 struct ieee80211_vif *vif,
+					 struct iwl_mac_ctx_cmd *cmd)
+{
+	/* for both sta and ap, ht_operation_mode holds the protection_mode */
+	u8 protection_mode = vif->bss_conf.ht_operation_mode &
+				 IEEE80211_HT_OP_MODE_PROTECTION;
+	/* The fw does not distinguish between ht and fat */
+	u32 ht_flag = MAC_PROT_FLG_HT_PROT | MAC_PROT_FLG_FAT_PROT;
+
+	IWL_DEBUG_RATE(mvm, "protection mode set to %d\n", protection_mode);
+	/*
+	 * See section 9.23.3.1 of IEEE 80211-2012.
+	 * Nongreenfield HT STAs Present is not supported.
+	 */
+	switch (protection_mode) {
+	case IEEE80211_HT_OP_MODE_PROTECTION_NONE:
+		break;
+	case IEEE80211_HT_OP_MODE_PROTECTION_NONMEMBER:
+	case IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED:
+		cmd->protection_flags |= cpu_to_le32(ht_flag);
+		break;
+	case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ:
+		/* Protect when channel wider than 20MHz */
+		if (vif->bss_conf.chandef.width > NL80211_CHAN_WIDTH_20)
+			cmd->protection_flags |= cpu_to_le32(ht_flag);
+		break;
+	default:
+		IWL_ERR(mvm, "Illegal protection mode %d\n",
+			protection_mode);
+		break;
+	}
+}
+
 static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm,
 					struct ieee80211_vif *vif,
 					struct iwl_mac_ctx_cmd *cmd,
@@ -495,6 +573,8 @@ static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm,
 {
 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
 	struct ieee80211_chanctx_conf *chanctx;
+	bool ht_enabled = !!(vif->bss_conf.ht_operation_mode &
+			     IEEE80211_HT_OP_MODE_PROTECTION);
 	u8 cck_ack_rates, ofdm_ack_rates;
 	int i;
 
@@ -550,18 +630,23 @@ static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm,
 		cpu_to_le32(vif->bss_conf.use_short_slot ?
 			    MAC_FLG_SHORT_SLOT : 0);
 
-	for (i = 0; i < AC_NUM; i++) {
-		cmd->ac[i].cw_min = cpu_to_le16(mvmvif->queue_params[i].cw_min);
-		cmd->ac[i].cw_max = cpu_to_le16(mvmvif->queue_params[i].cw_max);
-		cmd->ac[i].aifsn = mvmvif->queue_params[i].aifs;
-		cmd->ac[i].edca_txop =
+	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+		u8 txf = iwl_mvm_ac_to_tx_fifo[i];
+
+		cmd->ac[txf].cw_min =
+			cpu_to_le16(mvmvif->queue_params[i].cw_min);
+		cmd->ac[txf].cw_max =
+			cpu_to_le16(mvmvif->queue_params[i].cw_max);
+		cmd->ac[txf].edca_txop =
 			cpu_to_le16(mvmvif->queue_params[i].txop * 32);
-		cmd->ac[i].fifos_mask = BIT(iwl_mvm_ac_to_tx_fifo[i]);
+		cmd->ac[txf].aifsn = mvmvif->queue_params[i].aifs;
+		cmd->ac[txf].fifos_mask = BIT(txf);
 	}
 
 	/* in AP mode, the MCAST FIFO takes the EDCA params from VO */
 	if (vif->type == NL80211_IFTYPE_AP)
-		cmd->ac[AC_VO].fifos_mask |= BIT(IWL_MVM_TX_FIFO_MCAST);
+		cmd->ac[IWL_MVM_TX_FIFO_VO].fifos_mask |=
+			BIT(IWL_MVM_TX_FIFO_MCAST);
 
 	if (vif->bss_conf.qos)
 		cmd->qos_flags |= cpu_to_le32(MAC_QOS_FLG_UPDATE_EDCA);
@@ -573,16 +658,13 @@ static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm,
 			cmd->protection_flags |=
 				cpu_to_le32(MAC_PROT_FLG_SELF_CTS_EN);
 	}
-
-	/*
-	 * I think that we should enable these 2 flags regardless the HT PROT
-	 * fields in the HT IE, but I am not sure. Someone knows whom to ask?...
-	 */
-	if (vif->bss_conf.chandef.width != NL80211_CHAN_WIDTH_20_NOHT) {
+	IWL_DEBUG_RATE(mvm, "use_cts_prot %d, ht_operation_mode %d\n",
+		       vif->bss_conf.use_cts_prot,
+		       vif->bss_conf.ht_operation_mode);
+	if (vif->bss_conf.chandef.width != NL80211_CHAN_WIDTH_20_NOHT)
 		cmd->qos_flags |= cpu_to_le32(MAC_QOS_FLG_TGN);
-		cmd->protection_flags |= cpu_to_le32(MAC_PROT_FLG_HT_PROT |
-						     MAC_PROT_FLG_FAT_PROT);
-	}
+	if (ht_enabled)
+		iwl_mvm_mac_ctxt_set_ht_flags(mvm, vif, cmd);
 
 	cmd->filter_flags = cpu_to_le32(MAC_FILTER_ACCEPT_GRP);
 }
@@ -974,7 +1056,7 @@ static void iwl_mvm_mac_ctxt_cmd_fill_ap(struct iwl_mvm *mvm,
 			iwl_mvm_mac_ap_iterator, &data);
 
 		if (data.beacon_device_ts) {
-			u32 rand = (prandom_u32() % (80 - 20)) + 20;
+			u32 rand = (prandom_u32() % (64 - 36)) + 36;
 			mvmvif->ap_beacon_time = data.beacon_device_ts +
 				ieee80211_tu_to_usec(data.beacon_int * rand /
 						     100);
@@ -1153,10 +1235,18 @@ int iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
 static void iwl_mvm_beacon_loss_iterator(void *_data, u8 *mac,
 					 struct ieee80211_vif *vif)
 {
-	u16 *id = _data;
+	struct iwl_missed_beacons_notif *missed_beacons = _data;
 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
 
-	if (mvmvif->id == *id)
+	if (mvmvif->id != (u16)le32_to_cpu(missed_beacons->mac_id))
+		return;
+
+	/*
+	 * TODO: the threshold should be adjusted based on latency conditions,
+	 * and/or in case of a CS flow on one of the other AP vifs.
+	 */
+	if (le32_to_cpu(missed_beacons->consec_missed_beacons_since_last_rx) >
+	     IWL_MVM_MISSED_BEACONS_THRESHOLD)
 		ieee80211_beacon_loss(vif);
 }
 
@@ -1165,12 +1255,19 @@ int iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
 				    struct iwl_device_cmd *cmd)
 {
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
-	struct iwl_missed_beacons_notif *missed_beacons = (void *)pkt->data;
-	u16 id = (u16)le32_to_cpu(missed_beacons->mac_id);
+	struct iwl_missed_beacons_notif *mb = (void *)pkt->data;
+
+	IWL_DEBUG_INFO(mvm,
+		       "missed bcn mac_id=%u, consecutive=%u (%u, %u, %u)\n",
+		       le32_to_cpu(mb->mac_id),
+		       le32_to_cpu(mb->consec_missed_beacons),
+		       le32_to_cpu(mb->consec_missed_beacons_since_last_rx),
+		       le32_to_cpu(mb->num_recvd_beacons),
+		       le32_to_cpu(mb->num_expected_beacons));
 
 	ieee80211_iterate_active_interfaces_atomic(mvm->hw,
 						   IEEE80211_IFACE_ITER_NORMAL,
 						   iwl_mvm_beacon_loss_iterator,
-						   &id);
+						   mb);
 	return 0;
 }
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index 74bc2c8af06d..c49b5073c251 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -199,9 +199,9 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
 	if (IWL_UCODE_API(mvm->fw->ucode_ver) >= 8)
 		hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
 
-	hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
-			    WIPHY_FLAG_DISABLE_BEACON_HINTS |
-			    WIPHY_FLAG_IBSS_RSN;
+	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
+	hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG |
+				       REGULATORY_DISABLE_BEACON_HINTS;
 
 	hw->wiphy->iface_combinations = iwl_mvm_iface_combinations;
 	hw->wiphy->n_iface_combinations =
@@ -256,10 +256,17 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
 	}
 
 	hw->wiphy->features |= NL80211_FEATURE_P2P_GO_CTWIN |
-			       NL80211_FEATURE_P2P_GO_OPPPS;
+			       NL80211_FEATURE_P2P_GO_OPPPS |
+			       NL80211_FEATURE_LOW_PRIORITY_SCAN;
 
 	mvm->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD;
 
+	/* currently FW API supports only one optional cipher scheme */
+	if (mvm->fw->cs[0].cipher) {
+		mvm->hw->n_cipher_schemes = 1;
+		mvm->hw->cipher_schemes = &mvm->fw->cs[0];
+	}
+
 #ifdef CONFIG_PM_SLEEP
 	if (mvm->fw->img[IWL_UCODE_WOWLAN].sec[0].len &&
 	    mvm->trans->ops->d3_suspend &&
@@ -398,7 +405,6 @@ static void iwl_mvm_cleanup_iterator(void *data, u8 *mac,
 static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
 {
 	iwl_trans_stop_device(mvm->trans);
-	iwl_trans_stop_hw(mvm->trans, false);
 
 	mvm->scan_status = IWL_MVM_SCAN_NONE;
 
@@ -470,7 +476,6 @@ static void iwl_mvm_mac_stop(struct ieee80211_hw *hw)
 	cancel_work_sync(&mvm->roc_done_wk);
 
 	iwl_trans_stop_device(mvm->trans);
-	iwl_trans_stop_hw(mvm->trans, false);
 
 	iwl_mvm_async_handlers_purge(mvm);
 	/* async_handlers_list is empty and will stay empty: HW is stopped */
@@ -487,17 +492,6 @@ static void iwl_mvm_mac_stop(struct ieee80211_hw *hw)
 	cancel_work_sync(&mvm->async_handlers_wk);
 }
 
-static void iwl_mvm_pm_disable_iterator(void *data, u8 *mac,
-					struct ieee80211_vif *vif)
-{
-	struct iwl_mvm *mvm = data;
-	int ret;
-
-	ret = iwl_mvm_power_disable(mvm, vif);
-	if (ret)
-		IWL_ERR(mvm, "failed to disable power management\n");
-}
-
 static void iwl_mvm_power_update_iterator(void *data, u8 *mac,
 					  struct ieee80211_vif *vif)
 {
@@ -520,6 +514,20 @@ static struct iwl_mvm_phy_ctxt *iwl_mvm_get_free_phy_ctxt(struct iwl_mvm *mvm)
 	return NULL;
 }
 
+static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+				s8 tx_power)
+{
+	/* FW is in charge of regulatory enforcement */
+	struct iwl_reduce_tx_power_cmd reduce_txpwr_cmd = {
+		.mac_context_id = iwl_mvm_vif_from_mac80211(vif)->id,
+		.pwr_restriction = cpu_to_le16(tx_power),
+	};
+
+	return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, CMD_SYNC,
+				    sizeof(reduce_txpwr_cmd),
+				    &reduce_txpwr_cmd);
+}
+
 static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
 				     struct ieee80211_vif *vif)
 {
@@ -540,26 +548,9 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
 	if (ret)
 		goto out_unlock;
 
-	/*
-	 * TODO: remove this temporary code.
-	 * Currently MVM FW supports power management only on single MAC.
-	 * If new interface added, disable PM on existing interface.
-	 * P2P device is a special case, since it is handled by FW similary to
-	 * scan. If P2P deviced is added, PM remains enabled on existing
-	 * interface.
-	 * Note: the method below does not count the new interface being added
-	 * at this moment.
-	 */
+	/* Counting the number of interfaces is needed for legacy PM */
 	if (vif->type != NL80211_IFTYPE_P2P_DEVICE)
 		mvm->vif_count++;
-	if (mvm->vif_count > 1) {
-		IWL_DEBUG_MAC80211(mvm,
-				   "Disable power on existing interfaces\n");
-		ieee80211_iterate_active_interfaces_atomic(
-					    mvm->hw,
-					    IEEE80211_IFACE_ITER_NORMAL,
-					    iwl_mvm_pm_disable_iterator, mvm);
-	}
 
 	/*
 	 * The AP binding flow can be done only after the beacon
@@ -590,11 +581,7 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
 	if (ret)
 		goto out_release;
 
-	/*
-	 * Update power state on the new interface. Admittedly, based on
-	 * mac80211 logics this power update will disable power management
-	 */
-	iwl_mvm_power_update_mode(mvm, vif);
+	iwl_mvm_power_disable(mvm, vif);
 
 	/* beacon filtering */
 	ret = iwl_mvm_disable_beacon_filter(mvm, vif);
@@ -655,9 +642,12 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
  out_release:
 	if (vif->type != NL80211_IFTYPE_P2P_DEVICE)
 		mvm->vif_count--;
+
+	/* TODO: remove this when legacy PM is discarded */
 	ieee80211_iterate_active_interfaces(
 		mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
 		iwl_mvm_power_update_iterator, mvm);
+
 	iwl_mvm_mac_ctxt_release(mvm, vif);
  out_unlock:
 	mutex_unlock(&mvm->mutex);
@@ -743,21 +733,13 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
 		mvmvif->phy_ctxt = NULL;
 	}
 
-	/*
-	 * TODO: remove this temporary code.
-	 * Currently MVM FW supports power management only on single MAC.
-	 * Check if only one additional interface remains after removing
-	 * current one. Update power mode on the remaining interface.
-	 */
 	if (mvm->vif_count && vif->type != NL80211_IFTYPE_P2P_DEVICE)
 		mvm->vif_count--;
-	IWL_DEBUG_MAC80211(mvm, "Currently %d interfaces active\n",
-			   mvm->vif_count);
-	if (mvm->vif_count == 1) {
-		ieee80211_iterate_active_interfaces(
-					mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
-					iwl_mvm_power_update_iterator, mvm);
-	}
+
+	/* TODO: remove this when legacy PM is discarded */
+	ieee80211_iterate_active_interfaces(
+		mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+		iwl_mvm_power_update_iterator, mvm);
 
 	iwl_mvm_mac_ctxt_remove(mvm, vif);
 
@@ -766,23 +748,91 @@ out_release:
 	mutex_unlock(&mvm->mutex);
 }
 
-static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
-				s8 tx_power)
+static int iwl_mvm_mac_config(struct ieee80211_hw *hw, u32 changed)
 {
-	/* FW is in charge of regulatory enforcement */
-	struct iwl_reduce_tx_power_cmd reduce_txpwr_cmd = {
-		.mac_context_id = iwl_mvm_vif_from_mac80211(vif)->id,
-		.pwr_restriction = cpu_to_le16(tx_power),
+	return 0;
+}
+
+struct iwl_mvm_mc_iter_data {
+	struct iwl_mvm *mvm;
+	int port_id;
+};
+
+static void iwl_mvm_mc_iface_iterator(void *_data, u8 *mac,
+				      struct ieee80211_vif *vif)
+{
+	struct iwl_mvm_mc_iter_data *data = _data;
+	struct iwl_mvm *mvm = data->mvm;
+	struct iwl_mcast_filter_cmd *cmd = mvm->mcast_filter_cmd;
+	int ret, len;
+
+	/* if we don't have free ports, mcast frames will be dropped */
+	if (WARN_ON_ONCE(data->port_id >= MAX_PORT_ID_NUM))
+		return;
+
+	if (vif->type != NL80211_IFTYPE_STATION ||
+	    !vif->bss_conf.assoc)
+		return;
+
+	cmd->port_id = data->port_id++;
+	memcpy(cmd->bssid, vif->bss_conf.bssid, ETH_ALEN);
+	len = roundup(sizeof(*cmd) + cmd->count * ETH_ALEN, 4);
+
+	ret = iwl_mvm_send_cmd_pdu(mvm, MCAST_FILTER_CMD, CMD_SYNC, len, cmd);
+	if (ret)
+		IWL_ERR(mvm, "mcast filter cmd error. ret=%d\n", ret);
+}
+
+static void iwl_mvm_recalc_multicast(struct iwl_mvm *mvm)
+{
+	struct iwl_mvm_mc_iter_data iter_data = {
+		.mvm = mvm,
 	};
 
-	return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, CMD_SYNC,
-				    sizeof(reduce_txpwr_cmd),
-				    &reduce_txpwr_cmd);
+	lockdep_assert_held(&mvm->mutex);
+
+	if (WARN_ON_ONCE(!mvm->mcast_filter_cmd))
+		return;
+
+	ieee80211_iterate_active_interfaces(
+		mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+		iwl_mvm_mc_iface_iterator, &iter_data);
 }
 
-static int iwl_mvm_mac_config(struct ieee80211_hw *hw, u32 changed)
+static u64 iwl_mvm_prepare_multicast(struct ieee80211_hw *hw,
+				     struct netdev_hw_addr_list *mc_list)
 {
-	return 0;
+	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+	struct iwl_mcast_filter_cmd *cmd;
+	struct netdev_hw_addr *addr;
+	int addr_count = netdev_hw_addr_list_count(mc_list);
+	bool pass_all = false;
+	int len;
+
+	if (addr_count > MAX_MCAST_FILTERING_ADDRESSES) {
+		pass_all = true;
+		addr_count = 0;
+	}
+
+	len = roundup(sizeof(*cmd) + addr_count * ETH_ALEN, 4);
+	cmd = kzalloc(len, GFP_ATOMIC);
+	if (!cmd)
+		return 0;
+
+	if (pass_all) {
+		cmd->pass_all = 1;
+		return (u64)(unsigned long)cmd;
+	}
+
+	netdev_hw_addr_list_for_each(addr, mc_list) {
+		IWL_DEBUG_MAC80211(mvm, "mcast addr (%d): %pM\n",
+				   cmd->count, addr->addr);
+		memcpy(&cmd->addr_list[cmd->count * ETH_ALEN],
+		       addr->addr, ETH_ALEN);
+		cmd->count++;
+	}
+
+	return (u64)(unsigned long)cmd;
 }
 
 static void iwl_mvm_configure_filter(struct ieee80211_hw *hw,
@@ -790,21 +840,22 @@ static void iwl_mvm_configure_filter(struct ieee80211_hw *hw,
 				     unsigned int *total_flags,
 				     u64 multicast)
 {
-	*total_flags = 0;
-}
+	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+	struct iwl_mcast_filter_cmd *cmd = (void *)(unsigned long)multicast;
 
-static int iwl_mvm_configure_mcast_filter(struct iwl_mvm *mvm,
-					  struct ieee80211_vif *vif)
-{
-	struct iwl_mcast_filter_cmd mcast_filter_cmd = {
-		.pass_all = 1,
-	};
+	mutex_lock(&mvm->mutex);
 
-	memcpy(mcast_filter_cmd.bssid, vif->bss_conf.bssid, ETH_ALEN);
+	/* replace previous configuration */
+	kfree(mvm->mcast_filter_cmd);
+	mvm->mcast_filter_cmd = cmd;
 
-	return iwl_mvm_send_cmd_pdu(mvm, MCAST_FILTER_CMD, CMD_SYNC,
-				    sizeof(mcast_filter_cmd),
-				    &mcast_filter_cmd);
+	if (!cmd)
+		goto out;
+
+	iwl_mvm_recalc_multicast(mvm);
+out:
+	mutex_unlock(&mvm->mutex);
+	*total_flags = 0;
 }
 
 static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
@@ -815,6 +866,14 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
 	int ret;
 
+	/*
+	 * Re-calculate the tsf id, as the master-slave relations depend on the
+	 * beacon interval, which was not known when the station interface was
+	 * added.
+	 */
+	if (changes & BSS_CHANGED_ASSOC && bss_conf->assoc)
+		iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif);
+
 	ret = iwl_mvm_mac_ctxt_changed(mvm, vif);
 	if (ret)
 		IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr);
@@ -827,7 +886,6 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
 				IWL_ERR(mvm, "failed to update quotas\n");
 				return;
 			}
-			iwl_mvm_configure_mcast_filter(mvm, vif);
 
 			if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART,
 				     &mvm->status)) {
@@ -849,7 +907,17 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
 				iwl_mvm_protect_session(mvm, vif, dur, dur,
 							5 * dur);
 			}
+
+			iwl_mvm_sf_update(mvm, vif, false);
+			iwl_mvm_power_vif_assoc(mvm, vif);
 		} else if (mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
+			/*
+			 * If the update fails, SF might be running in
+			 * associated mode while disassociated, which is
+			 * forbidden.
+			 */
+			WARN_ONCE(iwl_mvm_sf_update(mvm, vif, false),
+				  "Failed to update SF upon disassociation\n");
+
 			/* remove AP station now that the MAC is unassoc */
 			ret = iwl_mvm_rm_sta_id(mvm, vif, mvmvif->ap_sta_id);
 			if (ret)
@@ -861,6 +929,8 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
 				IWL_ERR(mvm, "failed to update quotas\n");
 		}
 
+		iwl_mvm_recalc_multicast(mvm);
+
 		/* reset rssi values */
 		mvmvif->bf_data.ave_beacon_signal = 0;
 
@@ -874,6 +944,8 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
 				IWL_ERR(mvm, "failed to update power mode\n");
 		}
 		iwl_mvm_bt_coex_vif_change(mvm);
+		iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_TT,
+				    IEEE80211_SMPS_AUTOMATIC);
 	} else if (changes & BSS_CHANGED_BEACON_INFO) {
 		/*
 		 * We received a beacon _after_ association so
@@ -881,7 +953,8 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
 		 */
 		iwl_mvm_remove_time_event(mvm, mvmvif,
 					  &mvmvif->time_event_data);
-	} else if (changes & (BSS_CHANGED_PS | BSS_CHANGED_QOS)) {
+	} else if (changes & (BSS_CHANGED_PS | BSS_CHANGED_P2P_PS |
+			      BSS_CHANGED_QOS)) {
 		ret = iwl_mvm_power_update_mode(mvm, vif);
 		if (ret)
 			IWL_ERR(mvm, "failed to update power mode\n");
@@ -916,6 +989,13 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
 	if (ret)
 		goto out_unlock;
 
+	/*
+	 * Re-calculate the tsf id, as the master-slave relations depend on the
+	 * beacon interval, which was not known when the AP interface was added.
+	 */
+	if (vif->type == NL80211_IFTYPE_AP)
+		iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif);
+
 	/* Add the mac context */
 	ret = iwl_mvm_mac_ctxt_add(mvm, vif);
 	if (ret)
@@ -934,9 +1014,16 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
 	if (ret)
 		goto out_unbind;
 
+	/* must be set before quota calculations */
+	mvmvif->ap_ibss_active = true;
+
+	/* power update needs to be done before quotas */
+	mvm->bound_vif_cnt++;
+	iwl_mvm_power_update_binding(mvm, vif, true);
+
 	ret = iwl_mvm_update_quotas(mvm, vif);
 	if (ret)
-		goto out_rm_bcast;
+		goto out_quota_failed;
 
 	/* Need to update the P2P Device MAC (only GO, IBSS is single vif) */
 	if (vif->p2p && mvm->p2p_device_vif)
@@ -947,7 +1034,10 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
 	mutex_unlock(&mvm->mutex);
 	return 0;
 
-out_rm_bcast:
+out_quota_failed:
+	mvm->bound_vif_cnt--;
+	iwl_mvm_power_update_binding(mvm, vif, false);
+	mvmvif->ap_ibss_active = false;
 	iwl_mvm_send_rm_bcast_sta(mvm, &mvmvif->bcast_sta);
 out_unbind:
 	iwl_mvm_binding_remove_vif(mvm, vif);
@@ -979,6 +1069,10 @@ static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw,
 	iwl_mvm_update_quotas(mvm, NULL);
 	iwl_mvm_send_rm_bcast_sta(mvm, &mvmvif->bcast_sta);
 	iwl_mvm_binding_remove_vif(mvm, vif);
+
+	mvm->bound_vif_cnt--;
+	iwl_mvm_power_update_binding(mvm, vif, false);
+
 	iwl_mvm_mac_ctxt_remove(mvm, vif);
 
 	mutex_unlock(&mvm->mutex);
@@ -990,6 +1084,22 @@ iwl_mvm_bss_info_changed_ap_ibss(struct iwl_mvm *mvm,
 				 struct ieee80211_bss_conf *bss_conf,
 				 u32 changes)
 {
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	enum ieee80211_bss_change ht_change = BSS_CHANGED_ERP_CTS_PROT |
+					      BSS_CHANGED_HT |
+					      BSS_CHANGED_BANDWIDTH;
+	int ret;
+
+	/* Changes will be applied when the AP/IBSS is started */
+	if (!mvmvif->ap_ibss_active)
+		return;
+
+	if (changes & ht_change) {
+		ret = iwl_mvm_mac_ctxt_changed(mvm, vif);
+		if (ret)
+			IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr);
+	}
+
 	/* Need to send a new beacon template to the FW */
 	if (changes & BSS_CHANGED_BEACON) {
 		if (iwl_mvm_mac_ctxt_beacon_changed(mvm, vif))
@@ -1080,7 +1190,7 @@ static void iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw,
 				   struct ieee80211_sta *sta)
 {
 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
-	struct iwl_mvm_sta *mvmsta = (void *)sta->drv_priv;
+	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
 
 	switch (cmd) {
 	case STA_NOTIFY_SLEEP:
@@ -1102,6 +1212,28 @@ static void iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw,
 	}
 }
 
+static void iwl_mvm_sta_pre_rcu_remove(struct ieee80211_hw *hw,
+				       struct ieee80211_vif *vif,
+				       struct ieee80211_sta *sta)
+{
+	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+
+	/*
+	 * This is called before mac80211 does RCU synchronisation,
+	 * so here we already invalidate our internal RCU-protected
+	 * station pointer. The rest of the code will thus no longer
+	 * be able to find the station this way, and we don't rely
+	 * on further RCU synchronisation after the sta_state()
+	 * callback deleted the station.
+	 */
+	mutex_lock(&mvm->mutex);
+	if (sta == rcu_access_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id]))
+		rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id],
+				   ERR_PTR(-ENOENT));
+	mutex_unlock(&mvm->mutex);
+}
+
 static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
 				 struct ieee80211_vif *vif,
 				 struct ieee80211_sta *sta,
@@ -1149,7 +1281,8 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
 		ret = iwl_mvm_update_sta(mvm, vif, sta);
 		if (ret == 0)
 			iwl_mvm_rs_rate_init(mvm, sta,
-					     mvmvif->phy_ctxt->channel->band);
+					     mvmvif->phy_ctxt->channel->band,
+					     true);
 	} else if (old_state == IEEE80211_STA_ASSOC &&
 		   new_state == IEEE80211_STA_AUTHORIZED) {
 		/* enable beacon filtering */
@@ -1187,6 +1320,17 @@ static int iwl_mvm_mac_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
 	return 0;
 }
 
+static void iwl_mvm_sta_rc_update(struct ieee80211_hw *hw,
+				  struct ieee80211_vif *vif,
+				  struct ieee80211_sta *sta, u32 changed)
+{
+	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+	if (vif->type == NL80211_IFTYPE_STATION &&
+	    changed & IEEE80211_RC_NSS_CHANGED)
+		iwl_mvm_sf_update(mvm, vif, false);
+}
+
 static int iwl_mvm_mac_conf_tx(struct ieee80211_hw *hw,
 			       struct ieee80211_vif *vif, u16 ac,
 			       const struct ieee80211_tx_queue_params *params)
@@ -1309,7 +1453,12 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
 		 */
 		return 0;
 	default:
-		return -EOPNOTSUPP;
+		/* currently FW supports only one optional cipher scheme */
+		if (hw->n_cipher_schemes &&
+		    hw->cipher_schemes->cipher == key->cipher)
+			key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
+		else
+			return -EOPNOTSUPP;
 	}
 
 	mutex_lock(&mvm->mutex);
@@ -1515,7 +1664,7 @@ static int iwl_mvm_add_chanctx(struct ieee80211_hw *hw,
 		goto out;
 	}
 
-	ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx->def,
+	ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx->min_def,
 				       ctx->rx_chains_static,
 				       ctx->rx_chains_dynamic);
 	if (ret) {
@@ -1553,13 +1702,14 @@ static void iwl_mvm_change_chanctx(struct ieee80211_hw *hw,
 	if (WARN_ONCE((phy_ctxt->ref > 1) &&
 		      (changed & ~(IEEE80211_CHANCTX_CHANGE_WIDTH |
 				   IEEE80211_CHANCTX_CHANGE_RX_CHAINS |
-				   IEEE80211_CHANCTX_CHANGE_RADAR)),
+				   IEEE80211_CHANCTX_CHANGE_RADAR |
+				   IEEE80211_CHANCTX_CHANGE_MIN_WIDTH)),
 		      "Cannot change PHY. Ref=%d, changed=0x%X\n",
 		      phy_ctxt->ref, changed))
 		return;
 
 	mutex_lock(&mvm->mutex);
-	iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx->def,
+	iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx->min_def,
 				 ctx->rx_chains_static,
 				 ctx->rx_chains_dynamic);
 	iwl_mvm_bt_coex_vif_change(mvm);
@@ -1602,7 +1752,13 @@ static int iwl_mvm_assign_vif_chanctx(struct ieee80211_hw *hw,
 		goto out_unlock;
 
 	/*
-	 * Setting the quota at this stage is only required for monitor
+	 * Power state must be updated before quotas,
+	 * otherwise fw will complain.
+	 */
+	mvm->bound_vif_cnt++;
+	iwl_mvm_power_update_binding(mvm, vif, true);
+
+	/* Setting the quota at this stage is only required for monitor
 	 * interfaces. For the other types, the bss_info changed flow
 	 * will handle quota settings.
 	 */
@@ -1617,6 +1773,8 @@ static int iwl_mvm_assign_vif_chanctx(struct ieee80211_hw *hw,
 
  out_remove_binding:
 	iwl_mvm_binding_remove_vif(mvm, vif);
+	mvm->bound_vif_cnt--;
+	iwl_mvm_power_update_binding(mvm, vif, false);
  out_unlock:
 	mutex_unlock(&mvm->mutex);
 	if (ret)
@@ -1648,6 +1806,9 @@ static void iwl_mvm_unassign_vif_chanctx(struct ieee80211_hw *hw,
 	}
 
 	iwl_mvm_binding_remove_vif(mvm, vif);
+	mvm->bound_vif_cnt--;
+	iwl_mvm_power_update_binding(mvm, vif, false);
+
 out_unlock:
 	mvmvif->phy_ctxt = NULL;
 	mutex_unlock(&mvm->mutex);
@@ -1744,14 +1905,17 @@ struct ieee80211_ops iwl_mvm_hw_ops = {
 	.add_interface = iwl_mvm_mac_add_interface,
 	.remove_interface = iwl_mvm_mac_remove_interface,
 	.config = iwl_mvm_mac_config,
+	.prepare_multicast = iwl_mvm_prepare_multicast,
 	.configure_filter = iwl_mvm_configure_filter,
 	.bss_info_changed = iwl_mvm_bss_info_changed,
 	.hw_scan = iwl_mvm_mac_hw_scan,
 	.cancel_hw_scan = iwl_mvm_mac_cancel_hw_scan,
+	.sta_pre_rcu_remove = iwl_mvm_sta_pre_rcu_remove,
 	.sta_state = iwl_mvm_mac_sta_state,
 	.sta_notify = iwl_mvm_mac_sta_notify,
 	.allow_buffered_frames = iwl_mvm_mac_allow_buffered_frames,
 	.set_rts_threshold = iwl_mvm_mac_set_rts_threshold,
+	.sta_rc_update = iwl_mvm_sta_rc_update,
 	.conf_tx = iwl_mvm_mac_conf_tx,
 	.mgd_prepare_tx = iwl_mvm_mac_mgd_prepare_tx,
 	.sched_scan_start = iwl_mvm_mac_sched_scan_start,
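
The prepare_multicast()/configure_filter() pair added above passes the allocated filter command back to the driver through mac80211's opaque u64 multicast argument, and iwl_mvm_recalc_multicast() then replays it for each associated station interface. Below is a minimal user-space sketch of that cookie-passing pattern; the struct layout and the names mcast_cmd, prepare_multicast and apply_multicast are illustrative only, not the driver's or mac80211's real API.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define ETH_ALEN  6
#define MAX_ADDRS 4

struct mcast_cmd {
	uint8_t pass_all;                        /* too many addresses: filter nothing */
	uint8_t count;                           /* number of addresses that follow */
	uint8_t addr_list[MAX_ADDRS * ETH_ALEN];
};

/* Built in atomic context; ownership is handed back through the cookie. */
static uint64_t prepare_multicast(uint8_t addrs[][ETH_ALEN], int n)
{
	struct mcast_cmd *cmd = calloc(1, sizeof(*cmd));
	int i;

	if (!cmd)
		return 0;
	if (n > MAX_ADDRS) {
		cmd->pass_all = 1;
		return (uint64_t)(uintptr_t)cmd;
	}
	for (i = 0; i < n; i++) {
		memcpy(&cmd->addr_list[cmd->count * ETH_ALEN], addrs[i], ETH_ALEN);
		cmd->count++;
	}
	return (uint64_t)(uintptr_t)cmd;
}

/* Later, in sleeping context, unpack the cookie and take ownership. */
static void apply_multicast(uint64_t multicast)
{
	struct mcast_cmd *cmd = (void *)(uintptr_t)multicast;

	if (!cmd)
		return;
	printf("pass_all=%u count=%u\n", cmd->pass_all, cmd->count);
	free(cmd);    /* the driver instead keeps it in mvm->mcast_filter_cmd */
}

int main(void)
{
	uint8_t addrs[2][ETH_ALEN] = {
		{ 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 },
		{ 0x33, 0x33, 0x00, 0x00, 0x00, 0x01 },
	};

	apply_multicast(prepare_multicast(addrs, 2));
	return 0;
}
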
diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h
index fed21ef4162d..e4ead86f06d6 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h
@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -81,6 +81,7 @@
 #define IWL_MVM_MAX_ADDRESSES		5
 /* RSSI offset for WkP */
 #define IWL_RSSI_OFFSET 50
+#define IWL_MVM_MISSED_BEACONS_THRESHOLD 8
 
 enum iwl_mvm_tx_fifo {
 	IWL_MVM_TX_FIFO_BK = 0,
@@ -163,6 +164,8 @@ struct iwl_mvm_power_ops {
 				 struct ieee80211_vif *vif);
 	int (*power_update_device_mode)(struct iwl_mvm *mvm);
 	int (*power_disable)(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+	void (*power_update_binding)(struct iwl_mvm *mvm,
+				     struct ieee80211_vif *vif, bool assign);
 #ifdef CONFIG_IWLWIFI_DEBUGFS
 	int (*power_dbgfs_read)(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 				char *buf, int bufsz);
@@ -181,6 +184,7 @@ enum iwl_dbgfs_pm_mask {
 	MVM_DEBUGFS_PM_LPRX_ENA = BIT(6),
 	MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD = BIT(7),
 	MVM_DEBUGFS_PM_SNOOZE_ENABLE = BIT(8),
+	MVM_DEBUGFS_PM_UAPSD_MISBEHAVING = BIT(9),
 };
 
 struct iwl_dbgfs_pm {
@@ -193,6 +197,7 @@ struct iwl_dbgfs_pm {
 	bool lprx_ena;
 	u32 lprx_rssi_threshold;
 	bool snooze_ena;
+	bool uapsd_misbehaving;
 	int mask;
 };
 
@@ -269,8 +274,8 @@ struct iwl_mvm_vif_bf_data {
  * @bcast_sta: station used for broadcast packets. Used by the following
  *  vifs: P2P_DEVICE, GO and AP.
  * @beacon_skb: the skb used to hold the AP/GO beacon template
- * @smps_requests: the requests of of differents parts of the driver, regard
-	the desired smps mode.
+ * @smps_requests: the SMPS requests of different parts of the driver,
+ *	combined on update to yield the overall request to mac80211.
  */
 struct iwl_mvm_vif {
 	u16 id;
@@ -323,14 +328,19 @@ struct iwl_mvm_vif {
 #endif
 
 #ifdef CONFIG_IWLWIFI_DEBUGFS
+	struct iwl_mvm *mvm;
 	struct dentry *dbgfs_dir;
 	struct dentry *dbgfs_slink;
-	void *dbgfs_data;
 	struct iwl_dbgfs_pm dbgfs_pm;
 	struct iwl_dbgfs_bf dbgfs_bf;
 #endif
 
 	enum ieee80211_smps_mode smps_requests[NUM_IWL_MVM_SMPS_REQ];
+
+	/* FW identified misbehaving AP */
+	u8 uapsd_misbehaving_bssid[ETH_ALEN];
+
+	bool pm_prevented;
 };
 
 static inline struct iwl_mvm_vif *
@@ -479,6 +489,7 @@ struct iwl_mvm {
 	/* Scan status, cmd (pre-allocated) and auxiliary station */
 	enum iwl_scan_status scan_status;
 	struct iwl_scan_cmd *scan_cmd;
+	struct iwl_mcast_filter_cmd *mcast_filter_cmd;
 
 	/* rx chain antennas set through debugfs for the scan command */
 	u8 scan_rx_ant;
@@ -489,11 +500,19 @@ struct iwl_mvm {
 	u8 scan_last_antenna_idx; /* to toggle TX between antennas */
 	u8 mgmt_last_antenna_idx;
 
+	/* last smart fifo state that was successfully sent to firmware */
+	enum iwl_sf_state sf_state;
+
 #ifdef CONFIG_IWLWIFI_DEBUGFS
 	struct dentry *debugfs_dir;
 	u32 dbgfs_sram_offset, dbgfs_sram_len;
 	bool disable_power_off;
 	bool disable_power_off_d3;
+
+	struct debugfs_blob_wrapper nvm_hw_blob;
+	struct debugfs_blob_wrapper nvm_sw_blob;
+	struct debugfs_blob_wrapper nvm_calib_blob;
+	struct debugfs_blob_wrapper nvm_prod_blob;
 #endif
 
 	struct iwl_mvm_phy_ctxt phy_ctxts[NUM_PHY_CTX];
@@ -507,12 +526,6 @@ struct iwl_mvm {
 	 */
 	unsigned long fw_key_table[BITS_TO_LONGS(STA_KEY_MAX_NUM)];
 
-	/*
-	 * This counter of created interfaces is referenced only in conjunction
-	 * with FW limitation related to power management. Currently PM is
-	 * supported only on a single interface.
-	 * IMPORTANT: this variable counts all interfaces except P2P device.
-	 */
 	u8 vif_count;
 
 	/* -1 for always, 0 for never, >0 for that many times */
@@ -531,6 +544,7 @@ struct iwl_mvm {
 	bool store_d3_resume_sram;
 	void *d3_resume_sram;
 	u32 d3_test_pme_ptr;
+	struct ieee80211_vif *keep_vif;
 #endif
 #endif
 
@@ -554,6 +568,11 @@ struct iwl_mvm {
 	u8 aux_queue;
 	u8 first_agg_queue;
 	u8 last_agg_queue;
+
+	u8 bound_vif_cnt;
+
+	/* Indicate if device power save is prevented */
+	bool ps_prevented;
 };
 
 /* Extract MVM priv from op_mode and _hw */
@@ -693,6 +712,8 @@ int iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
 int iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
 				    struct iwl_rx_cmd_buffer *rxb,
 				    struct iwl_device_cmd *cmd);
+void iwl_mvm_mac_ctxt_recalc_tsf_id(struct iwl_mvm *mvm,
+				    struct ieee80211_vif *vif);
 
 /* Bindings */
 int iwl_mvm_binding_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
@@ -750,8 +771,7 @@ iwl_mvm_vif_dbgfs_clean(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 #endif /* CONFIG_IWLWIFI_DEBUGFS */
 
 /* rate scaling */
-int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq,
-			u8 flags, bool init);
+int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool init);
 
 /* power management */
 static inline int iwl_mvm_power_update_mode(struct iwl_mvm *mvm,
@@ -773,6 +793,19 @@ static inline int iwl_mvm_power_update_device_mode(struct iwl_mvm *mvm)
 	return 0;
 }
 
+static inline void iwl_mvm_power_update_binding(struct iwl_mvm *mvm,
+						struct ieee80211_vif *vif,
+						bool assign)
+{
+	if (mvm->pm_ops->power_update_binding)
+		mvm->pm_ops->power_update_binding(mvm, vif, assign);
+}
+
+void iwl_mvm_power_vif_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+int iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm,
+					     struct iwl_rx_cmd_buffer *rxb,
+					     struct iwl_device_cmd *cmd);
+
 #ifdef CONFIG_IWLWIFI_DEBUGFS
 static inline int iwl_mvm_power_dbgfs_read(struct iwl_mvm *mvm,
 					    struct ieee80211_vif *vif,
@@ -864,4 +897,8 @@ void iwl_mvm_tt_initialize(struct iwl_mvm *mvm);
 void iwl_mvm_tt_exit(struct iwl_mvm *mvm);
 void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state);
 
+/* smart fifo */
+int iwl_mvm_sf_update(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+		      bool added_vif);
+
 #endif /* __IWL_MVM_H__ */
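
The new power_update_binding hook above is optional in the power ops table, so the inline wrapper only dispatches when the active ops actually provide it. A small stand-alone sketch of that optional-callback pattern, using made-up names (power_ops, legacy_ops, binding_ops):

#include <stdbool.h>
#include <stdio.h>

struct power_ops {
	int  (*update_mode)(int vif_id);
	void (*update_binding)(int vif_id, bool assign);   /* optional hook */
};

static int update_mode(int vif_id)
{
	printf("update mode on vif %d\n", vif_id);
	return 0;
}

static void update_binding(int vif_id, bool assign)
{
	printf("vif %d %s binding\n", vif_id, assign ? "joins" : "leaves");
}

/* One ops table omits the hook, the other provides it. */
static const struct power_ops legacy_ops  = { .update_mode = update_mode };
static const struct power_ops binding_ops = {
	.update_mode    = update_mode,
	.update_binding = update_binding,
};

static inline void power_update_binding(const struct power_ops *ops,
					int vif_id, bool assign)
{
	if (ops->update_binding)
		ops->update_binding(vif_id, assign);
}

int main(void)
{
	power_update_binding(&legacy_ops, 0, true);    /* silently skipped */
	power_update_binding(&binding_ops, 0, true);   /* dispatched */
	return 0;
}
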
diff --git a/drivers/net/wireless/iwlwifi/mvm/nvm.c b/drivers/net/wireless/iwlwifi/mvm/nvm.c
index 2beffd028b67..35b71af78d02 100644
--- a/drivers/net/wireless/iwlwifi/mvm/nvm.c
+++ b/drivers/net/wireless/iwlwifi/mvm/nvm.c
@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -367,16 +367,17 @@ static int iwl_mvm_read_external_nvm(struct iwl_mvm *mvm)
 			break;
 		}
 
+		if (WARN(section_id >= NVM_NUM_OF_SECTIONS,
+			 "Invalid NVM section ID %d\n", section_id)) {
+			ret = -EINVAL;
+			break;
+		}
+
 		temp = kmemdup(file_sec->data, section_size, GFP_KERNEL);
 		if (!temp) {
 			ret = -ENOMEM;
 			break;
 		}
-		if (WARN_ON(section_id >= NVM_NUM_OF_SECTIONS)) {
-			IWL_ERR(mvm, "Invalid NVM section ID\n");
-			ret = -EINVAL;
-			break;
-		}
 		mvm->nvm_sections[section_id].data = temp;
 		mvm->nvm_sections[section_id].length = section_size;
 
@@ -391,17 +392,16 @@ out:
 /* Loads the NVM data stored in mvm->nvm_sections into the NIC */
 int iwl_mvm_load_nvm_to_nic(struct iwl_mvm *mvm)
 {
-	int i, ret;
-	u16 section_id;
+	int i, ret = 0;
 	struct iwl_nvm_section *sections = mvm->nvm_sections;
 
 	IWL_DEBUG_EEPROM(mvm->trans->dev, "Write to NVM\n");
 
-	for (i = 0; i < ARRAY_SIZE(nvm_to_read); i++) {
-		section_id = nvm_to_read[i];
-		ret = iwl_nvm_write_section(mvm, section_id,
-					    sections[section_id].data,
-					    sections[section_id].length);
+	for (i = 0; i < ARRAY_SIZE(mvm->nvm_sections); i++) {
+		if (!mvm->nvm_sections[i].data || !mvm->nvm_sections[i].length)
+			continue;
+		ret = iwl_nvm_write_section(mvm, i, sections[i].data,
+					    sections[i].length);
 		if (ret < 0) {
 			IWL_ERR(mvm, "iwl_mvm_send_cmd failed: %d\n", ret);
 			break;
@@ -443,6 +443,29 @@ int iwl_nvm_init(struct iwl_mvm *mvm)
 			}
 			mvm->nvm_sections[section].data = temp;
 			mvm->nvm_sections[section].length = ret;
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+			switch (section) {
+			case NVM_SECTION_TYPE_HW:
+				mvm->nvm_hw_blob.data = temp;
+				mvm->nvm_hw_blob.size = ret;
+				break;
+			case NVM_SECTION_TYPE_SW:
+				mvm->nvm_sw_blob.data = temp;
+				mvm->nvm_sw_blob.size  = ret;
+				break;
+			case NVM_SECTION_TYPE_CALIBRATION:
+				mvm->nvm_calib_blob.data = temp;
+				mvm->nvm_calib_blob.size  = ret;
+				break;
+			case NVM_SECTION_TYPE_PRODUCTION:
+				mvm->nvm_prod_blob.data = temp;
+				mvm->nvm_prod_blob.size  = ret;
+				break;
+			default:
+				WARN(1, "section: %d", section);
+			}
+#endif
 		}
 		kfree(nvm_buffer);
 		if (ret < 0)
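
The reordering in iwl_mvm_read_external_nvm() above validates the section id before kmemdup(), so an invalid id no longer leaks the freshly duplicated buffer. A rough user-space illustration of the same check-before-allocate ordering; parse_section() and NUM_SECTIONS are invented for the example:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NUM_SECTIONS 16

static char *sections[NUM_SECTIONS];

static int parse_section(unsigned int id, const char *data, size_t len)
{
	char *copy;

	/* Reject the id first: nothing has been allocated yet. */
	if (id >= NUM_SECTIONS) {
		fprintf(stderr, "invalid section id %u\n", id);
		return -1;
	}

	copy = malloc(len + 1);
	if (!copy)
		return -1;
	memcpy(copy, data, len);
	copy[len] = '\0';

	free(sections[id]);
	sections[id] = copy;
	return 0;
}

int main(void)
{
	parse_section(3, "calib", 5);    /* stored */
	parse_section(99, "bogus", 5);   /* rejected, nothing leaked */
	printf("section 3: %s\n", sections[3]);
	free(sections[3]);
	return 0;
}
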
diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c
index d86083c6f445..a3d43de342d7 100644
--- a/drivers/net/wireless/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/iwlwifi/mvm/ops.c
@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -236,6 +236,8 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
 		   false),
 
 	RX_HANDLER(REPLY_ERROR, iwl_mvm_rx_fw_error, false),
+	RX_HANDLER(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION,
+		   iwl_mvm_power_uapsd_misbehaving_ap_notif, false),
 };
 #undef RX_HANDLER
 #define CMD(x) [x] = #x
@@ -307,10 +309,12 @@ static const char *iwl_mvm_cmd_strings[REPLY_MAX] = {
 	CMD(BT_PROFILE_NOTIFICATION),
 	CMD(BT_CONFIG),
 	CMD(MCAST_FILTER_CMD),
+	CMD(REPLY_SF_CFG_CMD),
 	CMD(REPLY_BEACON_FILTERING_CMD),
 	CMD(REPLY_THERMAL_MNG_BACKOFF),
 	CMD(MAC_PM_POWER_TABLE),
 	CMD(BT_COEX_CI),
+	CMD(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION),
 };
 #undef CMD
 
@@ -341,7 +345,6 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
 
 	op_mode = hw->priv;
 	op_mode->ops = &iwl_mvm_ops;
-	op_mode->trans = trans;
 
 	mvm = IWL_OP_MODE_GET_MVM(op_mode);
 	mvm->dev = trans->dev;
@@ -359,6 +362,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
 		mvm->aux_queue = 11;
 		mvm->first_agg_queue = 12;
 	}
+	mvm->sf_state = SF_UNINIT;
 
 	mutex_init(&mvm->mutex);
 	spin_lock_init(&mvm->async_handlers_lock);
@@ -424,7 +428,9 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
 	 * there is no need to unnecessarily power up the NIC at driver load
 	 */
 	if (iwlwifi_mod_params.nvm_file) {
-			iwl_nvm_init(mvm);
+		err = iwl_nvm_init(mvm);
+		if (err)
+			goto out_free;
 	} else {
 		err = iwl_trans_start_hw(mvm->trans);
 		if (err)
@@ -432,16 +438,13 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
 
 		mutex_lock(&mvm->mutex);
 		err = iwl_run_init_mvm_ucode(mvm, true);
+		iwl_trans_stop_device(trans);
 		mutex_unlock(&mvm->mutex);
 		/* returns 0 if successful, 1 if success but in rfkill */
 		if (err < 0 && !iwlmvm_mod_params.init_dbg) {
 			IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", err);
 			goto out_free;
 		}
-
-		/* Stop the hw after the ALIVE and NVM has been read */
-		if (!iwlmvm_mod_params.init_dbg)
-			iwl_trans_stop_hw(mvm->trans, false);
 	}
 
 	scan_size = sizeof(struct iwl_scan_cmd) +
@@ -470,11 +473,12 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
 
  out_unregister:
 	ieee80211_unregister_hw(mvm->hw);
+	iwl_mvm_leds_exit(mvm);
  out_free:
 	iwl_phy_db_free(mvm->phy_db);
 	kfree(mvm->scan_cmd);
 	if (!iwlwifi_mod_params.nvm_file)
-		iwl_trans_stop_hw(trans, true);
+		iwl_trans_op_mode_leave(trans);
 	ieee80211_free_hw(mvm->hw);
 	return NULL;
 }
@@ -491,12 +495,14 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
 	ieee80211_unregister_hw(mvm->hw);
 
 	kfree(mvm->scan_cmd);
+	kfree(mvm->mcast_filter_cmd);
+	mvm->mcast_filter_cmd = NULL;
 
 #if defined(CONFIG_PM_SLEEP) && defined(CONFIG_IWLWIFI_DEBUGFS)
 	kfree(mvm->d3_resume_sram);
 #endif
 
-	iwl_trans_stop_hw(mvm->trans, true);
+	iwl_trans_op_mode_leave(mvm->trans);
 
 	iwl_phy_db_free(mvm->phy_db);
 	mvm->phy_db = NULL;
@@ -661,6 +667,8 @@ static void iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
 	else
 		clear_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);
 
+	if (state && mvm->cur_ucode != IWL_UCODE_INIT)
+		iwl_trans_stop_device(mvm->trans);
 	wiphy_rfkill_set_hw_state(mvm->hw->wiphy, iwl_mvm_is_radio_killed(mvm));
 }
 
diff --git a/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c
index a8652ddd6bed..b7268c0b3333 100644
--- a/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c
+++ b/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c
@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/mvm/power.c b/drivers/net/wireless/iwlwifi/mvm/power.c
index 550824aa84ea..d9eab3b7bb9f 100644
--- a/drivers/net/wireless/iwlwifi/mvm/power.c
+++ b/drivers/net/wireless/iwlwifi/mvm/power.c
@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -64,7 +64,6 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/slab.h>
-#include <linux/init.h>
 
 #include <net/mac80211.h>
 
@@ -186,6 +185,92 @@ static void iwl_mvm_power_log(struct iwl_mvm *mvm,
 	}
 }
 
+static void iwl_mvm_power_configure_uapsd(struct iwl_mvm *mvm,
+					  struct ieee80211_vif *vif,
+					  struct iwl_mac_power_cmd *cmd)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	enum ieee80211_ac_numbers ac;
+	bool tid_found = false;
+
+	for (ac = IEEE80211_AC_VO; ac <= IEEE80211_AC_BK; ac++) {
+		if (!mvmvif->queue_params[ac].uapsd)
+			continue;
+
+		if (mvm->cur_ucode != IWL_UCODE_WOWLAN)
+			cmd->flags |=
+				cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK);
+
+		cmd->uapsd_ac_flags |= BIT(ac);
+
+		/* QNDP TID - the highest TID with no admission control */
+		if (!tid_found && !mvmvif->queue_params[ac].acm) {
+			tid_found = true;
+			switch (ac) {
+			case IEEE80211_AC_VO:
+				cmd->qndp_tid = 6;
+				break;
+			case IEEE80211_AC_VI:
+				cmd->qndp_tid = 5;
+				break;
+			case IEEE80211_AC_BE:
+				cmd->qndp_tid = 0;
+				break;
+			case IEEE80211_AC_BK:
+				cmd->qndp_tid = 1;
+				break;
+			}
+		}
+	}
+
+	if (!(cmd->flags & cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK)))
+		return;
+
+	cmd->flags |= cpu_to_le16(POWER_FLAGS_UAPSD_MISBEHAVING_ENA_MSK);
+
+	if (cmd->uapsd_ac_flags == (BIT(IEEE80211_AC_VO) |
+				    BIT(IEEE80211_AC_VI) |
+				    BIT(IEEE80211_AC_BE) |
+				    BIT(IEEE80211_AC_BK))) {
+		cmd->flags |= cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK);
+		cmd->snooze_interval = cpu_to_le16(IWL_MVM_PS_SNOOZE_INTERVAL);
+		cmd->snooze_window = (mvm->cur_ucode == IWL_UCODE_WOWLAN) ?
+			cpu_to_le16(IWL_MVM_WOWLAN_PS_SNOOZE_WINDOW) :
+			cpu_to_le16(IWL_MVM_PS_SNOOZE_WINDOW);
+	}
+
+	cmd->uapsd_max_sp = IWL_UAPSD_MAX_SP;
+
+	if (mvm->cur_ucode == IWL_UCODE_WOWLAN || cmd->flags &
+	    cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)) {
+		cmd->rx_data_timeout_uapsd =
+			cpu_to_le32(IWL_MVM_WOWLAN_PS_RX_DATA_TIMEOUT);
+		cmd->tx_data_timeout_uapsd =
+			cpu_to_le32(IWL_MVM_WOWLAN_PS_TX_DATA_TIMEOUT);
+	} else {
+		cmd->rx_data_timeout_uapsd =
+			cpu_to_le32(IWL_MVM_UAPSD_RX_DATA_TIMEOUT);
+		cmd->tx_data_timeout_uapsd =
+			cpu_to_le32(IWL_MVM_UAPSD_TX_DATA_TIMEOUT);
+	}
+
+	if (cmd->flags & cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)) {
+		cmd->heavy_tx_thld_packets =
+			IWL_MVM_PS_SNOOZE_HEAVY_TX_THLD_PACKETS;
+		cmd->heavy_rx_thld_packets =
+			IWL_MVM_PS_SNOOZE_HEAVY_RX_THLD_PACKETS;
+	} else {
+		cmd->heavy_tx_thld_packets =
+			IWL_MVM_PS_HEAVY_TX_THLD_PACKETS;
+		cmd->heavy_rx_thld_packets =
+			IWL_MVM_PS_HEAVY_RX_THLD_PACKETS;
+	}
+	cmd->heavy_tx_thld_percentage =
+		IWL_MVM_PS_HEAVY_TX_THLD_PERCENT;
+	cmd->heavy_rx_thld_percentage =
+		IWL_MVM_PS_HEAVY_RX_THLD_PERCENT;
+}
+
 static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
 				    struct ieee80211_vif *vif,
 				    struct iwl_mac_power_cmd *cmd)
@@ -198,8 +283,7 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
 	bool radar_detect = false;
 	struct iwl_mvm_vif *mvmvif __maybe_unused =
 		iwl_mvm_vif_from_mac80211(vif);
-	enum ieee80211_ac_numbers ac;
-	bool tid_found = false;
+	bool allow_uapsd = true;
 
 	cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
 							    mvmvif->color));
@@ -217,7 +301,8 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
 	keep_alive = DIV_ROUND_UP(keep_alive, MSEC_PER_SEC);
 	cmd->keep_alive_seconds = cpu_to_le16(keep_alive);
 
-	if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM)
+	if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM ||
+	    mvm->ps_prevented)
 		return;
 
 	cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK);
@@ -227,7 +312,7 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
 	    mvmvif->dbgfs_pm.disable_power_off)
 		cmd->flags &= cpu_to_le16(~POWER_FLAGS_POWER_SAVE_ENA_MSK);
 #endif
-	if (!vif->bss_conf.ps)
+	if (!vif->bss_conf.ps || mvmvif->pm_prevented)
 		return;
 
 	cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK);
@@ -269,81 +354,24 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
 			cpu_to_le32(IWL_MVM_WOWLAN_PS_TX_DATA_TIMEOUT);
 	}
 
-	for (ac = IEEE80211_AC_VO; ac <= IEEE80211_AC_BK; ac++) {
-		if (!mvmvif->queue_params[ac].uapsd)
-			continue;
-
-		if (mvm->cur_ucode != IWL_UCODE_WOWLAN)
-			cmd->flags |=
-				cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK);
-
-		cmd->uapsd_ac_flags |= BIT(ac);
+	if (!memcmp(mvmvif->uapsd_misbehaving_bssid, vif->bss_conf.bssid,
+		    ETH_ALEN))
+		allow_uapsd = false;
 
-		/* QNDP TID - the highest TID with no admission control */
-		if (!tid_found && !mvmvif->queue_params[ac].acm) {
-			tid_found = true;
-			switch (ac) {
-			case IEEE80211_AC_VO:
-				cmd->qndp_tid = 6;
-				break;
-			case IEEE80211_AC_VI:
-				cmd->qndp_tid = 5;
-				break;
-			case IEEE80211_AC_BE:
-				cmd->qndp_tid = 0;
-				break;
-			case IEEE80211_AC_BK:
-				cmd->qndp_tid = 1;
-				break;
-			}
-		}
-	}
-
-	if (cmd->flags & cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK)) {
-		if (cmd->uapsd_ac_flags == (BIT(IEEE80211_AC_VO) |
-					    BIT(IEEE80211_AC_VI) |
-					    BIT(IEEE80211_AC_BE) |
-					    BIT(IEEE80211_AC_BK))) {
-			cmd->flags |= cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK);
-			cmd->snooze_interval =
-				cpu_to_le16(IWL_MVM_PS_SNOOZE_INTERVAL);
-			cmd->snooze_window =
-				(mvm->cur_ucode == IWL_UCODE_WOWLAN) ?
-				cpu_to_le16(IWL_MVM_WOWLAN_PS_SNOOZE_WINDOW) :
-				cpu_to_le16(IWL_MVM_PS_SNOOZE_WINDOW);
-		}
-
-		cmd->uapsd_max_sp = IWL_UAPSD_MAX_SP;
-
-		if (mvm->cur_ucode == IWL_UCODE_WOWLAN || cmd->flags &
-		    cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)) {
-			cmd->rx_data_timeout_uapsd =
-				cpu_to_le32(IWL_MVM_WOWLAN_PS_RX_DATA_TIMEOUT);
-			cmd->tx_data_timeout_uapsd =
-				cpu_to_le32(IWL_MVM_WOWLAN_PS_TX_DATA_TIMEOUT);
-		} else {
-			cmd->rx_data_timeout_uapsd =
-				cpu_to_le32(IWL_MVM_UAPSD_RX_DATA_TIMEOUT);
-			cmd->tx_data_timeout_uapsd =
-				cpu_to_le32(IWL_MVM_UAPSD_TX_DATA_TIMEOUT);
-		}
+	if (vif->p2p &&
+	    !(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD))
+		allow_uapsd = false;
+	/*
+	 * Avoid using uAPSD if a P2P client is associated to a GO that uses
+	 * opportunistic power save. This is due to a current FW limitation.
+	 */
+	if (vif->p2p &&
+	    vif->bss_conf.p2p_noa_attr.oppps_ctwindow &
+	    IEEE80211_P2P_OPPPS_ENABLE_BIT)
+		allow_uapsd = false;
 
-		if (cmd->flags & cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)) {
-			cmd->heavy_tx_thld_packets =
-				IWL_MVM_PS_SNOOZE_HEAVY_TX_THLD_PACKETS;
-			cmd->heavy_rx_thld_packets =
-				IWL_MVM_PS_SNOOZE_HEAVY_RX_THLD_PACKETS;
-		} else {
-			cmd->heavy_tx_thld_packets =
-				IWL_MVM_PS_HEAVY_TX_THLD_PACKETS;
-			cmd->heavy_rx_thld_packets =
-				IWL_MVM_PS_HEAVY_RX_THLD_PACKETS;
-		}
-		cmd->heavy_tx_thld_percentage =
-			IWL_MVM_PS_HEAVY_TX_THLD_PERCENT;
-		cmd->heavy_rx_thld_percentage =
-			IWL_MVM_PS_HEAVY_RX_THLD_PERCENT;
-	}
+	if (allow_uapsd)
+		iwl_mvm_power_configure_uapsd(mvm, vif, cmd);
 
 #ifdef CONFIG_IWLWIFI_DEBUGFS
 	if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_KEEP_ALIVE)
@@ -381,6 +409,13 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
 			cmd->flags &=
 				cpu_to_le16(~POWER_FLAGS_SNOOZE_ENA_MSK);
 	}
+	if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_UAPSD_MISBEHAVING) {
+		u16 flag = POWER_FLAGS_UAPSD_MISBEHAVING_ENA_MSK;
+		if (mvmvif->dbgfs_pm.uapsd_misbehaving)
+			cmd->flags |= cpu_to_le16(flag);
+		else
+			cmd->flags &= cpu_to_le16(~flag);
+	}
 #endif /* CONFIG_IWLWIFI_DEBUGFS */
 }
 
@@ -391,18 +426,11 @@ static int iwl_mvm_power_mac_update_mode(struct iwl_mvm *mvm,
 	bool ba_enable;
 	struct iwl_mac_power_cmd cmd = {};
 
-	if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
+	if (vif->type != NL80211_IFTYPE_STATION)
 		return 0;
 
-	/*
-	 * TODO: The following vif_count verification is temporary condition.
-	 * Avoid power mode update if more than one interface is currently
-	 * active. Remove this condition when FW will support power management
-	 * on multiple MACs.
-	 */
-	IWL_DEBUG_POWER(mvm, "Currently %d interfaces active\n",
-			mvm->vif_count);
-	if (mvm->vif_count > 1)
+	if (vif->p2p &&
+	    !(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_P2P_PS))
 		return 0;
 
 	iwl_mvm_power_build_cmd(mvm, vif, &cmd);
@@ -446,7 +474,7 @@ static int iwl_mvm_power_mac_disable(struct iwl_mvm *mvm,
 				    sizeof(cmd), &cmd);
 }
 
-static int iwl_mvm_power_update_device(struct iwl_mvm *mvm)
+static int _iwl_mvm_power_update_device(struct iwl_mvm *mvm, bool force_disable)
 {
 	struct iwl_device_power_cmd cmd = {
 		.flags = cpu_to_le16(DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK),
@@ -455,7 +483,8 @@ static int iwl_mvm_power_update_device(struct iwl_mvm *mvm)
 	if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD))
 		return 0;
 
-	if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM)
+	if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM ||
+	    force_disable)
 		cmd.flags |= cpu_to_le16(DEVICE_POWER_FLAGS_CAM_MSK);
 
 #ifdef CONFIG_IWLWIFI_DEBUGFS
@@ -472,6 +501,78 @@ static int iwl_mvm_power_update_device(struct iwl_mvm *mvm)
 				    &cmd);
 }
 
+static int iwl_mvm_power_update_device(struct iwl_mvm *mvm)
+{
+	return _iwl_mvm_power_update_device(mvm, false);
+}
+
+void iwl_mvm_power_vif_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+	if (memcmp(vif->bss_conf.bssid, mvmvif->uapsd_misbehaving_bssid,
+		   ETH_ALEN))
+		memset(mvmvif->uapsd_misbehaving_bssid, 0, ETH_ALEN);
+}
+
+static void iwl_mvm_power_uapsd_misbehav_ap_iterator(void *_data, u8 *mac,
+						     struct ieee80211_vif *vif)
+{
+	u8 *ap_sta_id = _data;
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+	/* The ap_sta_id is not expected to change during current association
+	 * so no explicit protection is needed
+	 */
+	if (mvmvif->ap_sta_id == *ap_sta_id)
+		memcpy(mvmvif->uapsd_misbehaving_bssid, vif->bss_conf.bssid,
+		       ETH_ALEN);
+}
+
+int iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm,
+					     struct iwl_rx_cmd_buffer *rxb,
+					     struct iwl_device_cmd *cmd)
+{
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
+	struct iwl_uapsd_misbehaving_ap_notif *notif = (void *)pkt->data;
+	u8 ap_sta_id = le32_to_cpu(notif->sta_id);
+
+	ieee80211_iterate_active_interfaces_atomic(
+		mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+		iwl_mvm_power_uapsd_misbehav_ap_iterator, &ap_sta_id);
+
+	return 0;
+}
+
+static void iwl_mvm_power_binding_iterator(void *_data, u8 *mac,
+					   struct ieee80211_vif *vif)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct iwl_mvm *mvm = _data;
+	int ret;
+
+	mvmvif->pm_prevented = (mvm->bound_vif_cnt > 1);
+
+	ret = iwl_mvm_power_mac_update_mode(mvm, vif);
+	WARN_ONCE(ret, "Failed to update power parameters on a specific vif\n");
+}
+
+static void _iwl_mvm_power_update_binding(struct iwl_mvm *mvm,
+					  struct ieee80211_vif *vif,
+					  bool assign)
+{
+	if (vif->type == NL80211_IFTYPE_MONITOR) {
+		int ret = _iwl_mvm_power_update_device(mvm, assign);
+		mvm->ps_prevented = assign;
+		WARN_ONCE(ret, "Failed to update power device state\n");
+	}
+
+	ieee80211_iterate_active_interfaces(mvm->hw,
+					    IEEE80211_IFACE_ITER_NORMAL,
+					    iwl_mvm_power_binding_iterator,
+					    mvm);
+}
+
 #ifdef CONFIG_IWLWIFI_DEBUGFS
 static int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm,
 					struct ieee80211_vif *vif, char *buf,
@@ -494,70 +595,58 @@ static int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm,
 	pos += scnprintf(buf+pos, bufsz-pos, "keep_alive = %d\n",
 			 le16_to_cpu(cmd.keep_alive_seconds));
 
-	if (cmd.flags & cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK)) {
-		pos += scnprintf(buf+pos, bufsz-pos, "skip_over_dtim = %d\n",
-				 (cmd.flags &
-				 cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK)) ?
-				 1 : 0);
-		pos += scnprintf(buf+pos, bufsz-pos, "skip_dtim_periods = %d\n",
-				 cmd.skip_dtim_periods);
-		if (!(cmd.flags &
-		      cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK))) {
-			pos += scnprintf(buf+pos, bufsz-pos,
-					 "rx_data_timeout = %d\n",
-					 le32_to_cpu(cmd.rx_data_timeout));
-			pos += scnprintf(buf+pos, bufsz-pos,
-					 "tx_data_timeout = %d\n",
-					 le32_to_cpu(cmd.tx_data_timeout));
-		}
-		if (cmd.flags & cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK))
-			pos += scnprintf(buf+pos, bufsz-pos,
-					 "lprx_rssi_threshold = %d\n",
-					 cmd.lprx_rssi_threshold);
-		if (cmd.flags & cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK)) {
-			pos +=
-			scnprintf(buf+pos, bufsz-pos,
-				  "rx_data_timeout_uapsd = %d\n",
-				  le32_to_cpu(cmd.rx_data_timeout_uapsd));
-			pos +=
-			scnprintf(buf+pos, bufsz-pos,
-				  "tx_data_timeout_uapsd = %d\n",
-				  le32_to_cpu(cmd.tx_data_timeout_uapsd));
-			pos += scnprintf(buf+pos, bufsz-pos, "qndp_tid = %d\n",
-					 cmd.qndp_tid);
-			pos += scnprintf(buf+pos, bufsz-pos,
-					 "uapsd_ac_flags = 0x%x\n",
-					 cmd.uapsd_ac_flags);
-			pos += scnprintf(buf+pos, bufsz-pos,
-					 "uapsd_max_sp = %d\n",
-					 cmd.uapsd_max_sp);
-			pos += scnprintf(buf+pos, bufsz-pos,
-					 "heavy_tx_thld_packets = %d\n",
-					 cmd.heavy_tx_thld_packets);
-			pos += scnprintf(buf+pos, bufsz-pos,
-					 "heavy_rx_thld_packets = %d\n",
-					 cmd.heavy_rx_thld_packets);
-			pos += scnprintf(buf+pos, bufsz-pos,
-					 "heavy_tx_thld_percentage = %d\n",
-					 cmd.heavy_tx_thld_percentage);
-			pos += scnprintf(buf+pos, bufsz-pos,
-					 "heavy_rx_thld_percentage = %d\n",
-					 cmd.heavy_rx_thld_percentage);
-			pos +=
-			scnprintf(buf+pos, bufsz-pos, "snooze_enable = %d\n",
-				  (cmd.flags &
-				   cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)) ?
-				  1 : 0);
-		}
-		if (cmd.flags & cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)) {
-			pos += scnprintf(buf+pos, bufsz-pos,
-					 "snooze_interval = %d\n",
-					 cmd.snooze_interval);
-			pos += scnprintf(buf+pos, bufsz-pos,
-					 "snooze_window = %d\n",
-					 cmd.snooze_window);
-		}
+	if (!(cmd.flags & cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK)))
+		return pos;
+
+	pos += scnprintf(buf+pos, bufsz-pos, "skip_over_dtim = %d\n",
+			 (cmd.flags &
+			 cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK)) ? 1 : 0);
+	pos += scnprintf(buf+pos, bufsz-pos, "skip_dtim_periods = %d\n",
+			 cmd.skip_dtim_periods);
+	if (!(cmd.flags & cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK))) {
+		pos += scnprintf(buf+pos, bufsz-pos, "rx_data_timeout = %d\n",
+				 le32_to_cpu(cmd.rx_data_timeout));
+		pos += scnprintf(buf+pos, bufsz-pos, "tx_data_timeout = %d\n",
+				 le32_to_cpu(cmd.tx_data_timeout));
 	}
+	if (cmd.flags & cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK))
+		pos += scnprintf(buf+pos, bufsz-pos,
+				 "lprx_rssi_threshold = %d\n",
+				 cmd.lprx_rssi_threshold);
+
+	if (!(cmd.flags & cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK)))
+		return pos;
+
+	pos += scnprintf(buf+pos, bufsz-pos, "rx_data_timeout_uapsd = %d\n",
+			 le32_to_cpu(cmd.rx_data_timeout_uapsd));
+	pos += scnprintf(buf+pos, bufsz-pos, "tx_data_timeout_uapsd = %d\n",
+			 le32_to_cpu(cmd.tx_data_timeout_uapsd));
+	pos += scnprintf(buf+pos, bufsz-pos, "qndp_tid = %d\n", cmd.qndp_tid);
+	pos += scnprintf(buf+pos, bufsz-pos, "uapsd_ac_flags = 0x%x\n",
+			 cmd.uapsd_ac_flags);
+	pos += scnprintf(buf+pos, bufsz-pos, "uapsd_max_sp = %d\n",
+			 cmd.uapsd_max_sp);
+	pos += scnprintf(buf+pos, bufsz-pos, "heavy_tx_thld_packets = %d\n",
+			 cmd.heavy_tx_thld_packets);
+	pos += scnprintf(buf+pos, bufsz-pos, "heavy_rx_thld_packets = %d\n",
+			 cmd.heavy_rx_thld_packets);
+	pos += scnprintf(buf+pos, bufsz-pos, "heavy_tx_thld_percentage = %d\n",
+			 cmd.heavy_tx_thld_percentage);
+	pos += scnprintf(buf+pos, bufsz-pos, "heavy_rx_thld_percentage = %d\n",
+			 cmd.heavy_rx_thld_percentage);
+	pos += scnprintf(buf+pos, bufsz-pos, "uapsd_misbehaving_enable = %d\n",
+			 (cmd.flags &
+			  cpu_to_le16(POWER_FLAGS_UAPSD_MISBEHAVING_ENA_MSK)) ?
+			 1 : 0);
+
+	if (!(cmd.flags & cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)))
+		return pos;
+
+	pos += scnprintf(buf+pos, bufsz-pos, "snooze_interval = %d\n",
+			 cmd.snooze_interval);
+	pos += scnprintf(buf+pos, bufsz-pos, "snooze_window = %d\n",
+			 cmd.snooze_window);
+
 	return pos;
 }
 
@@ -654,6 +743,7 @@ const struct iwl_mvm_power_ops pm_mac_ops = {
 	.power_update_mode = iwl_mvm_power_mac_update_mode,
 	.power_update_device_mode = iwl_mvm_power_update_device,
 	.power_disable = iwl_mvm_power_mac_disable,
+	.power_update_binding = _iwl_mvm_power_update_binding,
 #ifdef CONFIG_IWLWIFI_DEBUGFS
 	.power_dbgfs_read = iwl_mvm_power_mac_dbgfs_read,
 #endif
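
iwl_mvm_power_configure_uapsd() above scans the ACs from VO down to BK and takes the QNDP TID from the first uAPSD-enabled AC without admission control, mapping VO to TID 6, VI to 5, BE to 0 and BK to 1. A short sketch of that selection with invented per-AC flag arrays:

#include <stdbool.h>
#include <stdio.h>

enum ac { AC_VO, AC_VI, AC_BE, AC_BK, AC_NUM };

static const int qndp_tid_for_ac[AC_NUM] = {
	[AC_VO] = 6, [AC_VI] = 5, [AC_BE] = 0, [AC_BK] = 1,
};

/* Returns the QNDP TID, or -1 if no uAPSD AC is free of admission control. */
static int pick_qndp_tid(const bool uapsd[AC_NUM], const bool acm[AC_NUM])
{
	int ac;

	for (ac = AC_VO; ac <= AC_BK; ac++) {
		if (!uapsd[ac])
			continue;
		if (!acm[ac])
			return qndp_tid_for_ac[ac];
	}
	return -1;
}

int main(void)
{
	bool uapsd[AC_NUM] = { true, true, true, true };
	bool acm[AC_NUM]   = { true, false, false, false }; /* VO needs admission */

	printf("QNDP TID = %d\n", pick_qndp_tid(uapsd, acm)); /* -> 5 (VI) */
	return 0;
}
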
diff --git a/drivers/net/wireless/iwlwifi/mvm/power_legacy.c b/drivers/net/wireless/iwlwifi/mvm/power_legacy.c
index 2ce79bad5845..ef712ae5bc62 100644
--- a/drivers/net/wireless/iwlwifi/mvm/power_legacy.c
+++ b/drivers/net/wireless/iwlwifi/mvm/power_legacy.c
@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/mvm/quota.c b/drivers/net/wireless/iwlwifi/mvm/quota.c
index 17e2bc827f9a..ce5db6c4ef7e 100644
--- a/drivers/net/wireless/iwlwifi/mvm/quota.c
+++ b/drivers/net/wireless/iwlwifi/mvm/quota.c
@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -217,8 +217,7 @@ int iwl_mvm_update_quotas(struct iwl_mvm *mvm, struct ieee80211_vif *newvif)
 		} else {
 			cmd.quotas[idx].quota =
 				cpu_to_le32(quota * data.n_interfaces[i]);
-			cmd.quotas[idx].max_duration =
-				cpu_to_le32(IWL_MVM_MAX_QUOTA);
+			cmd.quotas[idx].max_duration = cpu_to_le32(0);
 		}
 		idx++;
 	}
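
The quota hunk above scales each binding's share by the number of interfaces bound to it (the per-interface quota itself is computed outside the visible hunk) and now sets max_duration to 0. A rough sketch of such a proportional split, assuming an illustrative QUOTA_100 budget per cycle:

#include <stdio.h>

#define QUOTA_100 128   /* illustrative "full quota" unit */

static void split_quota(const int n_interfaces[], int n_bindings)
{
	int total = 0;
	int per_iface;
	int i;

	for (i = 0; i < n_bindings; i++)
		total += n_interfaces[i];
	if (!total)
		return;

	per_iface = QUOTA_100 / total;

	for (i = 0; i < n_bindings; i++) {
		if (!n_interfaces[i])
			continue;
		printf("binding %d: quota=%d max_duration=0\n",
		       i, per_iface * n_interfaces[i]);
	}
}

int main(void)
{
	int n_interfaces[] = { 2, 1, 0 };  /* e.g. STA+GO on one channel, STA on another */

	split_quota(n_interfaces, 3);
	return 0;
}
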
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c
index a0b4cc8d9c3b..6abf74e1351f 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.c
@@ -1,6 +1,6 @@
 /******************************************************************************
  *
- * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of version 2 of the GNU General Public License as
@@ -24,7 +24,6 @@
  *
  *****************************************************************************/
 #include <linux/kernel.h>
-#include <linux/init.h>
 #include <linux/skbuff.h>
 #include <linux/slab.h>
 #include <net/mac80211.h>
@@ -42,33 +41,37 @@
 
 #define RS_NAME "iwl-mvm-rs"
 
-#define NUM_TRY_BEFORE_ANT_TOGGLE 1
-#define IWL_NUMBER_TRY      1
-#define IWL_HT_NUMBER_TRY   3
+#define NUM_TRY_BEFORE_ANT_TOGGLE       1
+#define RS_LEGACY_RETRIES_PER_RATE      1
+#define RS_HT_VHT_RETRIES_PER_RATE      2
+#define RS_HT_VHT_RETRIES_PER_RATE_TW   1
+#define RS_INITIAL_MIMO_NUM_RATES       3
+#define RS_INITIAL_SISO_NUM_RATES       3
+#define RS_INITIAL_LEGACY_NUM_RATES     LINK_QUAL_MAX_RETRY_NUM
+#define RS_SECONDARY_LEGACY_NUM_RATES   LINK_QUAL_MAX_RETRY_NUM
+#define RS_SECONDARY_SISO_NUM_RATES     3
+#define RS_SECONDARY_SISO_RETRIES       1
 
 #define IWL_RATE_MAX_WINDOW		62	/* # tx in history window */
-#define IWL_RATE_MIN_FAILURE_TH		6	/* min failures to calc tpt */
+#define IWL_RATE_MIN_FAILURE_TH		3	/* min failures to calc tpt */
 #define IWL_RATE_MIN_SUCCESS_TH		8	/* min successes to calc tpt */
 
 /* max allowed rate miss before sync LQ cmd */
 #define IWL_MISSED_RATE_MAX		15
-/* max time to accum history 2 seconds */
-#define IWL_RATE_SCALE_FLUSH_INTVL   (3*HZ)
+#define RS_STAY_IN_COLUMN_TIMEOUT       (5*HZ)
+
 
 static u8 rs_ht_to_legacy[] = {
-	[IWL_RATE_1M_INDEX] = IWL_RATE_6M_INDEX,
-	[IWL_RATE_2M_INDEX] = IWL_RATE_6M_INDEX,
-	[IWL_RATE_5M_INDEX] = IWL_RATE_6M_INDEX,
-	[IWL_RATE_11M_INDEX] = IWL_RATE_6M_INDEX,
-	[IWL_RATE_6M_INDEX] = IWL_RATE_6M_INDEX,
-	[IWL_RATE_9M_INDEX] = IWL_RATE_6M_INDEX,
-	[IWL_RATE_12M_INDEX] = IWL_RATE_9M_INDEX,
-	[IWL_RATE_18M_INDEX] = IWL_RATE_12M_INDEX,
-	[IWL_RATE_24M_INDEX] = IWL_RATE_18M_INDEX,
-	[IWL_RATE_36M_INDEX] = IWL_RATE_24M_INDEX,
-	[IWL_RATE_48M_INDEX] = IWL_RATE_36M_INDEX,
-	[IWL_RATE_54M_INDEX] = IWL_RATE_48M_INDEX,
-	[IWL_RATE_60M_INDEX] = IWL_RATE_54M_INDEX,
+	[IWL_RATE_MCS_0_INDEX] = IWL_RATE_6M_INDEX,
+	[IWL_RATE_MCS_1_INDEX] = IWL_RATE_9M_INDEX,
+	[IWL_RATE_MCS_2_INDEX] = IWL_RATE_12M_INDEX,
+	[IWL_RATE_MCS_3_INDEX] = IWL_RATE_18M_INDEX,
+	[IWL_RATE_MCS_4_INDEX] = IWL_RATE_24M_INDEX,
+	[IWL_RATE_MCS_5_INDEX] = IWL_RATE_36M_INDEX,
+	[IWL_RATE_MCS_6_INDEX] = IWL_RATE_48M_INDEX,
+	[IWL_RATE_MCS_7_INDEX] = IWL_RATE_54M_INDEX,
+	[IWL_RATE_MCS_8_INDEX] = IWL_RATE_54M_INDEX,
+	[IWL_RATE_MCS_9_INDEX] = IWL_RATE_54M_INDEX,
 };
 
 static const u8 ant_toggle_lookup[] = {
@@ -126,6 +129,196 @@ static const struct iwl_rs_rate_info iwl_rates[IWL_RATE_COUNT] = {
 	IWL_DECLARE_MCS_RATE(9),                 /* MCS 9 */
 };
 
+enum rs_action {
+	RS_ACTION_STAY = 0,
+	RS_ACTION_DOWNSCALE = -1,
+	RS_ACTION_UPSCALE = 1,
+};
+
+enum rs_column_mode {
+	RS_INVALID = 0,
+	RS_LEGACY,
+	RS_SISO,
+	RS_MIMO2,
+};
+
+#define MAX_NEXT_COLUMNS 5
+#define MAX_COLUMN_CHECKS 3
+
+typedef bool (*allow_column_func_t) (struct iwl_mvm *mvm,
+				     struct ieee80211_sta *sta,
+				     struct iwl_scale_tbl_info *tbl);
+
+struct rs_tx_column {
+	enum rs_column_mode mode;
+	u8 ant;
+	bool sgi;
+	enum rs_column next_columns[MAX_NEXT_COLUMNS];
+	allow_column_func_t checks[MAX_COLUMN_CHECKS];
+};
+
+static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+			  struct iwl_scale_tbl_info *tbl)
+{
+	if (!sta->ht_cap.ht_supported)
+		return false;
+
+	if (sta->smps_mode == IEEE80211_SMPS_STATIC)
+		return false;
+
+	if (num_of_ant(iwl_fw_valid_tx_ant(mvm->fw)) < 2)
+		return false;
+
+	if (!iwl_mvm_bt_coex_is_mimo_allowed(mvm, sta))
+		return false;
+
+	return true;
+}
+
+static bool rs_siso_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+			  struct iwl_scale_tbl_info *tbl)
+{
+	if (!sta->ht_cap.ht_supported)
+		return false;
+
+	return true;
+}
+
+static bool rs_sgi_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+			 struct iwl_scale_tbl_info *tbl)
+{
+	struct rs_rate *rate = &tbl->rate;
+	struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+	struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
+
+	if (is_ht20(rate) && (ht_cap->cap &
+			     IEEE80211_HT_CAP_SGI_20))
+		return true;
+	if (is_ht40(rate) && (ht_cap->cap &
+			     IEEE80211_HT_CAP_SGI_40))
+		return true;
+	if (is_ht80(rate) && (vht_cap->cap &
+			     IEEE80211_VHT_CAP_SHORT_GI_80))
+		return true;
+
+	return false;
+}
+
+static const struct rs_tx_column rs_tx_columns[] = {
+	[RS_COLUMN_LEGACY_ANT_A] = {
+		.mode = RS_LEGACY,
+		.ant = ANT_A,
+		.next_columns = {
+			RS_COLUMN_LEGACY_ANT_B,
+			RS_COLUMN_SISO_ANT_A,
+			RS_COLUMN_MIMO2,
+			RS_COLUMN_INVALID,
+			RS_COLUMN_INVALID,
+		},
+	},
+	[RS_COLUMN_LEGACY_ANT_B] = {
+		.mode = RS_LEGACY,
+		.ant = ANT_B,
+		.next_columns = {
+			RS_COLUMN_LEGACY_ANT_A,
+			RS_COLUMN_SISO_ANT_B,
+			RS_COLUMN_MIMO2,
+			RS_COLUMN_INVALID,
+			RS_COLUMN_INVALID,
+		},
+	},
+	[RS_COLUMN_SISO_ANT_A] = {
+		.mode = RS_SISO,
+		.ant = ANT_A,
+		.next_columns = {
+			RS_COLUMN_SISO_ANT_B,
+			RS_COLUMN_MIMO2,
+			RS_COLUMN_SISO_ANT_A_SGI,
+			RS_COLUMN_INVALID,
+			RS_COLUMN_INVALID,
+		},
+		.checks = {
+			rs_siso_allow,
+		},
+	},
+	[RS_COLUMN_SISO_ANT_B] = {
+		.mode = RS_SISO,
+		.ant = ANT_B,
+		.next_columns = {
+			RS_COLUMN_SISO_ANT_A,
+			RS_COLUMN_MIMO2,
+			RS_COLUMN_SISO_ANT_B_SGI,
+			RS_COLUMN_INVALID,
+			RS_COLUMN_INVALID,
+		},
+		.checks = {
+			rs_siso_allow,
+		},
+	},
+	[RS_COLUMN_SISO_ANT_A_SGI] = {
+		.mode = RS_SISO,
+		.ant = ANT_A,
+		.sgi = true,
+		.next_columns = {
+			RS_COLUMN_SISO_ANT_B_SGI,
+			RS_COLUMN_MIMO2_SGI,
+			RS_COLUMN_SISO_ANT_A,
+			RS_COLUMN_INVALID,
+			RS_COLUMN_INVALID,
+		},
+		.checks = {
+			rs_siso_allow,
+			rs_sgi_allow,
+		},
+	},
+	[RS_COLUMN_SISO_ANT_B_SGI] = {
+		.mode = RS_SISO,
+		.ant = ANT_B,
+		.sgi = true,
+		.next_columns = {
+			RS_COLUMN_SISO_ANT_A_SGI,
+			RS_COLUMN_MIMO2_SGI,
+			RS_COLUMN_SISO_ANT_B,
+			RS_COLUMN_INVALID,
+			RS_COLUMN_INVALID,
+		},
+		.checks = {
+			rs_siso_allow,
+			rs_sgi_allow,
+		},
+	},
+	[RS_COLUMN_MIMO2] = {
+		.mode = RS_MIMO2,
+		.ant = ANT_AB,
+		.next_columns = {
+			RS_COLUMN_SISO_ANT_A,
+			RS_COLUMN_MIMO2_SGI,
+			RS_COLUMN_INVALID,
+			RS_COLUMN_INVALID,
+			RS_COLUMN_INVALID,
+		},
+		.checks = {
+			rs_mimo_allow,
+		},
+	},
+	[RS_COLUMN_MIMO2_SGI] = {
+		.mode = RS_MIMO2,
+		.ant = ANT_AB,
+		.sgi = true,
+		.next_columns = {
+			RS_COLUMN_SISO_ANT_A_SGI,
+			RS_COLUMN_MIMO2,
+			RS_COLUMN_INVALID,
+			RS_COLUMN_INVALID,
+			RS_COLUMN_INVALID,
+		},
+		.checks = {
+			rs_mimo_allow,
+			rs_sgi_allow,
+		},
+	},
+};
+
 static inline u8 rs_extract_rate(u32 rate_n_flags)
 {
 	/* also works for HT because bits 7:6 are zero there */
@@ -163,28 +356,19 @@ static int iwl_hwrate_to_plcp_idx(u32 rate_n_flags)
 				return idx;
 	}
 
-	return -1;
+	return IWL_RATE_INVALID;
 }
 
 static void rs_rate_scale_perform(struct iwl_mvm *mvm,
 				   struct sk_buff *skb,
 				   struct ieee80211_sta *sta,
 				   struct iwl_lq_sta *lq_sta);
-static void rs_fill_link_cmd(struct iwl_mvm *mvm,
-			     struct ieee80211_sta *sta,
-			     struct iwl_lq_sta *lq_sta, u32 rate_n_flags);
+static void rs_fill_lq_cmd(struct iwl_mvm *mvm,
+			   struct ieee80211_sta *sta,
+			   struct iwl_lq_sta *lq_sta,
+			   const struct rs_rate *initial_rate);
 static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search);
 
-
-#ifdef CONFIG_MAC80211_DEBUGFS
-static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
-			     u32 *rate_n_flags);
-#else
-static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
-			     u32 *rate_n_flags)
-{}
-#endif
-
 /**
  * The following tables contain the expected throughput metrics for all rates
  *
@@ -264,6 +448,52 @@ static const struct iwl_rate_mcs_info iwl_rate_mcs[IWL_RATE_COUNT] = {
 
 #define MCS_INDEX_PER_STREAM	(8)
 
+static const char *rs_pretty_ant(u8 ant)
+{
+	static const char * const ant_name[] = {
+		[ANT_NONE] = "None",
+		[ANT_A]    = "A",
+		[ANT_B]    = "B",
+		[ANT_AB]   = "AB",
+		[ANT_C]    = "C",
+		[ANT_AC]   = "AC",
+		[ANT_BC]   = "BC",
+		[ANT_ABC]  = "ABC",
+	};
+
+	if (ant > ANT_ABC)
+		return "UNKNOWN";
+
+	return ant_name[ant];
+}
+
+static const char *rs_pretty_lq_type(enum iwl_table_type type)
+{
+	static const char * const lq_types[] = {
+		[LQ_NONE] = "NONE",
+		[LQ_LEGACY_A] = "LEGACY_A",
+		[LQ_LEGACY_G] = "LEGACY_G",
+		[LQ_HT_SISO] = "HT SISO",
+		[LQ_HT_MIMO2] = "HT MIMO",
+		[LQ_VHT_SISO] = "VHT SISO",
+		[LQ_VHT_MIMO2] = "VHT MIMO",
+	};
+
+	if (type < LQ_NONE || type >= LQ_MAX)
+		return "UNKNOWN";
+
+	return lq_types[type];
+}
+
+static inline void rs_dump_rate(struct iwl_mvm *mvm, const struct rs_rate *rate,
+				const char *prefix)
+{
+	IWL_DEBUG_RATE(mvm, "%s: (%s: %d) ANT: %s BW: %d SGI: %d\n",
+		       prefix, rs_pretty_lq_type(rate->type),
+		       rate->index, rs_pretty_ant(rate->ant),
+		       rate->bw, rate->sgi);
+}
+
 static void rs_rate_scale_clear_window(struct iwl_rate_scale_data *window)
 {
 	window->data = 0;
@@ -271,7 +501,6 @@ static void rs_rate_scale_clear_window(struct iwl_rate_scale_data *window)
 	window->success_ratio = IWL_INVALID_VALUE;
 	window->counter = 0;
 	window->average_tpt = IWL_INVALID_VALUE;
-	window->stamp = 0;
 }
 
 static inline u8 rs_is_valid_ant(u8 valid_antenna, u8 ant_type)
@@ -279,30 +508,6 @@ static inline u8 rs_is_valid_ant(u8 valid_antenna, u8 ant_type)
 	return (ant_type & valid_antenna) == ant_type;
 }
 
-#ifdef CONFIG_MAC80211_DEBUGFS
-/**
- * Program the device to use fixed rate for frame transmit
- * This is for debugging/testing only
- * once the device start use fixed rate, we need to reload the module
- * to being back the normal operation.
- */
-static void rs_program_fix_rate(struct iwl_mvm *mvm,
-				struct iwl_lq_sta *lq_sta)
-{
-	lq_sta->active_legacy_rate = 0x0FFF;	/* 1 - 54 MBits, includes CCK */
-	lq_sta->active_siso_rate   = 0x1FD0;	/* 6 - 60 MBits, no 9, no CCK */
-	lq_sta->active_mimo2_rate  = 0x1FD0;	/* 6 - 60 MBits, no 9, no CCK */
-
-	IWL_DEBUG_RATE(mvm, "sta_id %d rate 0x%X\n",
-		       lq_sta->lq.sta_id, lq_sta->dbg_fixed_rate);
-
-	if (lq_sta->dbg_fixed_rate) {
-		rs_fill_link_cmd(NULL, NULL, lq_sta, lq_sta->dbg_fixed_rate);
-		iwl_mvm_send_lq_cmd(lq_sta->drv, &lq_sta->lq, CMD_ASYNC, false);
-	}
-}
-#endif
-
 static int rs_tl_turn_on_agg_for_tid(struct iwl_mvm *mvm,
 				      struct iwl_lq_sta *lq_data, u8 tid,
 				      struct ieee80211_sta *sta)
@@ -428,192 +633,168 @@ static int rs_collect_tx_data(struct iwl_scale_tbl_info *tbl,
 	else
 		window->average_tpt = IWL_INVALID_VALUE;
 
-	/* Tag this window as having been updated */
-	window->stamp = jiffies;
-
 	return 0;
 }
 
-/*
- * Fill uCode API rate_n_flags field, based on "search" or "active" table.
- */
-/* FIXME:RS:remove this function and put the flags statically in the table */
-static u32 rate_n_flags_from_tbl(struct iwl_mvm *mvm,
-				 struct iwl_scale_tbl_info *tbl, int index)
+/* Convert rs_rate object into ucode rate bitmask */
+static u32 ucode_rate_from_rs_rate(struct iwl_mvm *mvm,
+				  struct rs_rate *rate)
 {
-	u32 rate_n_flags = 0;
+	u32 ucode_rate = 0;
+	int index = rate->index;
 
-	rate_n_flags |= ((tbl->ant_type << RATE_MCS_ANT_POS) &
+	ucode_rate |= ((rate->ant << RATE_MCS_ANT_POS) &
 			 RATE_MCS_ANT_ABC_MSK);
 
-	if (is_legacy(tbl->lq_type)) {
-		rate_n_flags |= iwl_rates[index].plcp;
+	if (is_legacy(rate)) {
+		ucode_rate |= iwl_rates[index].plcp;
 		if (index >= IWL_FIRST_CCK_RATE && index <= IWL_LAST_CCK_RATE)
-			rate_n_flags |= RATE_MCS_CCK_MSK;
-		return rate_n_flags;
+			ucode_rate |= RATE_MCS_CCK_MSK;
+		return ucode_rate;
 	}
 
-	if (is_ht(tbl->lq_type)) {
+	if (is_ht(rate)) {
 		if (index < IWL_FIRST_HT_RATE || index > IWL_LAST_HT_RATE) {
 			IWL_ERR(mvm, "Invalid HT rate index %d\n", index);
 			index = IWL_LAST_HT_RATE;
 		}
-		rate_n_flags |= RATE_MCS_HT_MSK;
+		ucode_rate |= RATE_MCS_HT_MSK;
 
-		if (is_ht_siso(tbl->lq_type))
-			rate_n_flags |=	iwl_rates[index].plcp_ht_siso;
-		else if (is_ht_mimo2(tbl->lq_type))
-			rate_n_flags |=	iwl_rates[index].plcp_ht_mimo2;
+		if (is_ht_siso(rate))
+			ucode_rate |= iwl_rates[index].plcp_ht_siso;
+		else if (is_ht_mimo2(rate))
+			ucode_rate |= iwl_rates[index].plcp_ht_mimo2;
 		else
 			WARN_ON_ONCE(1);
-	} else if (is_vht(tbl->lq_type)) {
+	} else if (is_vht(rate)) {
 		if (index < IWL_FIRST_VHT_RATE || index > IWL_LAST_VHT_RATE) {
 			IWL_ERR(mvm, "Invalid VHT rate index %d\n", index);
 			index = IWL_LAST_VHT_RATE;
 		}
-		rate_n_flags |= RATE_MCS_VHT_MSK;
-		if (is_vht_siso(tbl->lq_type))
-			rate_n_flags |=	iwl_rates[index].plcp_vht_siso;
-		else if (is_vht_mimo2(tbl->lq_type))
-			rate_n_flags |=	iwl_rates[index].plcp_vht_mimo2;
+		ucode_rate |= RATE_MCS_VHT_MSK;
+		if (is_vht_siso(rate))
+			ucode_rate |= iwl_rates[index].plcp_vht_siso;
+		else if (is_vht_mimo2(rate))
+			ucode_rate |= iwl_rates[index].plcp_vht_mimo2;
 		else
 			WARN_ON_ONCE(1);
 
 	} else {
-		IWL_ERR(mvm, "Invalid tbl->lq_type %d\n", tbl->lq_type);
+		IWL_ERR(mvm, "Invalid rate->type %d\n", rate->type);
 	}
 
-	rate_n_flags |= tbl->bw;
-	if (tbl->is_SGI)
-		rate_n_flags |= RATE_MCS_SGI_MSK;
+	ucode_rate |= rate->bw;
+	if (rate->sgi)
+		ucode_rate |= RATE_MCS_SGI_MSK;
 
-	return rate_n_flags;
+	return ucode_rate;
 }
 
-/*
- * Interpret uCode API's rate_n_flags format,
- * fill "search" or "active" tx mode table.
- */
-static int rs_get_tbl_info_from_mcs(const u32 rate_n_flags,
-				    enum ieee80211_band band,
-				    struct iwl_scale_tbl_info *tbl,
-				    int *rate_idx)
+/* Convert a ucode rate into an rs_rate object */
+static int rs_rate_from_ucode_rate(const u32 ucode_rate,
+				   enum ieee80211_band band,
+				   struct rs_rate *rate)
 {
-	u32 ant_msk = (rate_n_flags & RATE_MCS_ANT_ABC_MSK);
-	u8 num_of_ant = get_num_of_ant_from_rate(rate_n_flags);
+	u32 ant_msk = ucode_rate & RATE_MCS_ANT_ABC_MSK;
+	u8 num_of_ant = get_num_of_ant_from_rate(ucode_rate);
 	u8 nss;
 
-	memset(tbl, 0, offsetof(struct iwl_scale_tbl_info, win));
-	*rate_idx = iwl_hwrate_to_plcp_idx(rate_n_flags);
+	memset(rate, 0, sizeof(*rate));
+	rate->index = iwl_hwrate_to_plcp_idx(ucode_rate);
 
-	if (*rate_idx  == IWL_RATE_INVALID) {
-		*rate_idx = -1;
+	if (rate->index == IWL_RATE_INVALID)
 		return -EINVAL;
-	}
-	tbl->is_SGI = 0;	/* default legacy setup */
-	tbl->bw = 0;
-	tbl->ant_type = (ant_msk >> RATE_MCS_ANT_POS);
-	tbl->lq_type = LQ_NONE;
-	tbl->max_search = IWL_MAX_SEARCH;
+
+	rate->ant = (ant_msk >> RATE_MCS_ANT_POS);
 
 	/* Legacy */
-	if (!(rate_n_flags & RATE_MCS_HT_MSK) &&
-	    !(rate_n_flags & RATE_MCS_VHT_MSK)) {
+	if (!(ucode_rate & RATE_MCS_HT_MSK) &&
+	    !(ucode_rate & RATE_MCS_VHT_MSK)) {
 		if (num_of_ant == 1) {
 			if (band == IEEE80211_BAND_5GHZ)
-				tbl->lq_type = LQ_LEGACY_A;
+				rate->type = LQ_LEGACY_A;
 			else
-				tbl->lq_type = LQ_LEGACY_G;
+				rate->type = LQ_LEGACY_G;
 		}
 
 		return 0;
 	}
 
 	/* HT or VHT */
-	if (rate_n_flags & RATE_MCS_SGI_MSK)
-		tbl->is_SGI = 1;
+	if (ucode_rate & RATE_MCS_SGI_MSK)
+		rate->sgi = true;
 
-	tbl->bw = rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK;
+	rate->bw = ucode_rate & RATE_MCS_CHAN_WIDTH_MSK;
 
-	if (rate_n_flags & RATE_MCS_HT_MSK) {
-		nss = ((rate_n_flags & RATE_HT_MCS_NSS_MSK) >>
+	if (ucode_rate & RATE_MCS_HT_MSK) {
+		nss = ((ucode_rate & RATE_HT_MCS_NSS_MSK) >>
 		       RATE_HT_MCS_NSS_POS) + 1;
 
 		if (nss == 1) {
-			tbl->lq_type = LQ_HT_SISO;
+			rate->type = LQ_HT_SISO;
 			WARN_ON_ONCE(num_of_ant != 1);
 		} else if (nss == 2) {
-			tbl->lq_type = LQ_HT_MIMO2;
+			rate->type = LQ_HT_MIMO2;
 			WARN_ON_ONCE(num_of_ant != 2);
 		} else {
 			WARN_ON_ONCE(1);
 		}
-	} else if (rate_n_flags & RATE_MCS_VHT_MSK) {
-		nss = ((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >>
+	} else if (ucode_rate & RATE_MCS_VHT_MSK) {
+		nss = ((ucode_rate & RATE_VHT_MCS_NSS_MSK) >>
 		       RATE_VHT_MCS_NSS_POS) + 1;
 
 		if (nss == 1) {
-			tbl->lq_type = LQ_VHT_SISO;
+			rate->type = LQ_VHT_SISO;
 			WARN_ON_ONCE(num_of_ant != 1);
 		} else if (nss == 2) {
-			tbl->lq_type = LQ_VHT_MIMO2;
+			rate->type = LQ_VHT_MIMO2;
 			WARN_ON_ONCE(num_of_ant != 2);
 		} else {
 			WARN_ON_ONCE(1);
 		}
 	}
 
-	WARN_ON_ONCE(tbl->bw == RATE_MCS_CHAN_WIDTH_160);
-	WARN_ON_ONCE(tbl->bw == RATE_MCS_CHAN_WIDTH_80 &&
-		     !is_vht(tbl->lq_type));
+	WARN_ON_ONCE(rate->bw == RATE_MCS_CHAN_WIDTH_160);
+	WARN_ON_ONCE(rate->bw == RATE_MCS_CHAN_WIDTH_80 &&
+		     !is_vht(rate));
 
 	return 0;
 }
 
 /* switch to another antenna/antennas and return 1 */
 /* if no other valid antenna found, return 0 */
-static int rs_toggle_antenna(u32 valid_ant, u32 *rate_n_flags,
-			     struct iwl_scale_tbl_info *tbl)
+static int rs_toggle_antenna(u32 valid_ant, struct rs_rate *rate)
 {
 	u8 new_ant_type;
 
-	if (!tbl->ant_type || tbl->ant_type > ANT_ABC)
+	if (!rate->ant || rate->ant > ANT_ABC)
 		return 0;
 
-	if (!rs_is_valid_ant(valid_ant, tbl->ant_type))
+	if (!rs_is_valid_ant(valid_ant, rate->ant))
 		return 0;
 
-	new_ant_type = ant_toggle_lookup[tbl->ant_type];
+	new_ant_type = ant_toggle_lookup[rate->ant];
 
-	while ((new_ant_type != tbl->ant_type) &&
+	while ((new_ant_type != rate->ant) &&
 	       !rs_is_valid_ant(valid_ant, new_ant_type))
 		new_ant_type = ant_toggle_lookup[new_ant_type];
 
-	if (new_ant_type == tbl->ant_type)
+	if (new_ant_type == rate->ant)
 		return 0;
 
-	tbl->ant_type = new_ant_type;
-	*rate_n_flags &= ~RATE_MCS_ANT_ABC_MSK;
-	*rate_n_flags |= new_ant_type << RATE_MCS_ANT_POS;
+	rate->ant = new_ant_type;
+
 	return 1;
 }
 
-/**
- * rs_get_supported_rates - get the available rates
- *
- * if management frame or broadcast frame only return
- * basic available rates.
- *
- */
 static u16 rs_get_supported_rates(struct iwl_lq_sta *lq_sta,
-				  struct ieee80211_hdr *hdr,
-				  enum iwl_table_type rate_type)
+				  struct rs_rate *rate)
 {
-	if (is_legacy(rate_type))
+	if (is_legacy(rate))
 		return lq_sta->active_legacy_rate;
-	else if (is_siso(rate_type))
+	else if (is_siso(rate))
 		return lq_sta->active_siso_rate;
-	else if (is_mimo2(rate_type))
+	else if (is_mimo2(rate))
 		return lq_sta->active_mimo2_rate;
 
 	WARN_ON_ONCE(1);
@@ -628,7 +809,7 @@ static u16 rs_get_adjacent_rate(struct iwl_mvm *mvm, u8 index, u16 rate_mask,
 
 	/* 802.11A or ht walks to the next literal adjacent rate in
 	 * the rate table */
-	if (is_a_band(rate_type) || !is_legacy(rate_type)) {
+	if (is_type_a_band(rate_type) || !is_type_legacy(rate_type)) {
 		int i;
 		u32 mask;
 
@@ -676,73 +857,80 @@ static u16 rs_get_adjacent_rate(struct iwl_mvm *mvm, u8 index, u16 rate_mask,
 	return (high << 8) | low;
 }
 
-static u32 rs_get_lower_rate(struct iwl_lq_sta *lq_sta,
-			     struct iwl_scale_tbl_info *tbl,
-			     u8 scale_index, u8 ht_possible)
+static inline bool rs_rate_supported(struct iwl_lq_sta *lq_sta,
+				     struct rs_rate *rate)
 {
-	s32 low;
-	u16 rate_mask;
+	return BIT(rate->index) & rs_get_supported_rates(lq_sta, rate);
+}
+
+/* Get the next supported lower rate in the current column.
+ * Return true if bottom rate in the current column was reached
+ */
+static bool rs_get_lower_rate_in_column(struct iwl_lq_sta *lq_sta,
+					struct rs_rate *rate)
+{
+	u8 low;
 	u16 high_low;
-	u8 switch_to_legacy = 0;
+	u16 rate_mask;
 	struct iwl_mvm *mvm = lq_sta->drv;
 
-	/* check if we need to switch from HT to legacy rates.
-	 * assumption is that mandatory rates (1Mbps or 6Mbps)
-	 * are always supported (spec demand) */
-	if (!is_legacy(tbl->lq_type) && (!ht_possible || !scale_index)) {
-		switch_to_legacy = 1;
-		scale_index = rs_ht_to_legacy[scale_index];
-		if (lq_sta->band == IEEE80211_BAND_5GHZ)
-			tbl->lq_type = LQ_LEGACY_A;
-		else
-			tbl->lq_type = LQ_LEGACY_G;
+	rate_mask = rs_get_supported_rates(lq_sta, rate);
+	high_low = rs_get_adjacent_rate(mvm, rate->index, rate_mask,
+					rate->type);
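+	/* rs_get_adjacent_rate() packs the lower adjacent rate index in the
+	 * low byte and the higher one in the high byte; only the lower rate
+	 * is needed here.
+	 */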
+	low = high_low & 0xff;
 
-		if (num_of_ant(tbl->ant_type) > 1)
-			tbl->ant_type =
-			    first_antenna(iwl_fw_valid_tx_ant(mvm->fw));
+	/* Bottom rate of column reached */
+	if (low == IWL_RATE_INVALID)
+		return true;
 
-		tbl->bw = 0;
-		tbl->is_SGI = 0;
-		tbl->max_search = IWL_MAX_SEARCH;
-	}
+	rate->index = low;
+	return false;
+}
 
-	rate_mask = rs_get_supported_rates(lq_sta, NULL, tbl->lq_type);
+/* Get the next rate to use following a column downgrade */
+static void rs_get_lower_rate_down_column(struct iwl_lq_sta *lq_sta,
+					  struct rs_rate *rate)
+{
+	struct iwl_mvm *mvm = lq_sta->drv;
 
-	/* Mask with station rate restriction */
-	if (is_legacy(tbl->lq_type)) {
-		/* supp_rates has no CCK bits in A mode */
+	if (is_legacy(rate)) {
+		/* No column to downgrade from Legacy */
+		return;
+	} else if (is_siso(rate)) {
+		/* Downgrade to Legacy if we were in SISO */
 		if (lq_sta->band == IEEE80211_BAND_5GHZ)
-			rate_mask  = (u16)(rate_mask &
-			   (lq_sta->supp_rates << IWL_FIRST_OFDM_RATE));
+			rate->type = LQ_LEGACY_A;
 		else
-			rate_mask = (u16)(rate_mask & lq_sta->supp_rates);
-	}
+			rate->type = LQ_LEGACY_G;
 
-	/* If we switched from HT to legacy, check current rate */
-	if (switch_to_legacy && (rate_mask & (1 << scale_index))) {
-		low = scale_index;
-		goto out;
+		rate->bw = RATE_MCS_CHAN_WIDTH_20;
+
+		WARN_ON_ONCE(rate->index < IWL_RATE_MCS_0_INDEX ||
+			     rate->index > IWL_RATE_MCS_9_INDEX);
+
+		rate->index = rs_ht_to_legacy[rate->index];
+	} else {
+		/* Downgrade to SISO with same MCS if in MIMO  */
+		rate->type = is_vht_mimo2(rate) ?
+			LQ_VHT_SISO : LQ_HT_SISO;
 	}
 
-	high_low = rs_get_adjacent_rate(lq_sta->drv, scale_index, rate_mask,
-					tbl->lq_type);
-	low = high_low & 0xff;
 
-	if (low == IWL_RATE_INVALID)
-		low = scale_index;
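+	/* SISO and legacy columns use a single antenna, so drop to the
+	 * first valid TX antenna when coming down from MIMO.
+	 */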
+	if (num_of_ant(rate->ant) > 1)
+		rate->ant = first_antenna(iwl_fw_valid_tx_ant(mvm->fw));
 
-out:
-	return rate_n_flags_from_tbl(lq_sta->drv, tbl, low);
+	/* Relevant when switching to either SISO or Legacy */
+	rate->sgi = false;
+
+	if (!rs_rate_supported(lq_sta, rate))
+		rs_get_lower_rate_in_column(lq_sta, rate);
 }
 
-/*
- * Simple function to compare two rate scale table types
- */
-static bool table_type_matches(struct iwl_scale_tbl_info *a,
-			       struct iwl_scale_tbl_info *b)
+/* Compare two rs_rate objects: same modulation type, antenna and SGI */
+static inline bool rs_rate_match(struct rs_rate *a,
+				 struct rs_rate *b)
 {
-	return (a->lq_type == b->lq_type) && (a->ant_type == b->ant_type) &&
-		(a->is_SGI == b->is_SGI);
+	return (a->type == b->type) && (a->ant == b->ant) && (a->sgi == b->sgi);
 }
 
 static u32 rs_ch_width_from_mac_flags(enum mac80211_rate_control_flags flags)
@@ -766,7 +954,7 @@ static void rs_tx_status(void *mvm_r, struct ieee80211_supported_band *sband,
 {
 	int legacy_success;
 	int retries;
-	int rs_index, mac_index, i;
+	int mac_index, i;
 	struct iwl_lq_sta *lq_sta = priv_sta;
 	struct iwl_lq_cmd *table;
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
@@ -774,13 +962,10 @@ static void rs_tx_status(void *mvm_r, struct ieee80211_supported_band *sband,
 	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 	enum mac80211_rate_control_flags mac_flags;
-	u32 tx_rate;
-	struct iwl_scale_tbl_info tbl_type;
+	u32 ucode_rate;
+	struct rs_rate rate;
 	struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl;
 
-	IWL_DEBUG_RATE_LIMIT(mvm,
-			     "get frame ack response, update rate scale window\n");
-
 	/* Treat uninitialized rate scaling data same as non-existing. */
 	if (!lq_sta) {
 		IWL_DEBUG_RATE(mvm, "Station rate scaling not created yet.\n");
@@ -808,10 +993,10 @@ static void rs_tx_status(void *mvm_r, struct ieee80211_supported_band *sband,
 	 * to a new "search" mode (which might become the new "active" mode).
 	 */
 	table = &lq_sta->lq;
-	tx_rate = le32_to_cpu(table->rs_table[0]);
-	rs_get_tbl_info_from_mcs(tx_rate, info->band, &tbl_type, &rs_index);
+	ucode_rate = le32_to_cpu(table->rs_table[0]);
+	rs_rate_from_ucode_rate(ucode_rate, info->band, &rate);
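+	/* mac80211 reports 5 GHz rate indices without the CCK entries, so
+	 * shift our index before comparing it with mac_index below.
+	 */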
 	if (info->band == IEEE80211_BAND_5GHZ)
-		rs_index -= IWL_FIRST_OFDM_RATE;
+		rate.index -= IWL_FIRST_OFDM_RATE;
 	mac_flags = info->status.rates[0].flags;
 	mac_index = info->status.rates[0].idx;
 	/* For HT packets, map MCS to PLCP */
@@ -834,19 +1019,19 @@ static void rs_tx_status(void *mvm_r, struct ieee80211_supported_band *sband,
 
 	/* Here we actually compare this rate to the latest LQ command */
 	if ((mac_index < 0) ||
-	    (tbl_type.is_SGI != !!(mac_flags & IEEE80211_TX_RC_SHORT_GI)) ||
-	    (tbl_type.bw != rs_ch_width_from_mac_flags(mac_flags)) ||
-	    (tbl_type.ant_type != info->status.antenna) ||
-	    (!!(tx_rate & RATE_MCS_HT_MSK) !=
+	    (rate.sgi != !!(mac_flags & IEEE80211_TX_RC_SHORT_GI)) ||
+	    (rate.bw != rs_ch_width_from_mac_flags(mac_flags)) ||
+	    (rate.ant != info->status.antenna) ||
+	    (!!(ucode_rate & RATE_MCS_HT_MSK) !=
 	     !!(mac_flags & IEEE80211_TX_RC_MCS)) ||
-	    (!!(tx_rate & RATE_MCS_VHT_MSK) !=
+	    (!!(ucode_rate & RATE_MCS_VHT_MSK) !=
 	     !!(mac_flags & IEEE80211_TX_RC_VHT_MCS)) ||
-	    (!!(tx_rate & RATE_HT_MCS_GF_MSK) !=
+	    (!!(ucode_rate & RATE_HT_MCS_GF_MSK) !=
 	     !!(mac_flags & IEEE80211_TX_RC_GREEN_FIELD)) ||
-	    (rs_index != mac_index)) {
+	    (rate.index != mac_index)) {
 		IWL_DEBUG_RATE(mvm,
 			       "initial rate %d does not match %d (0x%x)\n",
-			       mac_index, rs_index, tx_rate);
+			       mac_index, rate.index, ucode_rate);
 		/*
 		 * Since rates mis-match, the last LQ command may have failed.
 		 * After IWL_MISSED_RATE_MAX mis-matches, resync the uCode with
@@ -855,7 +1040,10 @@ static void rs_tx_status(void *mvm_r, struct ieee80211_supported_band *sband,
 		lq_sta->missed_rate_counter++;
 		if (lq_sta->missed_rate_counter > IWL_MISSED_RATE_MAX) {
 			lq_sta->missed_rate_counter = 0;
-			iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, CMD_ASYNC, false);
+			IWL_DEBUG_RATE(mvm,
+				       "Too many rate mismatches. Send sync LQ. rs_state %d\n",
+				       lq_sta->rs_state);
+			iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, false);
 		}
 		/* Regardless, ignore this status info for outdated rate */
 		return;
@@ -864,28 +1052,23 @@ static void rs_tx_status(void *mvm_r, struct ieee80211_supported_band *sband,
 		lq_sta->missed_rate_counter = 0;
 
 	/* Figure out if rate scale algorithm is in active or search table */
-	if (table_type_matches(&tbl_type,
-			       &(lq_sta->lq_info[lq_sta->active_tbl]))) {
+	if (rs_rate_match(&rate,
+			  &(lq_sta->lq_info[lq_sta->active_tbl].rate))) {
 		curr_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
 		other_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
-	} else if (table_type_matches(
-			&tbl_type, &lq_sta->lq_info[1 - lq_sta->active_tbl])) {
+	} else if (rs_rate_match(&rate,
+			 &lq_sta->lq_info[1 - lq_sta->active_tbl].rate)) {
 		curr_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
 		other_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
 	} else {
 		IWL_DEBUG_RATE(mvm,
 			       "Neither active nor search matches tx rate\n");
 		tmp_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
-		IWL_DEBUG_RATE(mvm, "active- lq:%x, ant:%x, SGI:%d\n",
-			       tmp_tbl->lq_type, tmp_tbl->ant_type,
-			       tmp_tbl->is_SGI);
+		rs_dump_rate(mvm, &tmp_tbl->rate, "ACTIVE");
 		tmp_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
-		IWL_DEBUG_RATE(mvm, "search- lq:%x, ant:%x, SGI:%d\n",
-			       tmp_tbl->lq_type, tmp_tbl->ant_type,
-			       tmp_tbl->is_SGI);
-		IWL_DEBUG_RATE(mvm, "actual- lq:%x, ant:%x, SGI:%d\n",
-			       tbl_type.lq_type, tbl_type.ant_type,
-			       tbl_type.is_SGI);
+		rs_dump_rate(mvm, &tmp_tbl->rate, "SEARCH");
+		rs_dump_rate(mvm, &rate, "ACTUAL");
+
 		/*
 		 * no matching table found, let's by-pass the data collection
 		 * and continue to perform rate scale to find the rate table
@@ -902,15 +1085,14 @@ static void rs_tx_status(void *mvm_r, struct ieee80211_supported_band *sband,
 	 * first index into rate scale table.
 	 */
 	if (info->flags & IEEE80211_TX_STAT_AMPDU) {
-		tx_rate = le32_to_cpu(table->rs_table[0]);
-		rs_get_tbl_info_from_mcs(tx_rate, info->band, &tbl_type,
-					 &rs_index);
-		rs_collect_tx_data(curr_tbl, rs_index,
+		ucode_rate = le32_to_cpu(table->rs_table[0]);
+		rs_rate_from_ucode_rate(ucode_rate, info->band, &rate);
+		rs_collect_tx_data(curr_tbl, rate.index,
 				   info->status.ampdu_len,
 				   info->status.ampdu_ack_len);
 
 		/* Update success/fail counts if not searching for new mode */
-		if (lq_sta->stay_in_tbl) {
+		if (lq_sta->rs_state == RS_STATE_STAY_IN_COLUMN) {
 			lq_sta->total_success += info->status.ampdu_ack_len;
 			lq_sta->total_failed += (info->status.ampdu_len -
 					info->status.ampdu_ack_len);
@@ -927,31 +1109,31 @@ static void rs_tx_status(void *mvm_r, struct ieee80211_supported_band *sband,
 		legacy_success = !!(info->flags & IEEE80211_TX_STAT_ACK);
 		/* Collect data for each rate used during failed TX attempts */
 		for (i = 0; i <= retries; ++i) {
-			tx_rate = le32_to_cpu(table->rs_table[i]);
-			rs_get_tbl_info_from_mcs(tx_rate, info->band,
-						 &tbl_type, &rs_index);
+			ucode_rate = le32_to_cpu(table->rs_table[i]);
+			rs_rate_from_ucode_rate(ucode_rate, info->band, &rate);
 			/*
 			 * Only collect stats if retried rate is in the same RS
 			 * table as active/search.
 			 */
-			if (table_type_matches(&tbl_type, curr_tbl))
+			if (rs_rate_match(&rate, &curr_tbl->rate))
 				tmp_tbl = curr_tbl;
-			else if (table_type_matches(&tbl_type, other_tbl))
+			else if (rs_rate_match(&rate, &other_tbl->rate))
 				tmp_tbl = other_tbl;
 			else
 				continue;
-			rs_collect_tx_data(tmp_tbl, rs_index, 1,
+
+			rs_collect_tx_data(tmp_tbl, rate.index, 1,
 					   i < retries ? 0 : legacy_success);
 		}
 
 		/* Update success/fail counts if not searching for new mode */
-		if (lq_sta->stay_in_tbl) {
+		if (lq_sta->rs_state == RS_STATE_STAY_IN_COLUMN) {
 			lq_sta->total_success += legacy_success;
 			lq_sta->total_failed += retries + (1 - legacy_success);
 		}
 	}
 	/* The last TX rate is cached in lq_sta; it's set in if/else above */
-	lq_sta->last_rate_n_flags = tx_rate;
+	lq_sta->last_rate_n_flags = ucode_rate;
 done:
 	/* See if there's a better rate or modulation mode to try. */
 	if (sta && sta->supp_rates[sband->band])
@@ -969,8 +1151,8 @@ done:
 static void rs_set_stay_in_table(struct iwl_mvm *mvm, u8 is_legacy,
 				 struct iwl_lq_sta *lq_sta)
 {
-	IWL_DEBUG_RATE(mvm, "we are staying in the same table\n");
-	lq_sta->stay_in_tbl = 1;	/* only place this gets set */
+	IWL_DEBUG_RATE(mvm, "Moving to RS_STATE_STAY_IN_COLUMN\n");
+	lq_sta->rs_state = RS_STATE_STAY_IN_COLUMN;
 	if (is_legacy) {
 		lq_sta->table_count_limit = IWL_LEGACY_TABLE_COUNT;
 		lq_sta->max_failure_limit = IWL_LEGACY_FAILURE_LIMIT;
@@ -984,37 +1166,31 @@ static void rs_set_stay_in_table(struct iwl_mvm *mvm, u8 is_legacy,
 	lq_sta->total_failed = 0;
 	lq_sta->total_success = 0;
 	lq_sta->flush_timer = jiffies;
-	lq_sta->action_counter = 0;
+	lq_sta->visited_columns = 0;
 }
 
-/*
- * Find correct throughput table for given mode of modulation
- */
-static void rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta,
-				      struct iwl_scale_tbl_info *tbl)
+static s32 *rs_get_expected_tpt_table(struct iwl_lq_sta *lq_sta,
+				      const struct rs_tx_column *column,
+				      u32 bw)
 {
 	/* Used to choose among HT tables */
 	s32 (*ht_tbl_pointer)[IWL_RATE_COUNT];
 
-	/* Check for invalid LQ type */
-	if (WARN_ON_ONCE(!is_legacy(tbl->lq_type) && !is_ht(tbl->lq_type) &&
-			 !(is_vht(tbl->lq_type)))) {
-		tbl->expected_tpt = expected_tpt_legacy;
-		return;
-	}
+	if (WARN_ON_ONCE(column->mode != RS_LEGACY &&
+			 column->mode != RS_SISO &&
+			 column->mode != RS_MIMO2))
+		return expected_tpt_legacy;
 
 	/* Legacy rates have only one table */
-	if (is_legacy(tbl->lq_type)) {
-		tbl->expected_tpt = expected_tpt_legacy;
-		return;
-	}
+	if (column->mode == RS_LEGACY)
+		return expected_tpt_legacy;
 
 	ht_tbl_pointer = expected_tpt_mimo2_20MHz;
 	/* Choose among many HT tables depending on number of streams
 	 * (SISO/MIMO2), channel width (20/40/80), SGI, and aggregation
 	 * status */
-	if (is_siso(tbl->lq_type)) {
-		switch (tbl->bw) {
+	if (column->mode == RS_SISO) {
+		switch (bw) {
 		case RATE_MCS_CHAN_WIDTH_20:
 			ht_tbl_pointer = expected_tpt_siso_20MHz;
 			break;
@@ -1027,8 +1203,8 @@ static void rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta,
 		default:
 			WARN_ON_ONCE(1);
 		}
-	} else if (is_mimo2(tbl->lq_type)) {
-		switch (tbl->bw) {
+	} else if (column->mode == RS_MIMO2) {
+		switch (bw) {
 		case RATE_MCS_CHAN_WIDTH_20:
 			ht_tbl_pointer = expected_tpt_mimo2_20MHz;
 			break;
@@ -1045,14 +1221,23 @@ static void rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta,
 		WARN_ON_ONCE(1);
 	}
 
-	if (!tbl->is_SGI && !lq_sta->is_agg)		/* Normal */
-		tbl->expected_tpt = ht_tbl_pointer[0];
-	else if (tbl->is_SGI && !lq_sta->is_agg)	/* SGI */
-		tbl->expected_tpt = ht_tbl_pointer[1];
-	else if (!tbl->is_SGI && lq_sta->is_agg)	/* AGG */
-		tbl->expected_tpt = ht_tbl_pointer[2];
+	if (!column->sgi && !lq_sta->is_agg)		/* Normal */
+		return ht_tbl_pointer[0];
+	else if (column->sgi && !lq_sta->is_agg)        /* SGI */
+		return ht_tbl_pointer[1];
+	else if (!column->sgi && lq_sta->is_agg)        /* AGG */
+		return ht_tbl_pointer[2];
 	else						/* AGG+SGI */
-		tbl->expected_tpt = ht_tbl_pointer[3];
+		return ht_tbl_pointer[3];
+}
+
+static void rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta,
+				      struct iwl_scale_tbl_info *tbl)
+{
+	struct rs_rate *rate = &tbl->rate;
+	const struct rs_tx_column *column = &rs_tx_columns[tbl->column];
+
+	tbl->expected_tpt = rs_get_expected_tpt_table(lq_sta, column, rate->bw);
 }
 
 /*
@@ -1089,7 +1274,7 @@ static s32 rs_get_best_rate(struct iwl_mvm *mvm,
 
 	while (1) {
 		high_low = rs_get_adjacent_rate(mvm, rate, rate_mask,
-						tbl->lq_type);
+						tbl->rate.type);
 
 		low = high_low & 0xff;
 		high = (high_low >> 8) & 0xff;
@@ -1110,7 +1295,7 @@ static s32 rs_get_best_rate(struct iwl_mvm *mvm,
 		 *    "active" throughput (under perfect conditions).
 		 */
 		if ((((100 * tpt_tbl[rate]) > lq_sta->last_tpt) &&
-		     ((active_sr > IWL_RATE_DECREASE_TH) &&
+		     ((active_sr > RS_SR_FORCE_DECREASE) &&
 		      (active_sr <= IWL_RATE_HIGH_TH) &&
 		      (tpt_tbl[rate] <= active_tpt))) ||
 		    ((active_sr >= IWL_RATE_SCALE_SWITCH) &&
@@ -1157,417 +1342,14 @@ static s32 rs_get_best_rate(struct iwl_mvm *mvm,
 	return new_rate;
 }
 
-/* Move to the next action and wrap around to the first action in case
- * we're at the last action. Assumes actions start at 0.
- */
-static inline void rs_move_next_action(struct iwl_scale_tbl_info *tbl,
-				       u8 last_action)
-{
-	BUILD_BUG_ON(IWL_LEGACY_FIRST_ACTION != 0);
-	BUILD_BUG_ON(IWL_SISO_FIRST_ACTION != 0);
-	BUILD_BUG_ON(IWL_MIMO2_FIRST_ACTION != 0);
-
-	tbl->action = (tbl->action + 1) % (last_action + 1);
-}
-
-static void rs_set_bw_from_sta(struct iwl_scale_tbl_info *tbl,
-			       struct ieee80211_sta *sta)
+static u32 rs_bw_from_sta_bw(struct ieee80211_sta *sta)
 {
 	if (sta->bandwidth >= IEEE80211_STA_RX_BW_80)
-		tbl->bw = RATE_MCS_CHAN_WIDTH_80;
+		return RATE_MCS_CHAN_WIDTH_80;
 	else if (sta->bandwidth >= IEEE80211_STA_RX_BW_40)
-		tbl->bw = RATE_MCS_CHAN_WIDTH_40;
-	else
-		tbl->bw = RATE_MCS_CHAN_WIDTH_20;
-}
-
-static bool rs_sgi_allowed(struct iwl_scale_tbl_info *tbl,
-			   struct ieee80211_sta *sta)
-{
-	struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
-	struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
-
-	if (is_ht20(tbl) && (ht_cap->cap &
-			     IEEE80211_HT_CAP_SGI_20))
-		return true;
-	if (is_ht40(tbl) && (ht_cap->cap &
-			     IEEE80211_HT_CAP_SGI_40))
-		return true;
-	if (is_ht80(tbl) && (vht_cap->cap &
-			     IEEE80211_VHT_CAP_SHORT_GI_80))
-		return true;
-
-	return false;
-}
-
-/*
- * Set up search table for MIMO2
- */
-static int rs_switch_to_mimo2(struct iwl_mvm *mvm,
-			     struct iwl_lq_sta *lq_sta,
-			     struct ieee80211_sta *sta,
-			     struct iwl_scale_tbl_info *tbl, int index)
-{
-	u16 rate_mask;
-	s32 rate;
-
-	if (!sta->ht_cap.ht_supported)
-		return -1;
-
-	if (sta->smps_mode == IEEE80211_SMPS_STATIC)
-		return -1;
-
-	/* Need both Tx chains/antennas to support MIMO */
-	if (num_of_ant(iwl_fw_valid_tx_ant(mvm->fw)) < 2)
-		return -1;
-
-	IWL_DEBUG_RATE(mvm, "LQ: try to switch to MIMO2\n");
-
-	tbl->lq_type = lq_sta->is_vht ? LQ_VHT_MIMO2 : LQ_HT_MIMO2;
-	tbl->action = 0;
-	tbl->max_search = IWL_MAX_SEARCH;
-	rate_mask = lq_sta->active_mimo2_rate;
-
-	rs_set_bw_from_sta(tbl, sta);
-	rs_set_expected_tpt_table(lq_sta, tbl);
-
-	rate = rs_get_best_rate(mvm, lq_sta, tbl, rate_mask, index);
-
-	IWL_DEBUG_RATE(mvm, "LQ: MIMO2 best rate %d mask %X\n",
-		       rate, rate_mask);
-	if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) {
-		IWL_DEBUG_RATE(mvm, "Can't switch with index %d rate mask %x\n",
-			       rate, rate_mask);
-		return -1;
-	}
-	tbl->current_rate = rate_n_flags_from_tbl(mvm, tbl, rate);
-
-	IWL_DEBUG_RATE(mvm, "LQ: Switch to new mcs %X index\n",
-		       tbl->current_rate);
-	return 0;
-}
-
-/*
- * Set up search table for SISO
- */
-static int rs_switch_to_siso(struct iwl_mvm *mvm,
-			     struct iwl_lq_sta *lq_sta,
-			     struct ieee80211_sta *sta,
-			     struct iwl_scale_tbl_info *tbl, int index)
-{
-	u16 rate_mask;
-	s32 rate;
-
-	if (!sta->ht_cap.ht_supported)
-		return -1;
-
-	IWL_DEBUG_RATE(mvm, "LQ: try to switch to SISO\n");
-
-	tbl->lq_type = lq_sta->is_vht ? LQ_VHT_SISO : LQ_HT_SISO;
-	tbl->action = 0;
-	tbl->max_search = IWL_MAX_SEARCH;
-	rate_mask = lq_sta->active_siso_rate;
-
-	rs_set_bw_from_sta(tbl, sta);
-	rs_set_expected_tpt_table(lq_sta, tbl);
-	rate = rs_get_best_rate(mvm, lq_sta, tbl, rate_mask, index);
-
-	IWL_DEBUG_RATE(mvm, "LQ: get best rate %d mask %X\n", rate, rate_mask);
-	if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) {
-		IWL_DEBUG_RATE(mvm,
-			       "can not switch with index %d rate mask %x\n",
-			       rate, rate_mask);
-		return -1;
-	}
-	tbl->current_rate = rate_n_flags_from_tbl(mvm, tbl, rate);
-	IWL_DEBUG_RATE(mvm, "LQ: Switch to new mcs %X index\n",
-		       tbl->current_rate);
-	return 0;
-}
-
-/*
- * Try to switch to new modulation mode from legacy
- */
-static int rs_move_legacy_other(struct iwl_mvm *mvm,
-				struct iwl_lq_sta *lq_sta,
-				struct ieee80211_sta *sta,
-				int index)
-{
-	struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
-	struct iwl_scale_tbl_info *search_tbl =
-				&(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
-	struct iwl_rate_scale_data *window = &(tbl->win[index]);
-	u32 sz = (sizeof(struct iwl_scale_tbl_info) -
-		  (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
-	u8 start_action;
-	u8 valid_tx_ant = iwl_fw_valid_tx_ant(mvm->fw);
-	u8 tx_chains_num = num_of_ant(valid_tx_ant);
-	int ret;
-	u8 update_search_tbl_counter = 0;
-
-	start_action = tbl->action;
-	while (1) {
-		lq_sta->action_counter++;
-		switch (tbl->action) {
-		case IWL_LEGACY_SWITCH_ANTENNA:
-			IWL_DEBUG_RATE(mvm, "LQ: Legacy toggle Antenna\n");
-
-			if (tx_chains_num <= 1)
-				break;
-
-			/* Don't change antenna if success has been great */
-			if (window->success_ratio >= IWL_RS_GOOD_RATIO)
-				break;
-
-			/* Set up search table to try other antenna */
-			memcpy(search_tbl, tbl, sz);
-
-			if (rs_toggle_antenna(valid_tx_ant,
-					      &search_tbl->current_rate,
-					      search_tbl)) {
-				update_search_tbl_counter = 1;
-				rs_set_expected_tpt_table(lq_sta, search_tbl);
-				goto out;
-			}
-			break;
-		case IWL_LEGACY_SWITCH_SISO:
-			IWL_DEBUG_RATE(mvm, "LQ: Legacy switch to SISO\n");
-
-			/* Set up search table to try SISO */
-			memcpy(search_tbl, tbl, sz);
-			search_tbl->is_SGI = 0;
-			ret = rs_switch_to_siso(mvm, lq_sta, sta,
-						 search_tbl, index);
-			if (!ret) {
-				lq_sta->action_counter = 0;
-				goto out;
-			}
-
-			break;
-		case IWL_LEGACY_SWITCH_MIMO2:
-			IWL_DEBUG_RATE(mvm, "LQ: Legacy switch to MIMO2\n");
-
-			/* Set up search table to try MIMO */
-			memcpy(search_tbl, tbl, sz);
-			search_tbl->is_SGI = 0;
-
-			search_tbl->ant_type = ANT_AB;
-
-			if (!rs_is_valid_ant(valid_tx_ant,
-					     search_tbl->ant_type))
-				break;
-
-			ret = rs_switch_to_mimo2(mvm, lq_sta, sta,
-						 search_tbl, index);
-			if (!ret) {
-				lq_sta->action_counter = 0;
-				goto out;
-			}
-			break;
-		default:
-			WARN_ON_ONCE(1);
-		}
-		rs_move_next_action(tbl, IWL_LEGACY_LAST_ACTION);
-
-		if (tbl->action == start_action)
-			break;
-	}
-	search_tbl->lq_type = LQ_NONE;
-	return 0;
-
-out:
-	lq_sta->search_better_tbl = 1;
-	rs_move_next_action(tbl, IWL_LEGACY_LAST_ACTION);
-	if (update_search_tbl_counter)
-		search_tbl->action = tbl->action;
-	return 0;
-}
-
-/*
- * Try to switch to new modulation mode from SISO
- */
-static int rs_move_siso_to_other(struct iwl_mvm *mvm,
-				 struct iwl_lq_sta *lq_sta,
-				 struct ieee80211_sta *sta, int index)
-{
-	struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
-	struct iwl_scale_tbl_info *search_tbl =
-				&(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
-	struct iwl_rate_scale_data *window = &(tbl->win[index]);
-	u32 sz = (sizeof(struct iwl_scale_tbl_info) -
-		  (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
-	u8 start_action;
-	u8 valid_tx_ant = iwl_fw_valid_tx_ant(mvm->fw);
-	u8 tx_chains_num = num_of_ant(valid_tx_ant);
-	u8 update_search_tbl_counter = 0;
-	int ret;
-
-	if (tbl->action == IWL_SISO_SWITCH_MIMO2 &&
-	    !iwl_mvm_bt_coex_is_mimo_allowed(mvm, sta))
-		tbl->action = IWL_SISO_SWITCH_ANTENNA;
-
-	start_action = tbl->action;
-	while (1) {
-		lq_sta->action_counter++;
-		switch (tbl->action) {
-		case IWL_SISO_SWITCH_ANTENNA:
-			IWL_DEBUG_RATE(mvm, "LQ: SISO toggle Antenna\n");
-			if (tx_chains_num <= 1)
-				break;
-
-			if (window->success_ratio >= IWL_RS_GOOD_RATIO &&
-			    BT_MBOX_MSG(&mvm->last_bt_notif, 3,
-					TRAFFIC_LOAD) == 0)
-				break;
-
-			memcpy(search_tbl, tbl, sz);
-			if (rs_toggle_antenna(valid_tx_ant,
-					      &search_tbl->current_rate,
-					      search_tbl)) {
-				update_search_tbl_counter = 1;
-				goto out;
-			}
-			break;
-		case IWL_SISO_SWITCH_MIMO2:
-			IWL_DEBUG_RATE(mvm, "LQ: SISO switch to MIMO2\n");
-			memcpy(search_tbl, tbl, sz);
-			search_tbl->is_SGI = 0;
-
-			search_tbl->ant_type = ANT_AB;
-
-			if (!rs_is_valid_ant(valid_tx_ant,
-					     search_tbl->ant_type))
-				break;
-
-			ret = rs_switch_to_mimo2(mvm, lq_sta, sta,
-						 search_tbl, index);
-			if (!ret)
-				goto out;
-			break;
-		case IWL_SISO_SWITCH_GI:
-			if (!rs_sgi_allowed(tbl, sta))
-				break;
-
-			IWL_DEBUG_RATE(mvm, "LQ: SISO toggle SGI/NGI\n");
-
-			memcpy(search_tbl, tbl, sz);
-			search_tbl->is_SGI = !tbl->is_SGI;
-			rs_set_expected_tpt_table(lq_sta, search_tbl);
-			if (tbl->is_SGI) {
-				s32 tpt = lq_sta->last_tpt / 100;
-				if (tpt >= search_tbl->expected_tpt[index])
-					break;
-			}
-			search_tbl->current_rate =
-				rate_n_flags_from_tbl(mvm, search_tbl, index);
-			update_search_tbl_counter = 1;
-			goto out;
-		default:
-			WARN_ON_ONCE(1);
-		}
-		rs_move_next_action(tbl, IWL_SISO_LAST_ACTION);
-
-		if (tbl->action == start_action)
-			break;
-	}
-	search_tbl->lq_type = LQ_NONE;
-	return 0;
-
- out:
-	lq_sta->search_better_tbl = 1;
-	rs_move_next_action(tbl, IWL_SISO_LAST_ACTION);
-	if (update_search_tbl_counter)
-		search_tbl->action = tbl->action;
-
-	return 0;
-}
-
-/*
- * Try to switch to new modulation mode from MIMO2
- */
-static int rs_move_mimo2_to_other(struct iwl_mvm *mvm,
-				 struct iwl_lq_sta *lq_sta,
-				 struct ieee80211_sta *sta, int index)
-{
-	struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
-	struct iwl_scale_tbl_info *search_tbl =
-				&(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
-	u32 sz = (sizeof(struct iwl_scale_tbl_info) -
-		  (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
-	u8 start_action;
-	u8 valid_tx_ant = iwl_fw_valid_tx_ant(mvm->fw);
-	u8 update_search_tbl_counter = 0;
-	int ret;
-
-	start_action = tbl->action;
-	while (1) {
-		lq_sta->action_counter++;
-		switch (tbl->action) {
-		case IWL_MIMO2_SWITCH_SISO_A:
-		case IWL_MIMO2_SWITCH_SISO_B:
-			IWL_DEBUG_RATE(mvm, "LQ: MIMO2 switch to SISO\n");
-
-			/* Set up new search table for SISO */
-			memcpy(search_tbl, tbl, sz);
-
-			if (tbl->action == IWL_MIMO2_SWITCH_SISO_A)
-				search_tbl->ant_type = ANT_A;
-			else /* tbl->action == IWL_MIMO2_SWITCH_SISO_B */
-				search_tbl->ant_type = ANT_B;
-
-			if (!rs_is_valid_ant(valid_tx_ant,
-					     search_tbl->ant_type))
-				break;
-
-			ret = rs_switch_to_siso(mvm, lq_sta, sta,
-						 search_tbl, index);
-			if (!ret)
-				goto out;
-
-			break;
-
-		case IWL_MIMO2_SWITCH_GI:
-			if (!rs_sgi_allowed(tbl, sta))
-				break;
-
-			IWL_DEBUG_RATE(mvm, "LQ: MIMO2 toggle SGI/NGI\n");
-
-			/* Set up new search table for MIMO2 */
-			memcpy(search_tbl, tbl, sz);
-			search_tbl->is_SGI = !tbl->is_SGI;
-			rs_set_expected_tpt_table(lq_sta, search_tbl);
-			/*
-			 * If active table already uses the fastest possible
-			 * modulation (dual stream with short guard interval),
-			 * and it's working well, there's no need to look
-			 * for a better type of modulation!
-			 */
-			if (tbl->is_SGI) {
-				s32 tpt = lq_sta->last_tpt / 100;
-				if (tpt >= search_tbl->expected_tpt[index])
-					break;
-			}
-			search_tbl->current_rate =
-				rate_n_flags_from_tbl(mvm, search_tbl, index);
-			update_search_tbl_counter = 1;
-			goto out;
-		default:
-			WARN_ON_ONCE(1);
-		}
-		rs_move_next_action(tbl, IWL_MIMO2_LAST_ACTION);
-
-		if (tbl->action == start_action)
-			break;
-	}
-	search_tbl->lq_type = LQ_NONE;
-	return 0;
- out:
-	lq_sta->search_better_tbl = 1;
-	rs_move_next_action(tbl, IWL_MIMO2_LAST_ACTION);
-	if (update_search_tbl_counter)
-		search_tbl->action = tbl->action;
+		return RATE_MCS_CHAN_WIDTH_40;
 
-	return 0;
+	return RATE_MCS_CHAN_WIDTH_20;
 }
 
 /*
@@ -1591,13 +1373,13 @@ static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search)
 	tbl = &(lq_sta->lq_info[active_tbl]);
 
 	/* If we've been disallowing search, see if we should now allow it */
-	if (lq_sta->stay_in_tbl) {
+	if (lq_sta->rs_state == RS_STATE_STAY_IN_COLUMN) {
 		/* Elapsed time using current modulation mode */
 		if (lq_sta->flush_timer)
 			flush_interval_passed =
 				time_after(jiffies,
 					   (unsigned long)(lq_sta->flush_timer +
-						IWL_RATE_SCALE_FLUSH_INTVL));
+						RS_STAY_IN_COLUMN_TIMEOUT));
 
 		/*
 		 * Check if we should allow search for new modulation mode.
@@ -1619,10 +1401,14 @@ static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search)
 				     flush_interval_passed);
 
 			/* Allow search for new mode */
-			lq_sta->stay_in_tbl = 0;	/* only place reset */
+			lq_sta->rs_state = RS_STATE_SEARCH_CYCLE_STARTED;
+			IWL_DEBUG_RATE(mvm,
+				       "Moving to RS_STATE_SEARCH_CYCLE_STARTED\n");
 			lq_sta->total_failed = 0;
 			lq_sta->total_success = 0;
 			lq_sta->flush_timer = 0;
+			/* mark the current column as visited */
+			lq_sta->visited_columns = BIT(tbl->column);
 		/*
 		 * Else if we've used this modulation mode enough repetitions
 		 * (regardless of elapsed time or success/failure), reset
@@ -1646,7 +1432,8 @@ static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search)
 		/* If transitioning to allow "search", reset all history
 		 * bitmaps and stats in active table (this will become the new
 		 * "search" table). */
-		if (!lq_sta->stay_in_tbl) {
+		if (lq_sta->rs_state == RS_STATE_SEARCH_CYCLE_STARTED) {
+			IWL_DEBUG_RATE(mvm, "Clearing up window stats\n");
 			for (i = 0; i < IWL_RATE_COUNT; i++)
 				rs_rate_scale_clear_window(&(tbl->win[i]));
 		}
@@ -1659,15 +1446,10 @@ static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search)
 static void rs_update_rate_tbl(struct iwl_mvm *mvm,
 			       struct ieee80211_sta *sta,
 			       struct iwl_lq_sta *lq_sta,
-			       struct iwl_scale_tbl_info *tbl,
-			       int index)
+			       struct rs_rate *rate)
 {
-	u32 rate;
-
-	/* Update uCode's rate table. */
-	rate = rate_n_flags_from_tbl(mvm, tbl, index);
-	rs_fill_link_cmd(mvm, sta, lq_sta, rate);
-	iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, CMD_ASYNC, false);
+	rs_fill_lq_cmd(mvm, sta, lq_sta, rate);
+	iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, false);
 }
 
 static u8 rs_get_tid(struct iwl_lq_sta *lq_data,
@@ -1686,6 +1468,250 @@ static u8 rs_get_tid(struct iwl_lq_sta *lq_data,
 	return tid;
 }
 
+static enum rs_column rs_get_next_column(struct iwl_mvm *mvm,
+					 struct iwl_lq_sta *lq_sta,
+					 struct ieee80211_sta *sta,
+					 struct iwl_scale_tbl_info *tbl)
+{
+	int i, j, n;
+	enum rs_column next_col_id;
+	const struct rs_tx_column *curr_col = &rs_tx_columns[tbl->column];
+	const struct rs_tx_column *next_col;
+	allow_column_func_t allow_func;
+	u8 valid_ants = iwl_fw_valid_tx_ant(mvm->fw);
+	s32 *expected_tpt_tbl;
+	s32 tpt, max_expected_tpt;
+
+	for (i = 0; i < MAX_NEXT_COLUMNS; i++) {
+		next_col_id = curr_col->next_columns[i];
+
+		if (next_col_id == RS_COLUMN_INVALID)
+			continue;
+
+		if (lq_sta->visited_columns & BIT(next_col_id)) {
+			IWL_DEBUG_RATE(mvm, "Skip already visited column %d\n",
+				       next_col_id);
+			continue;
+		}
+
+		next_col = &rs_tx_columns[next_col_id];
+
+		if (!rs_is_valid_ant(valid_ants, next_col->ant)) {
+			IWL_DEBUG_RATE(mvm,
+				       "Skip column %d as ANT config isn't supported by chip. valid_ants 0x%x column ant 0x%x\n",
+				       next_col_id, valid_ants, next_col->ant);
+			continue;
+		}
+
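+		/* Run the column's allow checks (SISO/MIMO/SGI constraints);
+		 * a single failing check disqualifies the candidate.
+		 */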
+		for (j = 0; j < MAX_COLUMN_CHECKS; j++) {
+			allow_func = next_col->checks[j];
+			if (allow_func && !allow_func(mvm, sta, tbl))
+				break;
+		}
+
+		if (j != MAX_COLUMN_CHECKS) {
+			IWL_DEBUG_RATE(mvm,
+				       "Skip column %d: not allowed (check %d failed)\n",
+				       next_col_id, j);
+
+			continue;
+		}
+
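+		/* Skip columns whose best expected throughput can't beat the
+		 * throughput currently being measured.
+		 */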
+		tpt = lq_sta->last_tpt / 100;
+		expected_tpt_tbl = rs_get_expected_tpt_table(lq_sta, next_col,
+							     tbl->rate.bw);
+		if (WARN_ON_ONCE(!expected_tpt_tbl))
+			continue;
+
+		max_expected_tpt = 0;
+		for (n = 0; n < IWL_RATE_COUNT; n++)
+			if (expected_tpt_tbl[n] > max_expected_tpt)
+				max_expected_tpt = expected_tpt_tbl[n];
+
+		if (tpt >= max_expected_tpt) {
+			IWL_DEBUG_RATE(mvm,
+				       "Skip column %d: can't beat current TPT. Max expected %d current %d\n",
+				       next_col_id, max_expected_tpt, tpt);
+			continue;
+		}
+
+		break;
+	}
+
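+	/* No suitable next column was found */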
+	if (i == MAX_NEXT_COLUMNS)
+		return RS_COLUMN_INVALID;
+
+	IWL_DEBUG_RATE(mvm, "Found potential column %d\n", next_col_id);
+
+	return next_col_id;
+}
+
+static int rs_switch_to_column(struct iwl_mvm *mvm,
+			       struct iwl_lq_sta *lq_sta,
+			       struct ieee80211_sta *sta,
+			       enum rs_column col_id)
+{
+	struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+	struct iwl_scale_tbl_info *search_tbl =
+				&(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
+	struct rs_rate *rate = &search_tbl->rate;
+	const struct rs_tx_column *column = &rs_tx_columns[col_id];
+	const struct rs_tx_column *curr_column = &rs_tx_columns[tbl->column];
+	u32 sz = (sizeof(struct iwl_scale_tbl_info) -
+		  (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
+	u16 rate_mask = 0;
+	u32 rate_idx = 0;
+
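+	/* sz covers iwl_scale_tbl_info up to the per-rate history windows,
+	 * so only the rate/column configuration is carried over.
+	 */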
+	memcpy(search_tbl, tbl, sz);
+
+	rate->sgi = column->sgi;
+	rate->ant = column->ant;
+
+	if (column->mode == RS_LEGACY) {
+		if (lq_sta->band == IEEE80211_BAND_5GHZ)
+			rate->type = LQ_LEGACY_A;
+		else
+			rate->type = LQ_LEGACY_G;
+
+		rate_mask = lq_sta->active_legacy_rate;
+	} else if (column->mode == RS_SISO) {
+		rate->type = lq_sta->is_vht ? LQ_VHT_SISO : LQ_HT_SISO;
+		rate_mask = lq_sta->active_siso_rate;
+	} else if (column->mode == RS_MIMO2) {
+		rate->type = lq_sta->is_vht ? LQ_VHT_MIMO2 : LQ_HT_MIMO2;
+		rate_mask = lq_sta->active_mimo2_rate;
+	} else {
+		WARN_ONCE(1, "Bad column mode\n");
+	}
+
+	rate->bw = rs_bw_from_sta_bw(sta);
+	search_tbl->column = col_id;
+	rs_set_expected_tpt_table(lq_sta, search_tbl);
+
+	lq_sta->visited_columns |= BIT(col_id);
+
+	/* Get the best matching rate if we're changing modes. e.g.
+	 * SISO->MIMO, LEGACY->SISO, MIMO->SISO
+	 */
+	if (curr_column->mode != column->mode) {
+		rate_idx = rs_get_best_rate(mvm, lq_sta, search_tbl,
+					    rate_mask, rate->index);
+
+		if ((rate_idx == IWL_RATE_INVALID) ||
+		    !(BIT(rate_idx) & rate_mask)) {
+			IWL_DEBUG_RATE(mvm,
+				       "Cannot switch with index %d rate mask %x\n",
+				       rate_idx, rate_mask);
+
+			goto err;
+		}
+
+		rate->index = rate_idx;
+	}
+
+	IWL_DEBUG_RATE(mvm, "Switched to column %d: Index %d\n",
+		       col_id, rate->index);
+
+	return 0;
+
+err:
+	rate->type = LQ_NONE;
+	return -1;
+}
+
+static enum rs_action rs_get_rate_action(struct iwl_mvm *mvm,
+					 struct iwl_scale_tbl_info *tbl,
+					 s32 sr, int low, int high,
+					 int current_tpt,
+					 int low_tpt, int high_tpt)
+{
+	enum rs_action action = RS_ACTION_STAY;
+
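+	/* Order of decisions: force a decrease on poor SR, probe upwards when
+	 * no neighbouring rate has been measured, otherwise follow whichever
+	 * measured neighbour looks better; a final sanity check may veto a
+	 * decrease.
+	 */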
+	/* Too many failures, decrease rate */
+	if ((sr <= RS_SR_FORCE_DECREASE) || (current_tpt == 0)) {
+		IWL_DEBUG_RATE(mvm,
+			       "decrease rate because of low SR\n");
+		action = RS_ACTION_DOWNSCALE;
+	/* No throughput measured yet for adjacent rates; try increase. */
+	} else if ((low_tpt == IWL_INVALID_VALUE) &&
+		   (high_tpt == IWL_INVALID_VALUE)) {
+		if (high != IWL_RATE_INVALID && sr >= IWL_RATE_INCREASE_TH) {
+			IWL_DEBUG_RATE(mvm,
+				       "Good SR and no high rate measurement. "
+				       "Increase rate\n");
+			action = RS_ACTION_UPSCALE;
+		} else if (low != IWL_RATE_INVALID) {
+			IWL_DEBUG_RATE(mvm,
+				       "Remain in current rate\n");
+			action = RS_ACTION_STAY;
+		}
+	}
+
+	/* Both adjacent throughputs are measured, but neither one has better
+	 * throughput; we're using the best rate, don't change it!
+	 */
+	else if ((low_tpt != IWL_INVALID_VALUE) &&
+		 (high_tpt != IWL_INVALID_VALUE) &&
+		 (low_tpt < current_tpt) &&
+		 (high_tpt < current_tpt)) {
+		IWL_DEBUG_RATE(mvm,
+			       "Both high and low are worse. "
+			       "Maintain rate\n");
+		action = RS_ACTION_STAY;
+	}
+
+	/* At least one adjacent rate's throughput is measured,
+	 * and may have better performance.
+	 */
+	else {
+		/* Higher adjacent rate's throughput is measured */
+		if (high_tpt != IWL_INVALID_VALUE) {
+			/* Higher rate has better throughput */
+			if (high_tpt > current_tpt &&
+			    sr >= IWL_RATE_INCREASE_TH) {
+				IWL_DEBUG_RATE(mvm,
+					       "Higher rate is better and good "
+					       "SR. Increase rate\n");
+				action = RS_ACTION_UPSCALE;
+			} else {
+				IWL_DEBUG_RATE(mvm,
+					       "Higher rate isn't better OR "
+					       "no good SR. Maintain rate\n");
+				action = RS_ACTION_STAY;
+			}
+
+		/* Lower adjacent rate's throughput is measured */
+		} else if (low_tpt != IWL_INVALID_VALUE) {
+			/* Lower rate has better throughput */
+			if (low_tpt > current_tpt) {
+				IWL_DEBUG_RATE(mvm,
+					       "Lower rate is better. "
+					       "Decrease rate\n");
+				action = RS_ACTION_DOWNSCALE;
+			} else if (sr >= IWL_RATE_INCREASE_TH) {
+				IWL_DEBUG_RATE(mvm,
+					       "Lower rate isn't better and "
+					       "good SR. Increase rate\n");
+				action = RS_ACTION_UPSCALE;
+			}
+		}
+	}
+
+	/* Sanity check; asked for decrease, but success rate or throughput
+	 * has been good at old rate.  Don't change it.
+	 */
+	if ((action == RS_ACTION_DOWNSCALE) && (low != IWL_RATE_INVALID) &&
+	    ((sr > IWL_RATE_HIGH_TH) ||
+	     (current_tpt > (100 * tbl->expected_tpt[low])))) {
+		IWL_DEBUG_RATE(mvm,
+			       "Sanity check failed. Maintain rate\n");
+		action = RS_ACTION_STAY;
+	}
+
+	return action;
+}
+
 /*
  * Do rate scaling and search for new modulation mode.
  */
@@ -1705,20 +1731,19 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
 	int low_tpt = IWL_INVALID_VALUE;
 	int high_tpt = IWL_INVALID_VALUE;
 	u32 fail_count;
-	s8 scale_action = 0;
+	enum rs_action scale_action = RS_ACTION_STAY;
 	u16 rate_mask;
 	u8 update_lq = 0;
 	struct iwl_scale_tbl_info *tbl, *tbl1;
-	u16 rate_scale_index_msk = 0;
 	u8 active_tbl = 0;
 	u8 done_search = 0;
 	u16 high_low;
 	s32 sr;
 	u8 tid = IWL_MAX_TID_COUNT;
+	u8 prev_agg = lq_sta->is_agg;
 	struct iwl_mvm_sta *sta_priv = (void *)sta->drv_priv;
 	struct iwl_mvm_tid_data *tid_data;
-
-	IWL_DEBUG_RATE(mvm, "rate scale calculate new rate for skb\n");
+	struct rs_rate *rate;
 
 	/* Send management frames and NO_ACK data using lowest rate. */
 	/* TODO: this could probably be improved.. */
@@ -1726,8 +1751,6 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
 	    info->flags & IEEE80211_TX_CTL_NO_ACK)
 		return;
 
-	lq_sta->supp_rates = sta->supp_rates[lq_sta->band];
-
 	tid = rs_get_tid(lq_sta, hdr);
 	if ((tid != IWL_MAX_TID_COUNT) &&
 	    (lq_sta->tx_agg_tid_en & (1 << tid))) {
@@ -1751,45 +1774,29 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
 		active_tbl = 1 - lq_sta->active_tbl;
 
 	tbl = &(lq_sta->lq_info[active_tbl]);
+	rate = &tbl->rate;
+
+	if (prev_agg != lq_sta->is_agg) {
+		IWL_DEBUG_RATE(mvm,
+			       "Aggregation changed: prev %d current %d. Update expected TPT table\n",
+			       prev_agg, lq_sta->is_agg);
+		rs_set_expected_tpt_table(lq_sta, tbl);
+	}
 
 	/* current tx rate */
 	index = lq_sta->last_txrate_idx;
 
-	IWL_DEBUG_RATE(mvm, "Rate scale index %d for type %d\n", index,
-		       tbl->lq_type);
-
 	/* rates available for this association, and for modulation mode */
-	rate_mask = rs_get_supported_rates(lq_sta, hdr, tbl->lq_type);
+	rate_mask = rs_get_supported_rates(lq_sta, rate);
 
-	IWL_DEBUG_RATE(mvm, "mask 0x%04X\n", rate_mask);
-
-	/* mask with station rate restriction */
-	if (is_legacy(tbl->lq_type)) {
-		if (lq_sta->band == IEEE80211_BAND_5GHZ)
-			/* supp_rates has no CCK bits in A mode */
-			rate_scale_index_msk = (u16) (rate_mask &
-				(lq_sta->supp_rates << IWL_FIRST_OFDM_RATE));
-		else
-			rate_scale_index_msk = (u16) (rate_mask &
-						      lq_sta->supp_rates);
-
-	} else {
-		rate_scale_index_msk = rate_mask;
-	}
-
-	if (!rate_scale_index_msk)
-		rate_scale_index_msk = rate_mask;
-
-	if (!((1 << index) & rate_scale_index_msk)) {
+	if (!(BIT(index) & rate_mask)) {
 		IWL_ERR(mvm, "Current Rate is not valid\n");
 		if (lq_sta->search_better_tbl) {
 			/* revert to active table if search table is not valid*/
-			tbl->lq_type = LQ_NONE;
+			rate->type = LQ_NONE;
 			lq_sta->search_better_tbl = 0;
 			tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
-			/* get "active" rate info */
-			index = iwl_hwrate_to_plcp_idx(tbl->current_rate);
-			rs_update_rate_tbl(mvm, sta, lq_sta, tbl, index);
+			rs_update_rate_tbl(mvm, sta, lq_sta, &tbl->rate);
 		}
 		return;
 	}
@@ -1806,6 +1813,9 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
 		index = lq_sta->max_rate_idx;
 		update_lq = 1;
 		window = &(tbl->win[index]);
+		IWL_DEBUG_RATE(mvm,
+			       "Forcing user max rate %d\n",
+			       index);
 		goto lq_update;
 	}
 
@@ -1822,8 +1832,9 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
 	if ((fail_count < IWL_RATE_MIN_FAILURE_TH) &&
 	    (window->success_counter < IWL_RATE_MIN_SUCCESS_TH)) {
 		IWL_DEBUG_RATE(mvm,
-			       "LQ: still below TH. succ=%d total=%d for index %d\n",
-			       window->success_counter, window->counter, index);
+			       "(%s: %d): Test Window: succ %d total %d\n",
+			       rs_pretty_lq_type(rate->type),
+			       index, window->success_counter, window->counter);
 
 		/* Can't calculate this yet; not enough history */
 		window->average_tpt = IWL_INVALID_VALUE;
@@ -1838,8 +1849,6 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
 	 * actual average throughput */
 	if (window->average_tpt != ((window->success_ratio *
 			tbl->expected_tpt[index] + 64) / 128)) {
-		IWL_ERR(mvm,
-			"expected_tpt should have been calculated by now\n");
 		window->average_tpt = ((window->success_ratio *
 					tbl->expected_tpt[index] + 64) / 128);
 	}
@@ -1851,34 +1860,33 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
 		 * continuing to use the setup that we've been trying. */
 		if (window->average_tpt > lq_sta->last_tpt) {
 			IWL_DEBUG_RATE(mvm,
-				       "LQ: SWITCHING TO NEW TABLE suc=%d cur-tpt=%d old-tpt=%d\n",
+				       "SWITCHING TO NEW TABLE SR: %d "
+				       "cur-tpt %d old-tpt %d\n",
 				       window->success_ratio,
 				       window->average_tpt,
 				       lq_sta->last_tpt);
 
-			if (!is_legacy(tbl->lq_type))
-				lq_sta->enable_counter = 1;
-
 			/* Swap tables; "search" becomes "active" */
 			lq_sta->active_tbl = active_tbl;
 			current_tpt = window->average_tpt;
 		/* Else poor success; go back to mode in "active" table */
 		} else {
 			IWL_DEBUG_RATE(mvm,
-				       "LQ: GOING BACK TO THE OLD TABLE suc=%d cur-tpt=%d old-tpt=%d\n",
+				       "GOING BACK TO THE OLD TABLE: SR %d "
+				       "cur-tpt %d old-tpt %d\n",
 				       window->success_ratio,
 				       window->average_tpt,
 				       lq_sta->last_tpt);
 
 			/* Nullify "search" table */
-			tbl->lq_type = LQ_NONE;
+			rate->type = LQ_NONE;
 
 			/* Revert to "active" table */
 			active_tbl = lq_sta->active_tbl;
 			tbl = &(lq_sta->lq_info[active_tbl]);
 
 			/* Revert to "active" rate and throughput info */
-			index = iwl_hwrate_to_plcp_idx(tbl->current_rate);
+			index = tbl->rate.index;
 			current_tpt = lq_sta->last_tpt;
 
 			/* Need to set up a new rate table in uCode */
@@ -1894,8 +1902,7 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
 
 	/* (Else) not in search of better modulation mode, try for better
 	 * starting rate, while staying in this mode. */
-	high_low = rs_get_adjacent_rate(mvm, index, rate_scale_index_msk,
-					tbl->lq_type);
+	high_low = rs_get_adjacent_rate(mvm, index, rate_mask, rate->type);
 	low = high_low & 0xff;
 	high = (high_low >> 8) & 0xff;
 
@@ -1913,118 +1920,58 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
 	if (high != IWL_RATE_INVALID)
 		high_tpt = tbl->win[high].average_tpt;
 
-	scale_action = 0;
-
-	/* Too many failures, decrease rate */
-	if ((sr <= IWL_RATE_DECREASE_TH) || (current_tpt == 0)) {
-		IWL_DEBUG_RATE(mvm,
-			       "decrease rate because of low success_ratio\n");
-		scale_action = -1;
-	/* No throughput measured yet for adjacent rates; try increase. */
-	} else if ((low_tpt == IWL_INVALID_VALUE) &&
-		   (high_tpt == IWL_INVALID_VALUE)) {
-		if (high != IWL_RATE_INVALID && sr >= IWL_RATE_INCREASE_TH)
-			scale_action = 1;
-		else if (low != IWL_RATE_INVALID)
-			scale_action = 0;
-	}
-
-	/* Both adjacent throughputs are measured, but neither one has better
-	 * throughput; we're using the best rate, don't change it! */
-	else if ((low_tpt != IWL_INVALID_VALUE) &&
-		 (high_tpt != IWL_INVALID_VALUE) &&
-		 (low_tpt < current_tpt) &&
-		 (high_tpt < current_tpt))
-		scale_action = 0;
-
-	/* At least one adjacent rate's throughput is measured,
-	 * and may have better performance. */
-	else {
-		/* Higher adjacent rate's throughput is measured */
-		if (high_tpt != IWL_INVALID_VALUE) {
-			/* Higher rate has better throughput */
-			if (high_tpt > current_tpt &&
-			    sr >= IWL_RATE_INCREASE_TH) {
-				scale_action = 1;
-			} else {
-				scale_action = 0;
-			}
-
-		/* Lower adjacent rate's throughput is measured */
-		} else if (low_tpt != IWL_INVALID_VALUE) {
-			/* Lower rate has better throughput */
-			if (low_tpt > current_tpt) {
-				IWL_DEBUG_RATE(mvm,
-					       "decrease rate because of low tpt\n");
-				scale_action = -1;
-			} else if (sr >= IWL_RATE_INCREASE_TH) {
-				scale_action = 1;
-			}
-		}
-	}
-
-	/* Sanity check; asked for decrease, but success rate or throughput
-	 * has been good at old rate.  Don't change it. */
-	if ((scale_action == -1) && (low != IWL_RATE_INVALID) &&
-	    ((sr > IWL_RATE_HIGH_TH) ||
-	     (current_tpt > (100 * tbl->expected_tpt[low]))))
-		scale_action = 0;
+	IWL_DEBUG_RATE(mvm,
+		       "(%s: %d): cur_tpt %d SR %d low %d high %d low_tpt %d high_tpt %d\n",
+		       rs_pretty_lq_type(rate->type), index, current_tpt, sr,
+		       low, high, low_tpt, high_tpt);
 
-	if ((le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) >=
-	     IWL_BT_COEX_TRAFFIC_LOAD_HIGH) && (is_mimo(tbl->lq_type))) {
-		if (lq_sta->last_bt_traffic >
-		    le32_to_cpu(mvm->last_bt_notif.bt_activity_grading)) {
-			/*
-			 * don't set scale_action, don't want to scale up if
-			 * the rate scale doesn't otherwise think that is a
-			 * good idea.
-			 */
-		} else if (lq_sta->last_bt_traffic <=
-			   le32_to_cpu(mvm->last_bt_notif.bt_activity_grading)) {
-			scale_action = -1;
-		}
-	}
-	lq_sta->last_bt_traffic =
-		le32_to_cpu(mvm->last_bt_notif.bt_activity_grading);
+	scale_action = rs_get_rate_action(mvm, tbl, sr, low, high,
+					  current_tpt, low_tpt, high_tpt);
 
-	if ((le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) >=
-	     IWL_BT_COEX_TRAFFIC_LOAD_HIGH) && is_mimo(tbl->lq_type)) {
-		/* search for a new modulation */
+	/* Force a search in case BT doesn't like us being in MIMO */
+	if (is_mimo(rate) &&
+	    !iwl_mvm_bt_coex_is_mimo_allowed(mvm, sta)) {
+		IWL_DEBUG_RATE(mvm,
+			       "BT Coex forbids MIMO. Search for new config\n");
 		rs_stay_in_table(lq_sta, true);
 		goto lq_update;
 	}
 
 	switch (scale_action) {
-	case -1:
+	case RS_ACTION_DOWNSCALE:
 		/* Decrease starting rate, update uCode's rate table */
 		if (low != IWL_RATE_INVALID) {
 			update_lq = 1;
 			index = low;
+		} else {
+			IWL_DEBUG_RATE(mvm,
+				       "At the bottom rate. Can't decrease\n");
 		}
 
 		break;
-	case 1:
+	case RS_ACTION_UPSCALE:
 		/* Increase starting rate, update uCode's rate table */
 		if (high != IWL_RATE_INVALID) {
 			update_lq = 1;
 			index = high;
+		} else {
+			IWL_DEBUG_RATE(mvm,
+				       "At the top rate. Can't increase\n");
 		}
 
 		break;
-	case 0:
+	case RS_ACTION_STAY:
 		/* No change */
 	default:
 		break;
 	}
 
-	IWL_DEBUG_RATE(mvm,
-		       "choose rate scale index %d action %d low %d high %d type %d\n",
-		       index, scale_action, low, high, tbl->lq_type);
-
 lq_update:
 	/* Replace uCode's rate table for the destination station. */
-	if (update_lq)
-		rs_update_rate_tbl(mvm, sta, lq_sta, tbl, index);
+	if (update_lq) {
+		tbl->rate.index = index;
+		rs_update_rate_tbl(mvm, sta, lq_sta, &tbl->rate);
+	}
 
 	rs_stay_in_table(lq_sta, false);
 
@@ -2035,20 +1982,29 @@ lq_update:
 	 * 3)  Allowing a new search
 	 */
 	if (!update_lq && !done_search &&
-	    !lq_sta->stay_in_tbl && window->counter) {
+	    lq_sta->rs_state == RS_STATE_SEARCH_CYCLE_STARTED
+	    && window->counter) {
+		enum rs_column next_column;
+
 		/* Save current throughput to compare with "search" throughput*/
 		lq_sta->last_tpt = current_tpt;
 
-		/* Select a new "search" modulation mode to try.
-		 * If one is found, set up the new "search" table. */
-		if (is_legacy(tbl->lq_type))
-			rs_move_legacy_other(mvm, lq_sta, sta, index);
-		else if (is_siso(tbl->lq_type))
-			rs_move_siso_to_other(mvm, lq_sta, sta, index);
-		else if (is_mimo2(tbl->lq_type))
-			rs_move_mimo2_to_other(mvm, lq_sta, sta, index);
-		else
-			WARN_ON_ONCE(1);
+		IWL_DEBUG_RATE(mvm,
+			       "Start Search: update_lq %d done_search %d rs_state %d win->counter %d\n",
+			       update_lq, done_search, lq_sta->rs_state,
+			       window->counter);
+
+		next_column = rs_get_next_column(mvm, lq_sta, sta, tbl);
+		if (next_column != RS_COLUMN_INVALID) {
+			int ret = rs_switch_to_column(mvm, lq_sta, sta,
+						      next_column);
+			if (!ret)
+				lq_sta->search_better_tbl = 1;
+		} else {
+			IWL_DEBUG_RATE(mvm,
+				       "No more columns to explore in search cycle. Go to RS_STATE_SEARCH_CYCLE_ENDED\n");
+			lq_sta->rs_state = RS_STATE_SEARCH_CYCLE_ENDED;
+		}
 
 		/* If new "search" mode was selected, set up in uCode table */
 		if (lq_sta->search_better_tbl) {
@@ -2058,36 +2014,31 @@ lq_update:
 				rs_rate_scale_clear_window(&(tbl->win[i]));
 
 			/* Use new "search" start rate */
-			index = iwl_hwrate_to_plcp_idx(tbl->current_rate);
+			index = tbl->rate.index;
 
-			IWL_DEBUG_RATE(mvm,
-				       "Switch current  mcs: %X index: %d\n",
-				       tbl->current_rate, index);
-			rs_fill_link_cmd(mvm, sta, lq_sta, tbl->current_rate);
-			iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, CMD_ASYNC, false);
+			rs_dump_rate(mvm, &tbl->rate,
+				     "Switch to SEARCH TABLE:");
+			rs_fill_lq_cmd(mvm, sta, lq_sta, &tbl->rate);
+			iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, false);
 		} else {
 			done_search = 1;
 		}
 	}
 
-	if (done_search && !lq_sta->stay_in_tbl) {
+	if (done_search && lq_sta->rs_state == RS_STATE_SEARCH_CYCLE_ENDED) {
 		/* If the "active" (non-search) mode was legacy,
 		 * and we've tried switching antennas,
 		 * but we haven't been able to try HT modes (not available),
 		 * stay with best antenna legacy modulation for a while
 		 * before next round of mode comparisons. */
 		tbl1 = &(lq_sta->lq_info[lq_sta->active_tbl]);
-		if (is_legacy(tbl1->lq_type) && !sta->ht_cap.ht_supported &&
-		    lq_sta->action_counter > tbl1->max_search) {
+		if (is_legacy(&tbl1->rate) && !sta->ht_cap.ht_supported) {
 			IWL_DEBUG_RATE(mvm, "LQ: STAY in legacy table\n");
 			rs_set_stay_in_table(mvm, 1, lq_sta);
-		}
-
+		} else {
 		/* If we're in an HT mode, and all 3 mode switch actions
 		 * have been tried and compared, stay in this best modulation
 		 * mode for a while before next round of mode comparisons. */
-		if (lq_sta->enable_counter &&
-		    (lq_sta->action_counter >= tbl1->max_search)) {
 			if ((lq_sta->last_tpt > IWL_AGG_TPT_THREHOLD) &&
 			    (lq_sta->tx_agg_tid_en & (1 << tid)) &&
 			    (tid != IWL_MAX_TID_COUNT)) {
@@ -2105,7 +2056,6 @@ lq_update:
 	}
 
 out:
-	tbl->current_rate = rate_n_flags_from_tbl(mvm, tbl, index);
 	lq_sta->last_txrate_idx = index;
 }
 
@@ -2126,12 +2076,12 @@ out:
 static void rs_initialize_lq(struct iwl_mvm *mvm,
 			     struct ieee80211_sta *sta,
 			     struct iwl_lq_sta *lq_sta,
-			     enum ieee80211_band band)
+			     enum ieee80211_band band,
+			     bool init)
 {
 	struct iwl_scale_tbl_info *tbl;
-	int rate_idx;
+	struct rs_rate *rate;
 	int i;
-	u32 rate;
 	u8 active_tbl = 0;
 	u8 valid_tx_ant;
 
@@ -2148,27 +2098,30 @@ static void rs_initialize_lq(struct iwl_mvm *mvm,
 		active_tbl = 1 - lq_sta->active_tbl;
 
 	tbl = &(lq_sta->lq_info[active_tbl]);
+	rate = &tbl->rate;
 
 	if ((i < 0) || (i >= IWL_RATE_COUNT))
 		i = 0;
 
-	rate = iwl_rates[i].plcp;
-	tbl->ant_type = first_antenna(valid_tx_ant);
-	rate |= tbl->ant_type << RATE_MCS_ANT_POS;
-
-	if (i >= IWL_FIRST_CCK_RATE && i <= IWL_LAST_CCK_RATE)
-		rate |= RATE_MCS_CCK_MSK;
+	rate->index = i;
+	rate->ant = first_antenna(valid_tx_ant);
+	rate->sgi = false;
+	rate->bw = RATE_MCS_CHAN_WIDTH_20;
+	if (band == IEEE80211_BAND_5GHZ)
+		rate->type = LQ_LEGACY_A;
+	else
+		rate->type = LQ_LEGACY_G;
 
-	rs_get_tbl_info_from_mcs(rate, band, tbl, &rate_idx);
-	if (!rs_is_valid_ant(valid_tx_ant, tbl->ant_type))
-		rs_toggle_antenna(valid_tx_ant, &rate, tbl);
+	WARN_ON_ONCE(rate->ant != ANT_A && rate->ant != ANT_B);
+	if (rate->ant == ANT_A)
+		tbl->column = RS_COLUMN_LEGACY_ANT_A;
+	else
+		tbl->column = RS_COLUMN_LEGACY_ANT_B;
 
-	rate = rate_n_flags_from_tbl(mvm, tbl, rate_idx);
-	tbl->current_rate = rate;
 	rs_set_expected_tpt_table(lq_sta, tbl);
-	rs_fill_link_cmd(NULL, NULL, lq_sta, rate);
+	rs_fill_lq_cmd(mvm, sta, lq_sta, rate);
 	/* TODO restore station should remember the lq cmd */
-	iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, CMD_SYNC, true);
+	iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, init);
 }
 
 static void rs_get_rate(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta,
@@ -2182,8 +2135,6 @@ static void rs_get_rate(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta,
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 	struct iwl_lq_sta *lq_sta = mvm_sta;
 
-	IWL_DEBUG_RATE_LIMIT(mvm, "rate scale calculate new rate for skb\n");
-
 	/* Get max rate if user set max rate */
 	if (lq_sta) {
 		lq_sta->max_rate_idx = txrc->max_rate_idx;
@@ -2242,11 +2193,59 @@ static int rs_vht_highest_rx_mcs_index(struct ieee80211_sta_vht_cap *vht_cap,
 	return -1;
 }
 
+static void rs_vht_set_enabled_rates(struct ieee80211_sta *sta,
+				     struct ieee80211_sta_vht_cap *vht_cap,
+				     struct iwl_lq_sta *lq_sta)
+{
+	int i;
+	int highest_mcs = rs_vht_highest_rx_mcs_index(vht_cap, 1);
+
+	if (highest_mcs >= IWL_RATE_MCS_0_INDEX) {
+		for (i = IWL_RATE_MCS_0_INDEX; i <= highest_mcs; i++) {
+			if (i == IWL_RATE_9M_INDEX)
+				continue;
+
+			/* Disable MCS9 as a workaround */
+			if (i == IWL_RATE_MCS_9_INDEX)
+				continue;
+
+			/* VHT MCS9 isn't valid for 20Mhz for NSS=1,2 */
+			if (i == IWL_RATE_MCS_9_INDEX &&
+			    sta->bandwidth == IEEE80211_STA_RX_BW_20)
+				continue;
+
+			lq_sta->active_siso_rate |= BIT(i);
+		}
+	}
+
+	if (sta->rx_nss < 2)
+		return;
+
+	highest_mcs = rs_vht_highest_rx_mcs_index(vht_cap, 2);
+	if (highest_mcs >= IWL_RATE_MCS_0_INDEX) {
+		for (i = IWL_RATE_MCS_0_INDEX; i <= highest_mcs; i++) {
+			if (i == IWL_RATE_9M_INDEX)
+				continue;
+
+			/* Disable MCS9 as a workaround */
+			if (i == IWL_RATE_MCS_9_INDEX)
+				continue;
+
+			/* VHT MCS9 isn't valid for 20Mhz for NSS=1,2 */
+			if (i == IWL_RATE_MCS_9_INDEX &&
+			    sta->bandwidth == IEEE80211_STA_RX_BW_20)
+				continue;
+
+			lq_sta->active_mimo2_rate |= BIT(i);
+		}
+	}
+}
+
 /*
  * Called after adding a new station to initialize rate scaling
  */
 void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
-			  enum ieee80211_band band)
+			  enum ieee80211_band band, bool init)
 {
 	int i, j;
 	struct ieee80211_hw *hw = mvm->hw;
@@ -2259,6 +2258,8 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
 
 	sta_priv = (struct iwl_mvm_sta *)sta->drv_priv;
 	lq_sta = &sta_priv->lq_sta;
+	memset(lq_sta, 0, sizeof(*lq_sta));
+
 	sband = hw->wiphy->bands[band];
 
 	lq_sta->lq.sta_id = sta_priv->sta_id;
@@ -2268,7 +2269,6 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
 			rs_rate_scale_clear_window(&lq_sta->lq_info[j].win[i]);
 
 	lq_sta->flush_timer = 0;
-	lq_sta->supp_rates = sta->supp_rates[sband->band];
 
 	IWL_DEBUG_RATE(mvm,
 		       "LQ: *** rate scale station global init for station %d ***\n",
@@ -2308,27 +2308,7 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
 
 		lq_sta->is_vht = false;
 	} else {
-		int highest_mcs = rs_vht_highest_rx_mcs_index(vht_cap, 1);
-		if (highest_mcs >= IWL_RATE_MCS_0_INDEX) {
-			for (i = IWL_RATE_MCS_0_INDEX; i <= highest_mcs; i++) {
-				if (i == IWL_RATE_9M_INDEX)
-					continue;
-
-				lq_sta->active_siso_rate |= BIT(i);
-			}
-		}
-
-		highest_mcs = rs_vht_highest_rx_mcs_index(vht_cap, 2);
-		if (highest_mcs >= IWL_RATE_MCS_0_INDEX) {
-			for (i = IWL_RATE_MCS_0_INDEX; i <= highest_mcs; i++) {
-				if (i == IWL_RATE_9M_INDEX)
-					continue;
-
-				lq_sta->active_mimo2_rate |= BIT(i);
-			}
-		}
-
-		/* TODO: avoid MCS9 in 20Mhz which isn't valid for 11ac */
+		rs_vht_set_enabled_rates(sta, vht_cap, lq_sta);
 		lq_sta->is_vht = true;
 	}
 
@@ -2341,15 +2321,7 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
 	/* These values will be overridden later */
 	lq_sta->lq.single_stream_ant_msk =
 		first_antenna(iwl_fw_valid_tx_ant(mvm->fw));
-	lq_sta->lq.dual_stream_ant_msk =
-		iwl_fw_valid_tx_ant(mvm->fw) &
-		~first_antenna(iwl_fw_valid_tx_ant(mvm->fw));
-	if (!lq_sta->lq.dual_stream_ant_msk) {
-		lq_sta->lq.dual_stream_ant_msk = ANT_AB;
-	} else if (num_of_ant(iwl_fw_valid_tx_ant(mvm->fw)) == 2) {
-		lq_sta->lq.dual_stream_ant_msk =
-			iwl_fw_valid_tx_ant(mvm->fw);
-	}
+	lq_sta->lq.dual_stream_ant_msk = ANT_AB;
 
 	/* as default allow aggregation for all tids */
 	lq_sta->tx_agg_tid_en = IWL_AGG_ALL_TID;
@@ -2364,121 +2336,184 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
 	lq_sta->dbg_fixed_rate = 0;
 #endif
 
-	rs_initialize_lq(mvm, sta, lq_sta, band);
+	rs_initialize_lq(mvm, sta, lq_sta, band, init);
 }
 
-static void rs_fill_link_cmd(struct iwl_mvm *mvm,
-			     struct ieee80211_sta *sta,
-			     struct iwl_lq_sta *lq_sta, u32 new_rate)
+static void rs_rate_update(void *mvm_r,
+			   struct ieee80211_supported_band *sband,
+			   struct cfg80211_chan_def *chandef,
+			   struct ieee80211_sta *sta, void *priv_sta,
+			   u32 changed)
+{
+	u8 tid;
+	struct iwl_op_mode *op_mode  =
+			(struct iwl_op_mode *)mvm_r;
+	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+
+	/* Stop any ongoing aggregations as rs starts off assuming no agg */
+	for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++)
+		ieee80211_stop_tx_ba_session(sta, tid);
+
+	iwl_mvm_rs_rate_init(mvm, sta, sband->band, false);
+}
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+static void rs_build_rates_table_from_fixed(struct iwl_mvm *mvm,
+					    struct iwl_lq_cmd *lq_cmd,
+					    enum ieee80211_band band,
+					    u32 ucode_rate)
+{
+	struct rs_rate rate;
+	int i;
+	int num_rates = ARRAY_SIZE(lq_cmd->rs_table);
+	__le32 ucode_rate_le32 = cpu_to_le32(ucode_rate);
+
+	for (i = 0; i < num_rates; i++)
+		lq_cmd->rs_table[i] = ucode_rate_le32;
+
+	rs_rate_from_ucode_rate(ucode_rate, band, &rate);
+
+	if (is_mimo(&rate))
+		lq_cmd->mimo_delim = num_rates - 1;
+	else
+		lq_cmd->mimo_delim = 0;
+}
+#endif /* CONFIG_MAC80211_DEBUGFS */
+
+static void rs_fill_rates_for_column(struct iwl_mvm *mvm,
+				     struct iwl_lq_sta *lq_sta,
+				     struct rs_rate *rate,
+				     __le32 *rs_table, int *rs_table_index,
+				     int num_rates, int num_retries,
+				     u8 valid_tx_ant, bool toggle_ant)
+{
+	int i, j;
+	__le32 ucode_rate;
+	bool bottom_reached = false;
+	int prev_rate_idx = rate->index;
+	int end = LINK_QUAL_MAX_RETRY_NUM;
+	int index = *rs_table_index;
+
+	for (i = 0; i < num_rates && index < end; i++) {
+		ucode_rate = cpu_to_le32(ucode_rate_from_rs_rate(mvm, rate));
+		for (j = 0; j < num_retries && index < end; j++, index++)
+			rs_table[index] = ucode_rate;
+
+		if (toggle_ant)
+			rs_toggle_antenna(valid_tx_ant, rate);
+
+		prev_rate_idx = rate->index;
+		bottom_reached = rs_get_lower_rate_in_column(lq_sta, rate);
+		if (bottom_reached && !is_legacy(rate))
+			break;
+	}
+
+	if (!bottom_reached)
+		rate->index = prev_rate_idx;
+
+	*rs_table_index = index;
+}
+
+/* Building the rate table is non-trivial. When we're in the
+ * MIMO2/VHT/80Mhz/SGI column, the rate table should look like this:
+ *
+ * rate[0] 0x400D019 VHT | ANT: AB BW: 80Mhz MCS: 9 NSS: 2 SGI
+ * rate[1] 0x400D019 VHT | ANT: AB BW: 80Mhz MCS: 9 NSS: 2 SGI
+ * rate[2] 0x400D018 VHT | ANT: AB BW: 80Mhz MCS: 8 NSS: 2 SGI
+ * rate[3] 0x400D018 VHT | ANT: AB BW: 80Mhz MCS: 8 NSS: 2 SGI
+ * rate[4] 0x400D017 VHT | ANT: AB BW: 80Mhz MCS: 7 NSS: 2 SGI
+ * rate[5] 0x400D017 VHT | ANT: AB BW: 80Mhz MCS: 7 NSS: 2 SGI
+ * rate[6] 0x4005007 VHT | ANT: A BW: 80Mhz MCS: 7 NSS: 1 NGI
+ * rate[7] 0x4009006 VHT | ANT: B BW: 80Mhz MCS: 6 NSS: 1 NGI
+ * rate[8] 0x4005005 VHT | ANT: A BW: 80Mhz MCS: 5 NSS: 1 NGI
+ * rate[9] 0x800B Legacy | ANT: B Rate: 36 Mbps
+ * rate[10] 0x4009 Legacy | ANT: A Rate: 24 Mbps
+ * rate[11] 0x8007 Legacy | ANT: B Rate: 18 Mbps
+ * rate[12] 0x4005 Legacy | ANT: A Rate: 12 Mbps
+ * rate[13] 0x800F Legacy | ANT: B Rate: 9 Mbps
+ * rate[14] 0x400D Legacy | ANT: A Rate: 6 Mbps
+ * rate[15] 0x800D Legacy | ANT: B Rate: 6 Mbps
+ */
+static void rs_build_rates_table(struct iwl_mvm *mvm,
+				 struct iwl_lq_sta *lq_sta,
+				 const struct rs_rate *initial_rate)
 {
-	struct iwl_scale_tbl_info tbl_type;
-	int index = 0;
-	int rate_idx;
-	int repeat_rate = 0;
-	u8 ant_toggle_cnt = 0;
-	u8 use_ht_possible = 1;
+	struct rs_rate rate;
+	int num_rates, num_retries, index = 0;
 	u8 valid_tx_ant = 0;
 	struct iwl_lq_cmd *lq_cmd = &lq_sta->lq;
+	bool toggle_ant = false;
 
-	/* Override starting rate (index 0) if needed for debug purposes */
-	rs_dbgfs_set_mcs(lq_sta, &new_rate);
+	memcpy(&rate, initial_rate, sizeof(rate));
 
-	/* Interpret new_rate (rate_n_flags) */
-	rs_get_tbl_info_from_mcs(new_rate, lq_sta->band,
-				 &tbl_type, &rate_idx);
+	valid_tx_ant = iwl_fw_valid_tx_ant(mvm->fw);
 
-	/* How many times should we repeat the initial rate? */
-	if (is_legacy(tbl_type.lq_type)) {
-		ant_toggle_cnt = 1;
-		repeat_rate = IWL_NUMBER_TRY;
+	if (is_siso(&rate)) {
+		num_rates = RS_INITIAL_SISO_NUM_RATES;
+		num_retries = RS_HT_VHT_RETRIES_PER_RATE;
+	} else if (is_mimo(&rate)) {
+		num_rates = RS_INITIAL_MIMO_NUM_RATES;
+		num_retries = RS_HT_VHT_RETRIES_PER_RATE;
 	} else {
-		repeat_rate = min(IWL_HT_NUMBER_TRY,
-				  LINK_QUAL_AGG_DISABLE_START_DEF - 1);
+		num_rates = RS_INITIAL_LEGACY_NUM_RATES;
+		num_retries = RS_LEGACY_RETRIES_PER_RATE;
+		toggle_ant = true;
 	}
 
-	lq_cmd->mimo_delim = is_mimo(tbl_type.lq_type) ? 1 : 0;
-
-	/* Fill 1st table entry (index 0) */
-	lq_cmd->rs_table[index] = cpu_to_le32(new_rate);
-
-	if (num_of_ant(tbl_type.ant_type) == 1)
-		lq_cmd->single_stream_ant_msk = tbl_type.ant_type;
-	else if (num_of_ant(tbl_type.ant_type) == 2)
-		lq_cmd->dual_stream_ant_msk = tbl_type.ant_type;
-	/* otherwise we don't modify the existing value */
-
-	index++;
-	repeat_rate--;
-	if (mvm)
-		valid_tx_ant = iwl_fw_valid_tx_ant(mvm->fw);
-
-	/* Fill rest of rate table */
-	while (index < LINK_QUAL_MAX_RETRY_NUM) {
-		/* Repeat initial/next rate.
-		 * For legacy IWL_NUMBER_TRY == 1, this loop will not execute.
-		 * For HT IWL_HT_NUMBER_TRY == 3, this executes twice. */
-		while (repeat_rate > 0 && (index < LINK_QUAL_MAX_RETRY_NUM)) {
-			if (is_legacy(tbl_type.lq_type)) {
-				if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE)
-					ant_toggle_cnt++;
-				else if (mvm &&
-					 rs_toggle_antenna(valid_tx_ant,
-							&new_rate, &tbl_type))
-					ant_toggle_cnt = 1;
-			}
+	rs_fill_rates_for_column(mvm, lq_sta, &rate, lq_cmd->rs_table, &index,
+				 num_rates, num_retries, valid_tx_ant,
+				 toggle_ant);
 
-			/* Override next rate if needed for debug purposes */
-			rs_dbgfs_set_mcs(lq_sta, &new_rate);
+	rs_get_lower_rate_down_column(lq_sta, &rate);
 
-			/* Fill next table entry */
-			lq_cmd->rs_table[index] =
-					cpu_to_le32(new_rate);
-			repeat_rate--;
-			index++;
-		}
+	if (is_siso(&rate)) {
+		num_rates = RS_SECONDARY_SISO_NUM_RATES;
+		num_retries = RS_SECONDARY_SISO_RETRIES;
+	} else if (is_legacy(&rate)) {
+		num_rates = RS_SECONDARY_LEGACY_NUM_RATES;
+		num_retries = RS_LEGACY_RETRIES_PER_RATE;
+	} else {
+		WARN_ON_ONCE(1);
+	}
 
-		rs_get_tbl_info_from_mcs(new_rate, lq_sta->band, &tbl_type,
-					 &rate_idx);
-
-		/* Indicate to uCode which entries might be MIMO.
-		 * If initial rate was MIMO, this will finally end up
-		 * as (IWL_HT_NUMBER_TRY * 2), after 2nd pass, otherwise 0. */
-		if (is_mimo(tbl_type.lq_type))
-			lq_cmd->mimo_delim = index;
-
-		/* Get next rate */
-		new_rate = rs_get_lower_rate(lq_sta, &tbl_type, rate_idx,
-					     use_ht_possible);
-
-		/* How many times should we repeat the next rate? */
-		if (is_legacy(tbl_type.lq_type)) {
-			if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE)
-				ant_toggle_cnt++;
-			else if (mvm &&
-				 rs_toggle_antenna(valid_tx_ant,
-						   &new_rate, &tbl_type))
-				ant_toggle_cnt = 1;
-
-			repeat_rate = IWL_NUMBER_TRY;
-		} else {
-			repeat_rate = IWL_HT_NUMBER_TRY;
-		}
+	toggle_ant = true;
 
-		/* Don't allow HT rates after next pass.
-		 * rs_get_lower_rate() will change type to LQ_LEGACY_A
-		 * or LQ_LEGACY_G.
-		 */
-		use_ht_possible = 0;
+	rs_fill_rates_for_column(mvm, lq_sta, &rate, lq_cmd->rs_table, &index,
+				 num_rates, num_retries, valid_tx_ant,
+				 toggle_ant);
 
-		/* Override next rate if needed for debug purposes */
-		rs_dbgfs_set_mcs(lq_sta, &new_rate);
+	rs_get_lower_rate_down_column(lq_sta, &rate);
 
-		/* Fill next table entry */
-		lq_cmd->rs_table[index] = cpu_to_le32(new_rate);
+	num_rates = RS_SECONDARY_LEGACY_NUM_RATES;
+	num_retries = RS_LEGACY_RETRIES_PER_RATE;
 
-		index++;
-		repeat_rate--;
-	}
+	rs_fill_rates_for_column(mvm, lq_sta, &rate, lq_cmd->rs_table, &index,
+				 num_rates, num_retries, valid_tx_ant,
+				 toggle_ant);
+
+}
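+
+/* Illustrative helper, sketch only (not used anywhere): the upper bound on
+ * the number of rs_table slots the initial column consumes follows directly
+ * from the constants rs_build_rates_table() selects above; the actual fill
+ * may stop earlier when the column bottom or LINK_QUAL_MAX_RETRY_NUM is hit.
+ */
+static inline int rs_initial_column_budget(const struct rs_rate *rate)
+{
+	if (is_siso(rate))
+		return RS_INITIAL_SISO_NUM_RATES * RS_HT_VHT_RETRIES_PER_RATE;
+	if (is_mimo(rate))
+		return RS_INITIAL_MIMO_NUM_RATES * RS_HT_VHT_RETRIES_PER_RATE;
+	return RS_INITIAL_LEGACY_NUM_RATES * RS_LEGACY_RETRIES_PER_RATE;
+}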
+
+static void rs_fill_lq_cmd(struct iwl_mvm *mvm,
+			   struct ieee80211_sta *sta,
+			   struct iwl_lq_sta *lq_sta,
+			   const struct rs_rate *initial_rate)
+{
+	struct iwl_lq_cmd *lq_cmd = &lq_sta->lq;
+	u8 ant = initial_rate->ant;
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+	if (lq_sta->dbg_fixed_rate) {
+		rs_build_rates_table_from_fixed(mvm, lq_cmd,
+						lq_sta->band,
+						lq_sta->dbg_fixed_rate);
+		ant = (lq_sta->dbg_fixed_rate & RATE_MCS_ANT_ABC_MSK) >>
+			RATE_MCS_ANT_POS;
+	} else
+#endif
+		rs_build_rates_table(mvm, lq_sta, initial_rate);
+
+	if (num_of_ant(ant) == 1)
+		lq_cmd->single_stream_ant_msk = ant;
 
 	lq_cmd->agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
 	lq_cmd->agg_disable_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
@@ -2512,31 +2547,83 @@ static void rs_free_sta(void *mvm_r, struct ieee80211_sta *sta,
 }
 
 #ifdef CONFIG_MAC80211_DEBUGFS
-static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
-			     u32 *rate_n_flags)
+static int rs_pretty_print_rate(char *buf, const u32 rate)
 {
-	struct iwl_mvm *mvm;
-	u8 valid_tx_ant;
-	u8 ant_sel_tx;
 
-	mvm = lq_sta->drv;
-	valid_tx_ant = iwl_fw_valid_tx_ant(mvm->fw);
-	if (lq_sta->dbg_fixed_rate) {
-		ant_sel_tx =
-		  ((lq_sta->dbg_fixed_rate & RATE_MCS_ANT_ABC_MSK)
-		  >> RATE_MCS_ANT_POS);
-		if ((valid_tx_ant & ant_sel_tx) == ant_sel_tx) {
-			*rate_n_flags = lq_sta->dbg_fixed_rate;
-			IWL_DEBUG_RATE(mvm, "Fixed rate ON\n");
-		} else {
-			lq_sta->dbg_fixed_rate = 0;
-			IWL_ERR(mvm,
-				"Invalid antenna selection 0x%X, Valid is 0x%X\n",
-				ant_sel_tx, valid_tx_ant);
-			IWL_DEBUG_RATE(mvm, "Fixed rate OFF\n");
-		}
+	char *type, *bw;
+	u8 mcs = 0, nss = 0;
+	u8 ant = (rate & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS;
+
+	if (!(rate & RATE_MCS_HT_MSK) &&
+	    !(rate & RATE_MCS_VHT_MSK)) {
+		int index = iwl_hwrate_to_plcp_idx(rate);
+
+		return sprintf(buf, "Legacy | ANT: %s Rate: %s Mbps\n",
+			       rs_pretty_ant(ant),
+			       index == IWL_RATE_INVALID ? "BAD" :
+			       iwl_rate_mcs[index].mbps);
+	}
+
+	if (rate & RATE_MCS_VHT_MSK) {
+		type = "VHT";
+		mcs = rate & RATE_VHT_MCS_RATE_CODE_MSK;
+		nss = ((rate & RATE_VHT_MCS_NSS_MSK)
+		       >> RATE_VHT_MCS_NSS_POS) + 1;
+	} else if (rate & RATE_MCS_HT_MSK) {
+		type = "HT";
+		mcs = rate & RATE_HT_MCS_INDEX_MSK;
 	} else {
-		IWL_DEBUG_RATE(mvm, "Fixed rate OFF\n");
+		type = "Unknown"; /* shouldn't happen */
+	}
+
+	switch (rate & RATE_MCS_CHAN_WIDTH_MSK) {
+	case RATE_MCS_CHAN_WIDTH_20:
+		bw = "20Mhz";
+		break;
+	case RATE_MCS_CHAN_WIDTH_40:
+		bw = "40Mhz";
+		break;
+	case RATE_MCS_CHAN_WIDTH_80:
+		bw = "80Mhz";
+		break;
+	case RATE_MCS_CHAN_WIDTH_160:
+		bw = "160Mhz";
+		break;
+	default:
+		bw = "BAD BW";
+	}
+
+	return sprintf(buf, "%s | ANT: %s BW: %s MCS: %d NSS: %d %s%s%s%s%s\n",
+		       type, rs_pretty_ant(ant), bw, mcs, nss,
+		       (rate & RATE_MCS_SGI_MSK) ? "SGI " : "NGI ",
+		       (rate & RATE_MCS_STBC_MSK) ? "STBC " : "",
+		       (rate & RATE_MCS_LDPC_MSK) ? "LDPC " : "",
+		       (rate & RATE_MCS_BF_MSK) ? "BF " : "",
+		       (rate & RATE_MCS_ZLF_MSK) ? "ZLF " : "");
+}
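+
+/* Example (reading aid only): for the MIMO2/VHT/80Mhz/SGI entry shown in
+ * the rs_build_rates_table() comment above, this helper would print
+ * something like "VHT | ANT: AB BW: 80Mhz MCS: 9 NSS: 2 SGI " followed by
+ * whichever of the STBC/LDPC/BF/ZLF flags happen to be set.
+ */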
+
+/**
+ * Program the device to use a fixed rate for frame transmission.
+ * This is for debugging/testing only; once the device starts using a fixed
+ * rate, the module must be reloaded to bring back normal operation.
+ */
+static void rs_program_fix_rate(struct iwl_mvm *mvm,
+				struct iwl_lq_sta *lq_sta)
+{
+	lq_sta->active_legacy_rate = 0x0FFF;	/* 1 - 54 MBits, includes CCK */
+	lq_sta->active_siso_rate   = 0x1FD0;	/* 6 - 60 MBits, no 9, no CCK */
+	lq_sta->active_mimo2_rate  = 0x1FD0;	/* 6 - 60 MBits, no 9, no CCK */
+
+	IWL_DEBUG_RATE(mvm, "sta_id %d rate 0x%X\n",
+		       lq_sta->lq.sta_id, lq_sta->dbg_fixed_rate);
+
+	if (lq_sta->dbg_fixed_rate) {
+		struct rs_rate rate;
+		rs_rate_from_ucode_rate(lq_sta->dbg_fixed_rate,
+					lq_sta->band, &rate);
+		rs_fill_lq_cmd(mvm, NULL, lq_sta, &rate);
+		iwl_mvm_send_lq_cmd(lq_sta->drv, &lq_sta->lq, false);
 	}
 }
 
@@ -2572,15 +2659,14 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
 	char *buff;
 	int desc = 0;
 	int i = 0;
-	int index = 0;
 	ssize_t ret;
 
 	struct iwl_lq_sta *lq_sta = file->private_data;
 	struct iwl_mvm *mvm;
 	struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
-
+	struct rs_rate *rate = &tbl->rate;
 	mvm = lq_sta->drv;
-	buff = kmalloc(1024, GFP_KERNEL);
+	buff = kmalloc(2048, GFP_KERNEL);
 	if (!buff)
 		return -ENOMEM;
 
@@ -2595,23 +2681,23 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
 	    (iwl_fw_valid_tx_ant(mvm->fw) & ANT_B) ? "ANT_B," : "",
 	    (iwl_fw_valid_tx_ant(mvm->fw) & ANT_C) ? "ANT_C" : "");
 	desc += sprintf(buff+desc, "lq type %s\n",
-			(is_legacy(tbl->lq_type)) ? "legacy" :
-			is_vht(tbl->lq_type) ? "VHT" : "HT");
-	if (is_ht(tbl->lq_type)) {
+			(is_legacy(rate)) ? "legacy" :
+			is_vht(rate) ? "VHT" : "HT");
+	if (!is_legacy(rate)) {
 		desc += sprintf(buff+desc, " %s",
-		   (is_siso(tbl->lq_type)) ? "SISO" : "MIMO2");
+		   (is_siso(rate)) ? "SISO" : "MIMO2");
 		   desc += sprintf(buff+desc, " %s",
-				   (is_ht20(tbl)) ? "20MHz" :
-				   (is_ht40(tbl)) ? "40MHz" :
-				   (is_ht80(tbl)) ? "80Mhz" : "BAD BW");
+				   (is_ht20(rate)) ? "20MHz" :
+				   (is_ht40(rate)) ? "40MHz" :
+				   (is_ht80(rate)) ? "80Mhz" : "BAD BW");
 		   desc += sprintf(buff+desc, " %s %s\n",
-				   (tbl->is_SGI) ? "SGI" : "",
+				   (rate->sgi) ? "SGI" : "NGI",
 				   (lq_sta->is_agg) ? "AGG on" : "");
 	}
 	desc += sprintf(buff+desc, "last tx rate=0x%X\n",
 			lq_sta->last_rate_n_flags);
 	desc += sprintf(buff+desc,
-			"general: flags=0x%X mimo-d=%d s-ant0x%x d-ant=0x%x\n",
+			"general: flags=0x%X mimo-d=%d s-ant=0x%x d-ant=0x%x\n",
 			lq_sta->lq.flags,
 			lq_sta->lq.mimo_delim,
 			lq_sta->lq.single_stream_ant_msk,
@@ -2631,19 +2717,10 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
 			lq_sta->lq.initial_rate_index[3]);
 
 	for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
-		index = iwl_hwrate_to_plcp_idx(
-			le32_to_cpu(lq_sta->lq.rs_table[i]));
-		if (is_legacy(tbl->lq_type)) {
-			desc += sprintf(buff+desc, " rate[%d] 0x%X %smbps\n",
-					i, le32_to_cpu(lq_sta->lq.rs_table[i]),
-					iwl_rate_mcs[index].mbps);
-		} else {
-			desc += sprintf(buff+desc,
-					" rate[%d] 0x%X %smbps (%s)\n",
-					i, le32_to_cpu(lq_sta->lq.rs_table[i]),
-					iwl_rate_mcs[index].mbps,
-					iwl_rate_mcs[index].mcs);
-		}
+		u32 r = le32_to_cpu(lq_sta->lq.rs_table[i]);
+
+		desc += sprintf(buff+desc, " rate[%d] 0x%X ", i, r);
+		desc += rs_pretty_print_rate(buff+desc, r);
 	}
 
 	ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
@@ -2665,6 +2742,7 @@ static ssize_t rs_sta_dbgfs_stats_table_read(struct file *file,
 	int i, j;
 	ssize_t ret;
 	struct iwl_scale_tbl_info *tbl;
+	struct rs_rate *rate;
 	struct iwl_lq_sta *lq_sta = file->private_data;
 
 	buff = kmalloc(1024, GFP_KERNEL);
@@ -2673,16 +2751,17 @@ static ssize_t rs_sta_dbgfs_stats_table_read(struct file *file,
 
 	for (i = 0; i < LQ_SIZE; i++) {
 		tbl = &(lq_sta->lq_info[i]);
+		rate = &tbl->rate;
 		desc += sprintf(buff+desc,
 				"%s type=%d SGI=%d BW=%s DUP=0\n"
-				"rate=0x%X\n",
+				"index=%d\n",
 				lq_sta->active_tbl == i ? "*" : "x",
-				tbl->lq_type,
-				tbl->is_SGI,
-				is_ht20(tbl) ? "20Mhz" :
-				is_ht40(tbl) ? "40Mhz" :
-				is_ht80(tbl) ? "80Mhz" : "ERR",
-				tbl->current_rate);
+				rate->type,
+				rate->sgi,
+				is_ht20(rate) ? "20Mhz" :
+				is_ht40(rate) ? "40Mhz" :
+				is_ht80(rate) ? "80Mhz" : "ERR",
+				rate->index);
 		for (j = 0; j < IWL_RATE_COUNT; j++) {
 			desc += sprintf(buff+desc,
 				"counter=%d success=%d %%=%d\n",
@@ -2746,6 +2825,7 @@ static struct rate_control_ops rs_mvm_ops = {
 	.free = rs_free,
 	.alloc_sta = rs_alloc_sta,
 	.free_sta = rs_free_sta,
+	.rate_update = rs_rate_update,
 #ifdef CONFIG_MAC80211_DEBUGFS
 	.add_sta_debugfs = rs_add_debugfs,
 	.remove_sta_debugfs = rs_remove_debugfs,
@@ -2778,13 +2858,13 @@ int iwl_mvm_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
 
 	if (enable) {
 		if (mvmsta->tx_protection == 0)
-			lq->flags |= LQ_FLAG_SET_STA_TLC_RTS_MSK;
+			lq->flags |= LQ_FLAG_USE_RTS_MSK;
 		mvmsta->tx_protection++;
 	} else {
 		mvmsta->tx_protection--;
 		if (mvmsta->tx_protection == 0)
-			lq->flags &= ~LQ_FLAG_SET_STA_TLC_RTS_MSK;
+			lq->flags &= ~LQ_FLAG_USE_RTS_MSK;
 	}
 
-	return iwl_mvm_send_lq_cmd(mvm, lq, CMD_ASYNC, false);
+	return iwl_mvm_send_lq_cmd(mvm, lq, false);
 }
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.h b/drivers/net/wireless/iwlwifi/mvm/rs.h
index 5d5344f7070b..7bc6404f6986 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rs.h
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.h
@@ -1,6 +1,6 @@
 /******************************************************************************
  *
- * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of version 2 of the GNU General Public License as
@@ -155,38 +155,7 @@ enum {
 #define IWL_RATE_SCALE_SWITCH		10880	/*  85% */
 #define IWL_RATE_HIGH_TH		10880	/*  85% */
 #define IWL_RATE_INCREASE_TH		6400	/*  50% */
-#define IWL_RATE_DECREASE_TH		1920	/*  15% */
-
-/* possible actions when in legacy mode */
-enum {
-	IWL_LEGACY_SWITCH_ANTENNA,
-	IWL_LEGACY_SWITCH_SISO,
-	IWL_LEGACY_SWITCH_MIMO2,
-	IWL_LEGACY_FIRST_ACTION = IWL_LEGACY_SWITCH_ANTENNA,
-	IWL_LEGACY_LAST_ACTION = IWL_LEGACY_SWITCH_MIMO2,
-};
-
-/* possible actions when in siso mode */
-enum {
-	IWL_SISO_SWITCH_ANTENNA,
-	IWL_SISO_SWITCH_MIMO2,
-	IWL_SISO_SWITCH_GI,
-	IWL_SISO_FIRST_ACTION = IWL_SISO_SWITCH_ANTENNA,
-	IWL_SISO_LAST_ACTION = IWL_SISO_SWITCH_GI,
-};
-
-/* possible actions when in mimo mode */
-enum {
-	IWL_MIMO2_SWITCH_SISO_A,
-	IWL_MIMO2_SWITCH_SISO_B,
-	IWL_MIMO2_SWITCH_GI,
-	IWL_MIMO2_FIRST_ACTION = IWL_MIMO2_SWITCH_SISO_A,
-	IWL_MIMO2_LAST_ACTION = IWL_MIMO2_SWITCH_GI,
-};
-
-#define IWL_MAX_SEARCH IWL_MIMO2_LAST_ACTION
-
-#define IWL_ACTION_LIMIT		3	/* # possible actions */
+#define RS_SR_FORCE_DECREASE		1920	/*  15% */
 
 #define LINK_QUAL_AGG_TIME_LIMIT_DEF	(4000) /* 4 milliseconds */
 #define LINK_QUAL_AGG_TIME_LIMIT_MAX	(8000)
@@ -224,22 +193,45 @@ enum iwl_table_type {
 	LQ_MAX,
 };
 
-#define is_legacy(tbl) (((tbl) == LQ_LEGACY_G) || ((tbl) == LQ_LEGACY_A))
-#define is_ht_siso(tbl) ((tbl) == LQ_HT_SISO)
-#define is_ht_mimo2(tbl) ((tbl) == LQ_HT_MIMO2)
-#define is_vht_siso(tbl) ((tbl) == LQ_VHT_SISO)
-#define is_vht_mimo2(tbl) ((tbl) == LQ_VHT_MIMO2)
-#define is_siso(tbl) (is_ht_siso(tbl) || is_vht_siso(tbl))
-#define is_mimo2(tbl) (is_ht_mimo2(tbl) || is_vht_mimo2(tbl))
-#define is_mimo(tbl) (is_mimo2(tbl))
-#define is_ht(tbl) (is_ht_siso(tbl) || is_ht_mimo2(tbl))
-#define is_vht(tbl) (is_vht_siso(tbl) || is_vht_mimo2(tbl))
-#define is_a_band(tbl) ((tbl) == LQ_LEGACY_A)
-#define is_g_band(tbl) ((tbl) == LQ_LEGACY_G)
-
-#define is_ht20(tbl) (tbl->bw == RATE_MCS_CHAN_WIDTH_20)
-#define is_ht40(tbl) (tbl->bw == RATE_MCS_CHAN_WIDTH_40)
-#define is_ht80(tbl) (tbl->bw == RATE_MCS_CHAN_WIDTH_80)
+struct rs_rate {
+	int index;
+	enum iwl_table_type type;
+	u8 ant;
+	u32 bw;
+	bool sgi;
+};
+
+
+#define is_type_legacy(type) (((type) == LQ_LEGACY_G) || \
+			      ((type) == LQ_LEGACY_A))
+#define is_type_ht_siso(type) ((type) == LQ_HT_SISO)
+#define is_type_ht_mimo2(type) ((type) == LQ_HT_MIMO2)
+#define is_type_vht_siso(type) ((type) == LQ_VHT_SISO)
+#define is_type_vht_mimo2(type) ((type) == LQ_VHT_MIMO2)
+#define is_type_siso(type) (is_type_ht_siso(type) || is_type_vht_siso(type))
+#define is_type_mimo2(type) (is_type_ht_mimo2(type) || is_type_vht_mimo2(type))
+#define is_type_mimo(type) (is_type_mimo2(type))
+#define is_type_ht(type) (is_type_ht_siso(type) || is_type_ht_mimo2(type))
+#define is_type_vht(type) (is_type_vht_siso(type) || is_type_vht_mimo2(type))
+#define is_type_a_band(type) ((type) == LQ_LEGACY_A)
+#define is_type_g_band(type) ((type) == LQ_LEGACY_G)
+
+#define is_legacy(rate)       is_type_legacy((rate)->type)
+#define is_ht_siso(rate)      is_type_ht_siso((rate)->type)
+#define is_ht_mimo2(rate)     is_type_ht_mimo2((rate)->type)
+#define is_vht_siso(rate)     is_type_vht_siso((rate)->type)
+#define is_vht_mimo2(rate)    is_type_vht_mimo2((rate)->type)
+#define is_siso(rate)         is_type_siso((rate)->type)
+#define is_mimo2(rate)        is_type_mimo2((rate)->type)
+#define is_mimo(rate)         is_type_mimo((rate)->type)
+#define is_ht(rate)           is_type_ht((rate)->type)
+#define is_vht(rate)          is_type_vht((rate)->type)
+#define is_a_band(rate)       is_type_a_band((rate)->type)
+#define is_g_band(rate)       is_type_g_band((rate)->type)
+
+#define is_ht20(rate)         ((rate)->bw == RATE_MCS_CHAN_WIDTH_20)
+#define is_ht40(rate)         ((rate)->bw == RATE_MCS_CHAN_WIDTH_40)
+#define is_ht80(rate)         ((rate)->bw == RATE_MCS_CHAN_WIDTH_80)
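+
+/* Usage sketch (illustrative only, field values are arbitrary): how a
+ * struct rs_rate is populated and then queried through the helpers above.
+ *
+ *	struct rs_rate r = {
+ *		.index = IWL_RATE_MCS_0_INDEX,
+ *		.type = LQ_VHT_SISO,
+ *		.ant = ANT_A,
+ *		.bw = RATE_MCS_CHAN_WIDTH_80,
+ *		.sgi = true,
+ *	};
+ *
+ *	if (is_vht(&r) && is_ht80(&r) && !is_mimo(&r))
+ *		pr_debug("VHT SISO 80MHz, SGI=%d\n", r.sgi);
+ */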
 
 #define IWL_MAX_MCS_DISPLAY_SIZE	12
 
@@ -257,7 +249,23 @@ struct iwl_rate_scale_data {
 	s32 success_ratio;	/* per-cent * 128  */
 	s32 counter;		/* number of frames attempted */
 	s32 average_tpt;	/* success ratio * expected throughput */
-	unsigned long stamp;
+};
+
+/* Possible Tx columns
+ * Tx Column = a combo of legacy/siso/mimo x antenna x SGI
+ */
+enum rs_column {
+	RS_COLUMN_LEGACY_ANT_A = 0,
+	RS_COLUMN_LEGACY_ANT_B,
+	RS_COLUMN_SISO_ANT_A,
+	RS_COLUMN_SISO_ANT_B,
+	RS_COLUMN_SISO_ANT_A_SGI,
+	RS_COLUMN_SISO_ANT_B_SGI,
+	RS_COLUMN_MIMO2,
+	RS_COLUMN_MIMO2_SGI,
+
+	RS_COLUMN_LAST = RS_COLUMN_MIMO2_SGI,
+	RS_COLUMN_INVALID,
 };
 
 /**
@@ -267,17 +275,18 @@ struct iwl_rate_scale_data {
  * one for "active", and one for "search".
  */
 struct iwl_scale_tbl_info {
-	enum iwl_table_type lq_type;
-	u8 ant_type;
-	u8 is_SGI;	/* 1 = short guard interval */
-	u32 bw;	        /* channel bandwidth; RATE_MCS_CHAN_WIDTH_XX */
-	u8 action;	/* change modulation; IWL_[LEGACY/SISO/MIMO]_SWITCH_* */
-	u8 max_search;	/* maximun number of tables we can search */
+	struct rs_rate rate;
+	enum rs_column column;
 	s32 *expected_tpt;	/* throughput metrics; expected_tpt_G, etc. */
-	u32 current_rate;  /* rate_n_flags, uCode API format */
 	struct iwl_rate_scale_data win[IWL_RATE_COUNT]; /* rate histories */
 };
 
+enum {
+	RS_STATE_SEARCH_CYCLE_STARTED,
+	RS_STATE_SEARCH_CYCLE_ENDED,
+	RS_STATE_STAY_IN_COLUMN,
+};
+
 /**
  * struct iwl_lq_sta -- driver's rate scaling private structure
  *
@@ -285,8 +294,7 @@ struct iwl_scale_tbl_info {
  */
 struct iwl_lq_sta {
 	u8 active_tbl;		/* index of active table, range 0-1 */
-	u8 enable_counter;	/* indicates HT mode */
-	u8 stay_in_tbl;		/* 1: disallow, 0: allow search for new mode */
+	u8 rs_state;            /* RS_STATE_* */
 	u8 search_better_tbl;	/* 1: currently trying alternate mode */
 	s32 last_tpt;
 
@@ -299,12 +307,13 @@ struct iwl_lq_sta {
 	u32 total_success;	/* total successful frames, any/all rates */
 	u64 flush_timer;	/* time staying in mode before new search */
 
-	u8 action_counter;	/* # mode-switch actions tried */
+	u32 visited_columns;    /* Bitmask marking which Tx columns were
+				 * explored during a search cycle
+				 */
 	bool is_vht;
 	enum ieee80211_band band;
 
 	/* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */
-	u32 supp_rates;
 	u16 active_legacy_rate;
 	u16 active_siso_rate;
 	u16 active_mimo2_rate;
@@ -328,32 +337,11 @@ struct iwl_lq_sta {
 	u32 last_rate_n_flags;
 	/* packets destined for this STA are aggregated */
 	u8 is_agg;
-	/* BT traffic this sta was last updated in */
-	u8 last_bt_traffic;
-};
-
-enum iwl_bt_coex_profile_traffic_load {
-	IWL_BT_COEX_TRAFFIC_LOAD_NONE		= 0,
-	IWL_BT_COEX_TRAFFIC_LOAD_LOW		= 1,
-	IWL_BT_COEX_TRAFFIC_LOAD_HIGH		= 2,
-	IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS	= 3,
-/*
- * There are no more even though below is a u8, the
- * indication from the BT device only has two bits.
- */
 };
 
-
-static inline u8 num_of_ant(u8 mask)
-{
-	return  !!((mask) & ANT_A) +
-		!!((mask) & ANT_B) +
-		!!((mask) & ANT_C);
-}
-
 /* Initialize station's rate scaling information after adding station */
 void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
-			  enum ieee80211_band band);
+			  enum ieee80211_band band, bool init);
 
 /**
  * iwl_rate_control_register - Register the rate control algorithm callbacks
diff --git a/drivers/net/wireless/iwlwifi/mvm/rx.c b/drivers/net/wireless/iwlwifi/mvm/rx.c
index 3a1f3982109d..a85b60f7e67e 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rx.c
@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -251,6 +251,12 @@ static u32 iwl_mvm_set_mac80211_rx_flag(struct iwl_mvm *mvm,
 		stats->flag |= RX_FLAG_DECRYPTED;
 		return 0;
 
+	case RX_MPDU_RES_STATUS_SEC_EXT_ENC:
+		if (!(rx_pkt_status & RX_MPDU_RES_STATUS_MIC_OK))
+			return -1;
+		stats->flag |= RX_FLAG_DECRYPTED;
+		return 0;
+
 	default:
 		IWL_ERR(mvm, "Unhandled alg: 0x%x\n", rx_pkt_status);
 	}
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
index dff7592e1ff8..0e0007960612 100644
--- a/drivers/net/wireless/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -70,6 +70,9 @@
 
 #define IWL_PLCP_QUIET_THRESH 1
 #define IWL_ACTIVE_QUIET_TIME 10
+#define LONG_OUT_TIME_PERIOD 600
+#define SHORT_OUT_TIME_PERIOD 200
+#define SUSPEND_TIME_PERIOD 100
 
 static inline __le16 iwl_mvm_scan_rx_chain(struct iwl_mvm *mvm)
 {
@@ -87,20 +90,22 @@ static inline __le16 iwl_mvm_scan_rx_chain(struct iwl_mvm *mvm)
 	return cpu_to_le16(rx_chain);
 }
 
-static inline __le32 iwl_mvm_scan_max_out_time(struct ieee80211_vif *vif)
+static inline __le32 iwl_mvm_scan_max_out_time(struct ieee80211_vif *vif,
+					       u32 flags, bool is_assoc)
 {
-	if (vif->bss_conf.assoc)
-		return cpu_to_le32(200 * 1024);
-	else
+	if (!is_assoc)
 		return 0;
+	if (flags & NL80211_SCAN_FLAG_LOW_PRIORITY)
+		return cpu_to_le32(ieee80211_tu_to_usec(SHORT_OUT_TIME_PERIOD));
+	return cpu_to_le32(ieee80211_tu_to_usec(LONG_OUT_TIME_PERIOD));
 }
 
-static inline __le32 iwl_mvm_scan_suspend_time(struct ieee80211_vif *vif)
+static inline __le32 iwl_mvm_scan_suspend_time(struct ieee80211_vif *vif,
+					       bool is_assoc)
 {
-	if (!vif->bss_conf.assoc)
+	if (!is_assoc)
 		return 0;
-
-	return cpu_to_le32(ieee80211_tu_to_usec(vif->bss_conf.beacon_int));
+	return cpu_to_le32(ieee80211_tu_to_usec(SUSPEND_TIME_PERIOD));
 }
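+
+/* Reading aid (sketch, not functional): with ieee80211_tu_to_usec() scaling
+ * TU to microseconds, an associated vif now gets a 600 TU out-of-channel
+ * budget (200 TU when the scan carries NL80211_SCAN_FLAG_LOW_PRIORITY) and
+ * a 100 TU suspend time; both values stay 0 when nothing is associated.
+ */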
 
 static inline __le32
@@ -192,7 +197,7 @@ static void iwl_mvm_scan_fill_channels(struct iwl_scan_cmd *cmd,
 	for (i = 0; i < cmd->channel_count; i++) {
 		chan->channel = cpu_to_le16(req->channels[i]->hw_value);
 		chan->type = cpu_to_le32(type);
-		if (req->channels[i]->flags & IEEE80211_CHAN_PASSIVE_SCAN)
+		if (req->channels[i]->flags & IEEE80211_CHAN_NO_IR)
 			chan->type &= cpu_to_le32(~SCAN_CHANNEL_TYPE_ACTIVE);
 		chan->active_dwell = cpu_to_le16(active_dwell);
 		chan->passive_dwell = cpu_to_le16(passive_dwell);
@@ -262,6 +267,15 @@ static u16 iwl_mvm_fill_probe_req(struct ieee80211_mgmt *frame, const u8 *ta,
 	return (u16)len;
 }
 
+static void iwl_mvm_vif_assoc_iterator(void *data, u8 *mac,
+				       struct ieee80211_vif *vif)
+{
+	bool *is_assoc = data;
+
+	if (vif->bss_conf.assoc)
+		*is_assoc = true;
+}
+
 int iwl_mvm_scan_request(struct iwl_mvm *mvm,
 			 struct ieee80211_vif *vif,
 			 struct cfg80211_scan_request *req)
@@ -274,6 +288,7 @@ int iwl_mvm_scan_request(struct iwl_mvm *mvm,
 		.dataflags = { IWL_HCMD_DFL_NOCOPY, },
 	};
 	struct iwl_scan_cmd *cmd = mvm->scan_cmd;
+	bool is_assoc = false;
 	int ret;
 	u32 status;
 	int ssid_len = 0;
@@ -289,13 +304,17 @@ int iwl_mvm_scan_request(struct iwl_mvm *mvm,
 	memset(cmd, 0, sizeof(struct iwl_scan_cmd) +
 	       mvm->fw->ucode_capa.max_probe_length +
 	       (MAX_NUM_SCAN_CHANNELS * sizeof(struct iwl_scan_channel)));
-
+	ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+					    IEEE80211_IFACE_ITER_NORMAL,
+					    iwl_mvm_vif_assoc_iterator,
+					    &is_assoc);
 	cmd->channel_count = (u8)req->n_channels;
 	cmd->quiet_time = cpu_to_le16(IWL_ACTIVE_QUIET_TIME);
 	cmd->quiet_plcp_th = cpu_to_le16(IWL_PLCP_QUIET_THRESH);
 	cmd->rxchain_sel_flags = iwl_mvm_scan_rx_chain(mvm);
-	cmd->max_out_time = iwl_mvm_scan_max_out_time(vif);
-	cmd->suspend_time = iwl_mvm_scan_suspend_time(vif);
+	cmd->max_out_time = iwl_mvm_scan_max_out_time(vif, req->flags,
+						      is_assoc);
+	cmd->suspend_time = iwl_mvm_scan_suspend_time(vif, is_assoc);
 	cmd->rxon_flags = iwl_mvm_scan_rxon_flags(req);
 	cmd->filter_flags = cpu_to_le32(MAC_FILTER_ACCEPT_GRP |
 					MAC_FILTER_IN_BEACON);
@@ -454,13 +473,18 @@ void iwl_mvm_cancel_scan(struct iwl_mvm *mvm)
 	if (mvm->scan_status == IWL_MVM_SCAN_NONE)
 		return;
 
+	if (iwl_mvm_is_radio_killed(mvm)) {
+		ieee80211_scan_completed(mvm->hw, true);
+		mvm->scan_status = IWL_MVM_SCAN_NONE;
+		return;
+	}
+
 	iwl_init_notification_wait(&mvm->notif_wait, &wait_scan_abort,
 				   scan_abort_notif,
 				   ARRAY_SIZE(scan_abort_notif),
 				   iwl_mvm_scan_abort_notif, NULL);
 
-	ret = iwl_mvm_send_cmd_pdu(mvm, SCAN_ABORT_CMD,
-				   CMD_SYNC | CMD_SEND_IN_RFKILL, 0, NULL);
+	ret = iwl_mvm_send_cmd_pdu(mvm, SCAN_ABORT_CMD, CMD_SYNC, 0, NULL);
 	if (ret) {
 		IWL_ERR(mvm, "Couldn't send SCAN_ABORT_CMD: %d\n", ret);
 		/* mac80211's state will be cleaned in the fw_restart flow */
@@ -522,6 +546,12 @@ static void iwl_build_scan_cmd(struct iwl_mvm *mvm,
 			       struct cfg80211_sched_scan_request *req,
 			       struct iwl_scan_offload_cmd *scan)
 {
+	bool is_assoc = false;
+
+	ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+					    IEEE80211_IFACE_ITER_NORMAL,
+					    iwl_mvm_vif_assoc_iterator,
+					    &is_assoc);
 	scan->channel_count =
 		mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels +
 		mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels;
@@ -529,8 +559,9 @@ static void iwl_build_scan_cmd(struct iwl_mvm *mvm,
 	scan->quiet_plcp_th = cpu_to_le16(IWL_PLCP_QUIET_THRESH);
 	scan->good_CRC_th = IWL_GOOD_CRC_TH_DEFAULT;
 	scan->rx_chain = iwl_mvm_scan_rx_chain(mvm);
-	scan->max_out_time = cpu_to_le32(200 * 1024);
-	scan->suspend_time = iwl_mvm_scan_suspend_time(vif);
+	scan->max_out_time = iwl_mvm_scan_max_out_time(vif, req->flags,
+						       is_assoc);
+	scan->suspend_time = iwl_mvm_scan_suspend_time(vif, is_assoc);
 	scan->filter_flags |= cpu_to_le32(MAC_FILTER_ACCEPT_GRP |
 					  MAC_FILTER_IN_BEACON);
 	scan->scan_type = cpu_to_le32(SCAN_TYPE_BACKGROUND);
@@ -642,7 +673,7 @@ static void iwl_build_channel_cfg(struct iwl_mvm *mvm,
 		channels->iter_count[index] = cpu_to_le16(1);
 		channels->iter_interval[index] = 0;
 
-		if (!(s_band->channels[i].flags & IEEE80211_CHAN_PASSIVE_SCAN))
+		if (!(s_band->channels[i].flags & IEEE80211_CHAN_NO_IR))
 			channels->type[index] |=
 				cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_ACTIVE);
 
@@ -817,11 +848,10 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
 		IWL_DEBUG_SCAN(mvm,
 			       "Sending scheduled scan with filtering, filter len %d\n",
 			       req->n_match_sets);
-		scan_req.flags |=
-				cpu_to_le16(IWL_SCAN_OFFLOAD_FLAG_FILTER_SSID);
 	} else {
 		IWL_DEBUG_SCAN(mvm,
 			       "Sending Scheduled scan without filtering\n");
+		scan_req.flags |= cpu_to_le16(IWL_SCAN_OFFLOAD_FLAG_PASS_ALL);
 	}
 
 	return iwl_mvm_send_cmd_pdu(mvm, SCAN_OFFLOAD_REQUEST_CMD, CMD_SYNC,
diff --git a/drivers/net/wireless/iwlwifi/mvm/sf.c b/drivers/net/wireless/iwlwifi/mvm/sf.c
new file mode 100644
index 000000000000..8401627c0030
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/sf.c
@@ -0,0 +1,291 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include "mvm.h"
+
+/* For counting bound interfaces */
+struct iwl_mvm_active_iface_iterator_data {
+	struct ieee80211_vif *ignore_vif;
+	u8 sta_vif_ap_sta_id;
+	enum iwl_sf_state sta_vif_state;
+	int num_active_macs;
+};
+
+/*
+ * Count bound interfaces which are not p2p, besides data->ignore_vif.
+ * The data->sta_vif_* fields describe one bound station vif, if one exists.
+ */
+static void iwl_mvm_bound_iface_iterator(void *_data, u8 *mac,
+					 struct ieee80211_vif *vif)
+{
+	struct iwl_mvm_active_iface_iterator_data *data = _data;
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+	if (vif == data->ignore_vif || !mvmvif->phy_ctxt ||
+	    vif->type == NL80211_IFTYPE_P2P_DEVICE)
+		return;
+
+	data->num_active_macs++;
+
+	if (vif->type == NL80211_IFTYPE_STATION) {
+		data->sta_vif_ap_sta_id = mvmvif->ap_sta_id;
+		if (vif->bss_conf.assoc)
+			data->sta_vif_state = SF_FULL_ON;
+		else
+			data->sta_vif_state = SF_INIT_OFF;
+	}
+}
+
+/*
+ * Aging and idle timeouts for the different possible scenarios
+ * in SF_FULL_ON state.
+ */
+static const __le32 sf_full_timeout[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES] = {
+	{
+		cpu_to_le32(SF_SINGLE_UNICAST_AGING_TIMER),
+		cpu_to_le32(SF_SINGLE_UNICAST_IDLE_TIMER)
+	},
+	{
+		cpu_to_le32(SF_AGG_UNICAST_AGING_TIMER),
+		cpu_to_le32(SF_AGG_UNICAST_IDLE_TIMER)
+	},
+	{
+		cpu_to_le32(SF_MCAST_AGING_TIMER),
+		cpu_to_le32(SF_MCAST_IDLE_TIMER)
+	},
+	{
+		cpu_to_le32(SF_BA_AGING_TIMER),
+		cpu_to_le32(SF_BA_IDLE_TIMER)
+	},
+	{
+		cpu_to_le32(SF_TX_RE_AGING_TIMER),
+		cpu_to_le32(SF_TX_RE_IDLE_TIMER)
+	},
+};
+
+static void iwl_mvm_fill_sf_command(struct iwl_sf_cfg_cmd *sf_cmd,
+				    struct ieee80211_sta *sta)
+{
+	int i, j, watermark;
+
+	sf_cmd->watermark[SF_LONG_DELAY_ON] = cpu_to_le32(SF_W_MARK_SCAN);
+
+	/*
+	 * If we are in the association flow, check the antenna configuration
+	 * capabilities of the AP station and choose the watermark accordingly.
+	 */
+	if (sta) {
+		if (sta->ht_cap.ht_supported || sta->vht_cap.vht_supported) {
+			switch (sta->rx_nss) {
+			case 1:
+				watermark = SF_W_MARK_SISO;
+				break;
+			case 2:
+				watermark = SF_W_MARK_MIMO2;
+				break;
+			default:
+				watermark = SF_W_MARK_MIMO3;
+				break;
+			}
+		} else {
+			watermark = SF_W_MARK_LEGACY;
+		}
+	/* default watermark value for unassociated mode. */
+	} else {
+		watermark = SF_W_MARK_MIMO2;
+	}
+	sf_cmd->watermark[SF_FULL_ON] = cpu_to_le32(watermark);
+
+	for (i = 0; i < SF_NUM_SCENARIO; i++) {
+		for (j = 0; j < SF_NUM_TIMEOUT_TYPES; j++) {
+			sf_cmd->long_delay_timeouts[i][j] =
+					cpu_to_le32(SF_LONG_DELAY_AGING_TIMER);
+		}
+	}
+	BUILD_BUG_ON(sizeof(sf_full_timeout) !=
+		     sizeof(__le32) * SF_NUM_SCENARIO * SF_NUM_TIMEOUT_TYPES);
+
+	memcpy(sf_cmd->full_on_timeouts, sf_full_timeout,
+	       sizeof(sf_full_timeout));
+}
+
+static int iwl_mvm_sf_config(struct iwl_mvm *mvm, u8 sta_id,
+			     enum iwl_sf_state new_state)
+{
+	struct iwl_sf_cfg_cmd sf_cmd = {
+		.state = new_state,
+	};
+	struct ieee80211_sta *sta;
+	int ret = 0;
+
+	/*
+	 * If an associated AP sta changed its antenna configuration, the state
+	 * will remain FULL_ON but SF parameters need to be reconsidered.
+	 */
+	if (new_state != SF_FULL_ON && mvm->sf_state == new_state)
+		return 0;
+
+	switch (new_state) {
+	case SF_UNINIT:
+		break;
+	case SF_FULL_ON:
+		if (sta_id == IWL_MVM_STATION_COUNT) {
+			IWL_ERR(mvm,
+				"No station: Cannot switch SF to FULL_ON\n");
+			return -EINVAL;
+		}
+		rcu_read_lock();
+		sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
+		if (IS_ERR_OR_NULL(sta)) {
+			IWL_ERR(mvm, "Invalid station id\n");
+			rcu_read_unlock();
+			return -EINVAL;
+		}
+		iwl_mvm_fill_sf_command(&sf_cmd, sta);
+		rcu_read_unlock();
+		break;
+	case SF_INIT_OFF:
+		iwl_mvm_fill_sf_command(&sf_cmd, NULL);
+		break;
+	default:
+		WARN_ONCE(1, "Invalid state: %d. not sending Smart Fifo cmd\n",
+			  new_state);
+		return -EINVAL;
+	}
+
+	ret = iwl_mvm_send_cmd_pdu(mvm, REPLY_SF_CFG_CMD, CMD_ASYNC,
+				   sizeof(sf_cmd), &sf_cmd);
+	if (!ret)
+		mvm->sf_state = new_state;
+
+	return ret;
+}
+
+/*
+ * Update the Smart Fifo state:
+ * count bound interfaces that are not to be removed, ignoring p2p devices,
+ * and set the new state accordingly.
+ */
+int iwl_mvm_sf_update(struct iwl_mvm *mvm, struct ieee80211_vif *changed_vif,
+		      bool remove_vif)
+{
+	enum iwl_sf_state new_state;
+	u8 sta_id = IWL_MVM_STATION_COUNT;
+	struct iwl_mvm_vif *mvmvif = NULL;
+	struct iwl_mvm_active_iface_iterator_data data = {
+		.ignore_vif = changed_vif,
+		.sta_vif_state = SF_UNINIT,
+		.sta_vif_ap_sta_id = IWL_MVM_STATION_COUNT,
+	};
+
+	if (IWL_UCODE_API(mvm->fw->ucode_ver) < 8)
+		return 0;
+
+	/*
+	 * Ignore the call if we are in HW Restart flow, or if the handled
+	 * vif is a p2p device.
+	 */
+	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) ||
+	    (changed_vif && changed_vif->type == NL80211_IFTYPE_P2P_DEVICE))
+		return 0;
+
+	ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+						   IEEE80211_IFACE_ITER_NORMAL,
+						   iwl_mvm_bound_iface_iterator,
+						   &data);
+
+	/* If changed_vif exists and is not to be removed, add to the count */
+	if (changed_vif && !remove_vif)
+		data.num_active_macs++;
+
+	switch (data.num_active_macs) {
+	case 0:
+		/* If there are no active macs - change state to SF_INIT_OFF */
+		new_state = SF_INIT_OFF;
+		break;
+	case 1:
+		if (remove_vif) {
+			/* The one active mac left is of type station
+			 * and we filled the relevant data during iteration
+			 */
+			new_state = data.sta_vif_state;
+			sta_id = data.sta_vif_ap_sta_id;
+		} else {
+			if (WARN_ON(!changed_vif))
+				return -EINVAL;
+			if (changed_vif->type != NL80211_IFTYPE_STATION) {
+				new_state = SF_UNINIT;
+			} else if (changed_vif->bss_conf.assoc) {
+				mvmvif = iwl_mvm_vif_from_mac80211(changed_vif);
+				sta_id = mvmvif->ap_sta_id;
+				new_state = SF_FULL_ON;
+			} else {
+				new_state = SF_INIT_OFF;
+			}
+		}
+		break;
+	default:
+		/* If there are multiple active macs - change to SF_UNINIT */
+		new_state = SF_UNINIT;
+	}
+	return iwl_mvm_sf_config(mvm, sta_id, new_state);
+}
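+
+/* Summary of the decision above (comment only):
+ *
+ *   0 remaining active MACs                        -> SF_INIT_OFF
+ *   1 remaining active MAC, station, associated    -> SF_FULL_ON (AP sta id)
+ *   1 remaining active MAC, station, unassociated  -> SF_INIT_OFF
+ *   1 remaining active MAC, not a station          -> SF_UNINIT
+ *   2 or more remaining active MACs                -> SF_UNINIT
+ */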
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.c b/drivers/net/wireless/iwlwifi/mvm/sta.c
index 329952363a54..ec1812133235 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.c
@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -452,8 +452,15 @@ void iwl_mvm_sta_drained_wk(struct work_struct *wk)
 			rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
 						  lockdep_is_held(&mvm->mutex));
 
-		/* This station is in use */
-		if (!IS_ERR(sta))
+		/*
+		 * This station is in use or RCU-removed; the latter happens in
+		 * managed mode, where mac80211 removes the station before we
+		 * can remove it from firmware (we can only do that after the
+		 * MAC is marked unassociated), and possibly while the deauth
+		 * frame to disconnect from the AP is still queued. Then, the
+		 * station pointer is -ENOENT when the last skb is reclaimed.
+		 */
+		if (!IS_ERR(sta) || PTR_ERR(sta) == -ENOENT)
 			continue;
 
 		if (PTR_ERR(sta) == -EINVAL) {
@@ -840,7 +847,7 @@ static const u8 tid_to_ac[] = {
 int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 			     struct ieee80211_sta *sta, u16 tid, u16 *ssn)
 {
-	struct iwl_mvm_sta *mvmsta = (void *)sta->drv_priv;
+	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
 	struct iwl_mvm_tid_data *tid_data;
 	int txq_id;
 
@@ -895,7 +902,7 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 			    struct ieee80211_sta *sta, u16 tid, u8 buf_size)
 {
-	struct iwl_mvm_sta *mvmsta = (void *)sta->drv_priv;
+	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
 	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
 	int queue, fifo, ret;
 	u16 ssn;
@@ -932,26 +939,13 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 	IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n",
 		     sta->addr, tid);
 
-	if (mvm->cfg->ht_params->use_rts_for_aggregation) {
-		/*
-		 * switch to RTS/CTS if it is the prefer protection
-		 * method for HT traffic
-		 * this function also sends the LQ command
-		 */
-		return iwl_mvm_tx_protection(mvm, mvmsta, true);
-		/*
-		 * TODO: remove the TLC_RTS flag when we tear down the last
-		 * AGG session (agg_tids_count in DVM)
-		 */
-	}
-
-	return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.lq, CMD_ASYNC, false);
+	return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.lq, false);
 }
 
 int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 			    struct ieee80211_sta *sta, u16 tid)
 {
-	struct iwl_mvm_sta *mvmsta = (void *)sta->drv_priv;
+	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
 	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
 	u16 txq_id;
 	int err;
@@ -1023,7 +1017,7 @@ int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 			    struct ieee80211_sta *sta, u16 tid)
 {
-	struct iwl_mvm_sta *mvmsta = (void *)sta->drv_priv;
+	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
 	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
 	u16 txq_id;
 	enum iwl_mvm_agg_state old_state;
@@ -1123,8 +1117,8 @@ static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
 		memcpy(cmd.key, keyconf->key, keyconf->keylen);
 		break;
 	default:
-		WARN_ON(1);
-		return -EINVAL;
+		key_flags |= cpu_to_le16(STA_KEY_FLG_EXT);
+		memcpy(cmd.key, keyconf->key, keyconf->keylen);
 	}
 
 	if (!(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE))
@@ -1288,8 +1282,8 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
 					   0, NULL, CMD_SYNC);
 		break;
 	default:
-		IWL_ERR(mvm, "Unknown cipher %x\n", keyconf->cipher);
-		ret = -EINVAL;
+		ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf,
+					   sta_id, 0, NULL, CMD_SYNC);
 	}
 
 	if (ret)
@@ -1416,7 +1410,7 @@ void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
 void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
 				struct ieee80211_sta *sta)
 {
-	struct iwl_mvm_sta *mvmsta = (void *)sta->drv_priv;
+	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
 	struct iwl_mvm_add_sta_cmd_v6 cmd = {
 		.add_modify = STA_MODE_MODIFY,
 		.sta_id = mvmsta->sta_id,
@@ -1438,7 +1432,7 @@ void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
 	u16 sleep_state_flags =
 		(reason == IEEE80211_FRAME_RELEASE_UAPSD) ?
 			STA_SLEEP_STATE_UAPSD : STA_SLEEP_STATE_PS_POLL;
-	struct iwl_mvm_sta *mvmsta = (void *)sta->drv_priv;
+	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
 	struct iwl_mvm_add_sta_cmd_v6 cmd = {
 		.add_modify = STA_MODE_MODIFY,
 		.sta_id = mvmsta->sta_id,
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.h b/drivers/net/wireless/iwlwifi/mvm/sta.h
index 4dfc359a4bdd..4968d0237dc5 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.h
@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -298,6 +298,12 @@ struct iwl_mvm_sta {
 	bool tt_tx_protection;
 };
 
+static inline struct iwl_mvm_sta *
+iwl_mvm_sta_from_mac80211(struct ieee80211_sta *sta)
+{
+	return (void *)sta->drv_priv;
+}
+
 /**
  * struct iwl_mvm_int_sta - representation of an internal station (auxiliary or
  * broadcast)
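
The sta.h hunk above adds a typed accessor so call sites no longer open-code the `(void *)sta->drv_priv` cast. As a rough, standalone illustration of that pattern only (the struct and function names below are invented for the example, not the driver's API):

#include <stdio.h>

struct generic_sta {
	unsigned char addr[8];		/* padded so drv_priv stays aligned */
	char drv_priv[64];		/* opaque, driver-owned scratch area */
};

struct drv_sta {
	int sta_id;
	int tid_seq[8];
};

/* one typed helper instead of an open-coded cast at every call site */
static inline struct drv_sta *drv_sta_from_generic(struct generic_sta *sta)
{
	return (struct drv_sta *)sta->drv_priv;
}

int main(void)
{
	struct generic_sta sta = { .addr = "aabbcc" };
	struct drv_sta *priv = drv_sta_from_generic(&sta);

	priv->sta_id = 3;
	printf("sta_id=%d\n", priv->sta_id);
	return 0;
}
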
diff --git a/drivers/net/wireless/iwlwifi/mvm/testmode.h b/drivers/net/wireless/iwlwifi/mvm/testmode.h
index eb74391d91ca..0241665925f7 100644
--- a/drivers/net/wireless/iwlwifi/mvm/testmode.h
+++ b/drivers/net/wireless/iwlwifi/mvm/testmode.h
@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.c b/drivers/net/wireless/iwlwifi/mvm/time-event.c
index 95ce4b601fef..b4c2abaa297b 100644
--- a/drivers/net/wireless/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/iwlwifi/mvm/time-event.c
@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -249,12 +249,12 @@ static bool iwl_mvm_time_event_response(struct iwl_notif_wait_data *notif_wait,
 		container_of(notif_wait, struct iwl_mvm, notif_wait);
 	struct iwl_mvm_time_event_data *te_data = data;
 	struct iwl_time_event_resp *resp;
-	int resp_len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
+	int resp_len = iwl_rx_packet_payload_len(pkt);
 
 	if (WARN_ON(pkt->hdr.cmd != TIME_EVENT_CMD))
 		return true;
 
-	if (WARN_ON_ONCE(resp_len != sizeof(pkt->hdr) + sizeof(*resp))) {
+	if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
 		IWL_ERR(mvm, "Invalid TIME_EVENT_CMD response\n");
 		return true;
 	}
diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.h b/drivers/net/wireless/iwlwifi/mvm/time-event.h
index d9c8d6cfa2db..4a61c8c02372 100644
--- a/drivers/net/wireless/iwlwifi/mvm/time-event.h
+++ b/drivers/net/wireless/iwlwifi/mvm/time-event.h
@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/mvm/tt.c b/drivers/net/wireless/iwlwifi/mvm/tt.c
index 1f3282dff513..3afa6b6bf835 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tt.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tt.c
@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -340,7 +340,7 @@ static void check_exit_ctkill(struct work_struct *work)
 
 	iwl_trans_start_hw(mvm->trans);
 	temp = check_nic_temperature(mvm);
-	iwl_trans_stop_hw(mvm->trans, false);
+	iwl_trans_stop_device(mvm->trans);
 
 	if (temp < MIN_TEMPERATURE || temp > MAX_TEMPERATURE) {
 		IWL_DEBUG_TEMP(mvm, "Failed to measure NIC temperature\n");
@@ -388,7 +388,7 @@ static void iwl_mvm_tt_tx_protection(struct iwl_mvm *mvm, bool enable)
 						lockdep_is_held(&mvm->mutex));
 		if (IS_ERR_OR_NULL(sta))
 			continue;
-		mvmsta = (void *)sta->drv_priv;
+		mvmsta = iwl_mvm_sta_from_mac80211(sta);
 		if (enable == mvmsta->tt_tx_protection)
 			continue;
 		err = iwl_mvm_tx_protection(mvm, mvmsta, enable);
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
index 43d97c33a75a..90378c217bc7 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -253,8 +253,7 @@ static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
 		memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);
 		break;
 	default:
-		IWL_ERR(mvm, "Unknown encode cipher %x\n", keyconf->cipher);
-		break;
+		tx_cmd->sec_ctl |= TX_CMD_SEC_EXT;
 	}
 }
 
@@ -276,6 +275,7 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
 		return NULL;
 
 	memset(dev_cmd, 0, sizeof(*dev_cmd));
+	dev_cmd->hdr.cmd = TX_CMD;
 	tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
 
 	if (info->control.hw_key)
@@ -361,7 +361,7 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
 	u8 txq_id = info->hw_queue;
 	bool is_data_qos = false, is_ampdu = false;
 
-	mvmsta = (void *)sta->drv_priv;
+	mvmsta = iwl_mvm_sta_from_mac80211(sta);
 	fc = hdr->frame_control;
 
 	if (WARN_ON_ONCE(!mvmsta))
@@ -390,7 +390,6 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
 		seq_number &= IEEE80211_SCTL_SEQ;
 		hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
 		hdr->seq_ctrl |= cpu_to_le16(seq_number);
-		seq_number += 0x10;
 		is_data_qos = true;
 		is_ampdu = info->flags & IEEE80211_TX_CTL_AMPDU;
 	}
@@ -407,13 +406,13 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
 	}
 
 	IWL_DEBUG_TX(mvm, "TX to [%d|%d] Q:%d - seq: 0x%x\n", mvmsta->sta_id,
-		     tid, txq_id, seq_number);
+		     tid, txq_id, IEEE80211_SEQ_TO_SN(seq_number));
 
 	if (iwl_trans_tx(mvm->trans, skb, dev_cmd, txq_id))
 		goto drop_unlock_sta;
 
 	if (is_data_qos && !ieee80211_has_morefrags(fc))
-		mvmsta->tid_data[tid].seq_number = seq_number;
+		mvmsta->tid_data[tid].seq_number = seq_number + 0x10;
 
 	spin_unlock(&mvmsta->lock);
 
@@ -432,7 +431,7 @@ drop:
 static void iwl_mvm_check_ratid_empty(struct iwl_mvm *mvm,
 				      struct ieee80211_sta *sta, u8 tid)
 {
-	struct iwl_mvm_sta *mvmsta = (void *)sta->drv_priv;
+	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
 	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
 	struct ieee80211_vif *vif = mvmsta->vif;
 
@@ -662,7 +661,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
 	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
 
 	if (!IS_ERR_OR_NULL(sta)) {
-		mvmsta = (void *)sta->drv_priv;
+		mvmsta = iwl_mvm_sta_from_mac80211(sta);
 
 		if (tid != IWL_TID_NON_QOS) {
 			struct iwl_mvm_tid_data *tid_data =
@@ -704,7 +703,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
 			 */
 			spin_lock_bh(&mvmsta->lock);
 			sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
-			if (IS_ERR_OR_NULL(sta)) {
+			if (!sta || PTR_ERR(sta) == -EBUSY) {
 				/*
 				 * Station disappeared in the meantime:
 				 * so we are draining.
@@ -713,7 +712,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
 				schedule_work(&mvm->sta_drained_wk);
 			}
 			spin_unlock_bh(&mvmsta->lock);
-		} else if (!mvmsta) {
+		} else if (!mvmsta && PTR_ERR(sta) == -EBUSY) {
 			/* Tx response without STA, so we are draining */
 			set_bit(sta_id, mvm->sta_drained);
 			schedule_work(&mvm->sta_drained_wk);
@@ -793,7 +792,7 @@ static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm,
 	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
 
 	if (!WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
-		struct iwl_mvm_sta *mvmsta = (void *)sta->drv_priv;
+		struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
 		mvmsta->tid_data[tid].rate_n_flags =
 			le32_to_cpu(tx_resp->initial_rate);
 	}
@@ -849,7 +848,7 @@ int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
 		return 0;
 	}
 
-	mvmsta = (void *)sta->drv_priv;
+	mvmsta = iwl_mvm_sta_from_mac80211(sta);
 	tid_data = &mvmsta->tid_data[tid];
 
 	if (WARN_ONCE(tid_data->txq_id != scd_flow, "Q %d, tid %d, flow %d",
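
The tx.c hunk above stops advancing the per-TID sequence number before the frame is handed to the transport and instead stores `seq_number + 0x10` only after `iwl_trans_tx()` has accepted the frame, so a frame rejected by the transport no longer consumes a sequence number. A minimal sketch of that commit-on-success pattern, with invented names and a fake queue:

#include <stdbool.h>
#include <stdio.h>

struct tid_state {
	unsigned int seq;		/* next sequence number, <<4 as in 802.11 */
};

/* stand-in for the transport: pretend every other attempt is rejected */
static bool hw_queue_frame(unsigned int seq)
{
	static int calls;

	(void)seq;
	return (calls++ % 2) == 0;
}

static int tx_one(struct tid_state *tid)
{
	unsigned int seq = tid->seq;

	if (!hw_queue_frame(seq))
		return -1;		/* dropped: tid->seq is left untouched */

	tid->seq = seq + 0x10;		/* advance only once the frame is queued */
	return 0;
}

int main(void)
{
	struct tid_state tid = { .seq = 0 };
	int i;

	for (i = 0; i < 4; i++)
		printf("tx %d: %s, next seq 0x%x\n", i,
		       tx_one(&tid) ? "dropped" : "queued", tid.seq);
	return 0;
}
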
diff --git a/drivers/net/wireless/iwlwifi/mvm/utils.c b/drivers/net/wireless/iwlwifi/mvm/utils.c
index ed69e9b78e82..a4a5e25623c3 100644
--- a/drivers/net/wireless/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/iwlwifi/mvm/utils.c
@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -168,8 +168,8 @@ int iwl_mvm_send_cmd_status(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd,
 		goto out_free_resp;
 	}
 
-	resp_len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
-	if (WARN_ON_ONCE(resp_len != sizeof(pkt->hdr) + sizeof(*resp))) {
+	resp_len = iwl_rx_packet_payload_len(pkt);
+	if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
 		ret = -EIO;
 		goto out_free_resp;
 	}
@@ -486,22 +486,18 @@ void iwl_mvm_dump_sram(struct iwl_mvm *mvm)
  * this case to clear the state indicating that station creation is in
  * progress.
  */
-int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq,
-			u8 flags, bool init)
+int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool init)
 {
 	struct iwl_host_cmd cmd = {
 		.id = LQ_CMD,
 		.len = { sizeof(struct iwl_lq_cmd), },
-		.flags = flags,
+		.flags = init ? CMD_SYNC : CMD_ASYNC,
 		.data = { lq, },
 	};
 
 	if (WARN_ON(lq->sta_id == IWL_MVM_STATION_COUNT))
 		return -EINVAL;
 
-	if (WARN_ON(init && (cmd.flags & CMD_ASYNC)))
-		return -EINVAL;
-
 	return iwl_mvm_send_cmd(mvm, &cmd);
 }
 
@@ -522,6 +518,11 @@ void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 	int i;
 
 	lockdep_assert_held(&mvm->mutex);
+
+	/* SMPS is irrelevant for NICs that don't have at least 2 RX antenna */
+	if (num_of_ant(iwl_fw_valid_rx_ant(mvm->fw)) == 1)
+		return;
+
 	mvmvif = iwl_mvm_vif_from_mac80211(vif);
 	mvmvif->smps_requests[req_type] = smps_request;
 	for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++) {
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index e6272546395a..3040924f5f3c 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -297,6 +297,9 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
 	{IWL_PCI_DEVICE(0x08B2, 0x4370, iwl7260_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x08B2, 0x4360, iwl7260_2n_cfg)},
 	{IWL_PCI_DEVICE(0x08B1, 0x5070, iwl7260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x08B1, 0x5072, iwl7260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x08B1, 0x5170, iwl7260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x08B1, 0x5770, iwl7260_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x08B1, 0x4020, iwl7260_2n_cfg)},
 	{IWL_PCI_DEVICE(0x08B1, 0x402A, iwl7260_2n_cfg)},
 	{IWL_PCI_DEVICE(0x08B2, 0x4220, iwl7260_2n_cfg)},
@@ -350,6 +353,8 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
 	{IWL_PCI_DEVICE(0x08B4, 0x8270, iwl3160_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x08B3, 0x8470, iwl3160_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x08B3, 0x8570, iwl3160_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x08B3, 0x1070, iwl3160_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x08B3, 0x1170, iwl3160_2ac_cfg)},
 
 /* 7265 Series */
 	{IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)},
diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h
index 051268c037b1..e851f26fd44c 100644
--- a/drivers/net/wireless/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/iwlwifi/pcie/internal.h
@@ -1,6 +1,6 @@
 /******************************************************************************
  *
- * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
  *
  * Portions of this file are derived from the ipw3945 project, as well
  * as portions of the ieee80211 subsystem header files.
@@ -256,13 +256,13 @@ iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx)
  * @hw_base: pci hardware address support
  * @ucode_write_complete: indicates that the ucode has been copied.
  * @ucode_write_waitq: wait queue for uCode load
- * @status - transport specific status flags
  * @cmd_queue - command queue number
  * @rx_buf_size_8k: 8 kB RX buffer size
  * @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes)
  * @rx_page_order: page order for receive buffer size
  * @wd_timeout: queue watchdog timeout (jiffies)
  * @reg_lock: protect hw register access
+ * @cmd_in_flight: true when we have a host command in flight
  */
 struct iwl_trans_pcie {
 	struct iwl_rxq rxq;
@@ -274,7 +274,6 @@ struct iwl_trans_pcie {
 	__le32 *ict_tbl;
 	dma_addr_t ict_tbl_dma;
 	int ict_index;
-	u32 inta;
 	bool use_ict;
 	struct isr_statistics isr_stats;
 
@@ -296,7 +295,6 @@ struct iwl_trans_pcie {
 	wait_queue_head_t ucode_write_waitq;
 	wait_queue_head_t wait_command_queue;
 
-	unsigned long status;
 	u8 cmd_queue;
 	u8 cmd_fifo;
 	u8 n_no_reclaim_cmds;
@@ -313,24 +311,7 @@ struct iwl_trans_pcie {
 
 	/*protect hw register */
 	spinlock_t reg_lock;
-};
-
-/**
- * enum iwl_pcie_status: status of the PCIe transport
- * @STATUS_HCMD_ACTIVE: a SYNC command is being processed
- * @STATUS_DEVICE_ENABLED: APM is enabled
- * @STATUS_TPOWER_PMI: the device might be asleep (need to wake it up)
- * @STATUS_INT_ENABLED: interrupts are enabled
- * @STATUS_RFKILL: the HW RFkill switch is in KILL position
- * @STATUS_FW_ERROR: the fw is in error state
- */
-enum iwl_pcie_status {
-	STATUS_HCMD_ACTIVE,
-	STATUS_DEVICE_ENABLED,
-	STATUS_TPOWER_PMI,
-	STATUS_INT_ENABLED,
-	STATUS_RFKILL,
-	STATUS_FW_ERROR,
+	bool cmd_in_flight;
 };
 
 #define IWL_TRANS_GET_PCIE_TRANS(_iwl_trans) \
@@ -363,7 +344,7 @@ void iwl_pcie_rx_free(struct iwl_trans *trans);
 /*****************************************************
 * ICT - interrupt handling
 ******************************************************/
-irqreturn_t iwl_pcie_isr_ict(int irq, void *data);
+irqreturn_t iwl_pcie_isr(int irq, void *data);
 int iwl_pcie_alloc_ict(struct iwl_trans *trans);
 void iwl_pcie_free_ict(struct iwl_trans *trans);
 void iwl_pcie_reset_ict(struct iwl_trans *trans);
@@ -399,8 +380,7 @@ void iwl_pcie_dump_csr(struct iwl_trans *trans);
 ******************************************************/
 static inline void iwl_disable_interrupts(struct iwl_trans *trans)
 {
-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	clear_bit(STATUS_INT_ENABLED, &trans_pcie->status);
+	clear_bit(STATUS_INT_ENABLED, &trans->status);
 
 	/* disable interrupts from uCode/NIC to host */
 	iwl_write32(trans, CSR_INT_MASK, 0x00000000);
@@ -417,14 +397,18 @@ static inline void iwl_enable_interrupts(struct iwl_trans *trans)
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 
 	IWL_DEBUG_ISR(trans, "Enabling interrupts\n");
-	set_bit(STATUS_INT_ENABLED, &trans_pcie->status);
+	set_bit(STATUS_INT_ENABLED, &trans->status);
+	trans_pcie->inta_mask = CSR_INI_SET_MASK;
 	iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
 }
 
 static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
 {
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
 	IWL_DEBUG_ISR(trans, "Enabling rfkill interrupt\n");
-	iwl_write32(trans, CSR_INT_MASK, CSR_INT_BIT_RF_KILL);
+	trans_pcie->inta_mask = CSR_INT_BIT_RF_KILL;
+	iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
 }
 
 static inline void iwl_wake_queue(struct iwl_trans *trans,
@@ -477,12 +461,31 @@ static inline bool iwl_is_rfkill_set(struct iwl_trans *trans)
 		CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
 }
 
-static inline void iwl_nic_error(struct iwl_trans *trans)
+static inline void __iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans,
+						  u32 reg, u32 mask, u32 value)
 {
-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	u32 v;
+
+#ifdef CONFIG_IWLWIFI_DEBUG
+	WARN_ON_ONCE(value & ~mask);
+#endif
+
+	v = iwl_read32(trans, reg);
+	v &= ~mask;
+	v |= value;
+	iwl_write32(trans, reg, v);
+}
 
-	set_bit(STATUS_FW_ERROR, &trans_pcie->status);
-	iwl_op_mode_nic_error(trans->op_mode);
+static inline void __iwl_trans_pcie_clear_bit(struct iwl_trans *trans,
+					      u32 reg, u32 mask)
+{
+	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, 0);
+}
+
+static inline void __iwl_trans_pcie_set_bit(struct iwl_trans *trans,
+					    u32 reg, u32 mask)
+{
+	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, mask);
 }
 
 #endif /* __iwl_trans_int_pcie_h__ */
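
The internal.h hunk above moves the masked read-modify-write register helpers out of trans.c into the header so other parts of the PCIe transport can use them. A self-contained sketch of the same helper shape, assuming a fake register array instead of real MMIO (names here are illustrative only):

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_regs[4];

static uint32_t reg_read32(unsigned int reg)
{
	return fake_regs[reg];
}

static void reg_write32(unsigned int reg, uint32_t val)
{
	fake_regs[reg] = val;
}

static void reg_set_bits_mask(unsigned int reg, uint32_t mask, uint32_t value)
{
	uint32_t v = reg_read32(reg);

	v &= ~mask;		/* clear the whole masked field */
	v |= (value & mask);	/* then set the requested bits within it */
	reg_write32(reg, v);
}

static void reg_set_bit(unsigned int reg, uint32_t mask)
{
	reg_set_bits_mask(reg, mask, mask);
}

static void reg_clear_bit(unsigned int reg, uint32_t mask)
{
	reg_set_bits_mask(reg, mask, 0);
}

int main(void)
{
	reg_set_bit(0, 0x08);			/* e.g. request access */
	reg_clear_bit(0, 0x08);			/* and release it again */
	reg_set_bits_mask(0, 0xf0, 0x30);
	printf("reg0 = 0x%08x\n", reg_read32(0));	/* prints 0x00000030 */
	return 0;
}
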
diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c
index be3995afa9d0..08c23d497a02 100644
--- a/drivers/net/wireless/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/rx.c
@@ -1,6 +1,6 @@
 /******************************************************************************
  *
- * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
  *
  * Portions of this file are derived from the ipw3945 project, as well
  * as portions of the ieee80211 subsystem header files.
@@ -148,10 +148,9 @@ int iwl_pcie_rx_stop(struct iwl_trans *trans)
 static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
 				    struct iwl_rxq *rxq)
 {
-	unsigned long flags;
 	u32 reg;
 
-	spin_lock_irqsave(&rxq->lock, flags);
+	spin_lock(&rxq->lock);
 
 	if (rxq->need_update == 0)
 		goto exit_unlock;
@@ -162,11 +161,8 @@ static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
 		rxq->write_actual = (rxq->write & ~0x7);
 		iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
 	} else {
-		struct iwl_trans_pcie *trans_pcie =
-			IWL_TRANS_GET_PCIE_TRANS(trans);
-
 		/* If power-saving is in use, make sure device is awake */
-		if (test_bit(STATUS_TPOWER_PMI, &trans_pcie->status)) {
+		if (test_bit(STATUS_TPOWER_PMI, &trans->status)) {
 			reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);
 
 			if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
@@ -193,7 +189,7 @@ static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
 	rxq->need_update = 0;
 
  exit_unlock:
-	spin_unlock_irqrestore(&rxq->lock, flags);
+	spin_unlock(&rxq->lock);
 }
 
 /*
@@ -212,7 +208,6 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_rxq *rxq = &trans_pcie->rxq;
 	struct iwl_rx_mem_buffer *rxb;
-	unsigned long flags;
 
 	/*
 	 * If the device isn't enabled - not need to try to add buffers...
@@ -222,10 +217,10 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
 	 * stopped, we cannot access the HW (in particular not prph).
 	 * So don't try to restock if the APM has been already stopped.
 	 */
-	if (!test_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status))
+	if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
 		return;
 
-	spin_lock_irqsave(&rxq->lock, flags);
+	spin_lock(&rxq->lock);
 	while ((iwl_rxq_space(rxq) > 0) && (rxq->free_count)) {
 		/* The overwritten rxb must be a used one */
 		rxb = rxq->queue[rxq->write];
@@ -242,7 +237,7 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
 		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
 		rxq->free_count--;
 	}
-	spin_unlock_irqrestore(&rxq->lock, flags);
+	spin_unlock(&rxq->lock);
 	/* If the pre-allocated buffer pool is dropping low, schedule to
 	 * refill it */
 	if (rxq->free_count <= RX_LOW_WATERMARK)
@@ -251,9 +246,9 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
 	/* If we've added more space for the firmware to place data, tell it.
 	 * Increment device's write pointer in multiples of 8. */
 	if (rxq->write_actual != (rxq->write & ~0x7)) {
-		spin_lock_irqsave(&rxq->lock, flags);
+		spin_lock(&rxq->lock);
 		rxq->need_update = 1;
-		spin_unlock_irqrestore(&rxq->lock, flags);
+		spin_unlock(&rxq->lock);
 		iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
 	}
 }
@@ -273,16 +268,15 @@ static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority)
 	struct iwl_rxq *rxq = &trans_pcie->rxq;
 	struct iwl_rx_mem_buffer *rxb;
 	struct page *page;
-	unsigned long flags;
 	gfp_t gfp_mask = priority;
 
 	while (1) {
-		spin_lock_irqsave(&rxq->lock, flags);
+		spin_lock(&rxq->lock);
 		if (list_empty(&rxq->rx_used)) {
-			spin_unlock_irqrestore(&rxq->lock, flags);
+			spin_unlock(&rxq->lock);
 			return;
 		}
-		spin_unlock_irqrestore(&rxq->lock, flags);
+		spin_unlock(&rxq->lock);
 
 		if (rxq->free_count > RX_LOW_WATERMARK)
 			gfp_mask |= __GFP_NOWARN;
@@ -311,17 +305,17 @@ static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority)
 			return;
 		}
 
-		spin_lock_irqsave(&rxq->lock, flags);
+		spin_lock(&rxq->lock);
 
 		if (list_empty(&rxq->rx_used)) {
-			spin_unlock_irqrestore(&rxq->lock, flags);
+			spin_unlock(&rxq->lock);
 			__free_pages(page, trans_pcie->rx_page_order);
 			return;
 		}
 		rxb = list_first_entry(&rxq->rx_used, struct iwl_rx_mem_buffer,
 				       list);
 		list_del(&rxb->list);
-		spin_unlock_irqrestore(&rxq->lock, flags);
+		spin_unlock(&rxq->lock);
 
 		BUG_ON(rxb->page);
 		rxb->page = page;
@@ -332,9 +326,9 @@ static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority)
 				     DMA_FROM_DEVICE);
 		if (dma_mapping_error(trans->dev, rxb->page_dma)) {
 			rxb->page = NULL;
-			spin_lock_irqsave(&rxq->lock, flags);
+			spin_lock(&rxq->lock);
 			list_add(&rxb->list, &rxq->rx_used);
-			spin_unlock_irqrestore(&rxq->lock, flags);
+			spin_unlock(&rxq->lock);
 			__free_pages(page, trans_pcie->rx_page_order);
 			return;
 		}
@@ -343,12 +337,12 @@ static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority)
 		/* and also 256 byte aligned! */
 		BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
 
-		spin_lock_irqsave(&rxq->lock, flags);
+		spin_lock(&rxq->lock);
 
 		list_add_tail(&rxb->list, &rxq->rx_free);
 		rxq->free_count++;
 
-		spin_unlock_irqrestore(&rxq->lock, flags);
+		spin_unlock(&rxq->lock);
 	}
 }
 
@@ -382,13 +376,12 @@ static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans)
 static void iwl_pcie_rx_replenish(struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	unsigned long flags;
 
 	iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL);
 
-	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+	spin_lock(&trans_pcie->irq_lock);
 	iwl_pcie_rxq_restock(trans);
-	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+	spin_unlock(&trans_pcie->irq_lock);
 }
 
 static void iwl_pcie_rx_replenish_now(struct iwl_trans *trans)
@@ -514,7 +507,6 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_rxq *rxq = &trans_pcie->rxq;
 	int i, err;
-	unsigned long flags;
 
 	if (!rxq->bd) {
 		err = iwl_pcie_rx_alloc(trans);
@@ -522,7 +514,7 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
 			return err;
 	}
 
-	spin_lock_irqsave(&rxq->lock, flags);
+	spin_lock(&rxq->lock);
 
 	INIT_WORK(&trans_pcie->rx_replenish, iwl_pcie_rx_replenish_work);
 
@@ -538,16 +530,16 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
 	rxq->read = rxq->write = 0;
 	rxq->write_actual = 0;
 	memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));
-	spin_unlock_irqrestore(&rxq->lock, flags);
+	spin_unlock(&rxq->lock);
 
 	iwl_pcie_rx_replenish(trans);
 
 	iwl_pcie_rx_hw_init(trans, rxq);
 
-	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+	spin_lock(&trans_pcie->irq_lock);
 	rxq->need_update = 1;
 	iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
-	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+	spin_unlock(&trans_pcie->irq_lock);
 
 	return 0;
 }
@@ -556,7 +548,6 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_rxq *rxq = &trans_pcie->rxq;
-	unsigned long flags;
 
 	/*if rxq->bd is NULL, it means that nothing has been allocated,
 	 * exit now */
@@ -567,9 +558,9 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
 
 	cancel_work_sync(&trans_pcie->rx_replenish);
 
-	spin_lock_irqsave(&rxq->lock, flags);
+	spin_lock(&rxq->lock);
 	iwl_pcie_rxq_free_rbs(trans);
-	spin_unlock_irqrestore(&rxq->lock, flags);
+	spin_unlock(&rxq->lock);
 
 	dma_free_coherent(trans->dev, sizeof(__le32) * RX_QUEUE_SIZE,
 			  rxq->bd, rxq->bd_dma);
@@ -592,7 +583,6 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_rxq *rxq = &trans_pcie->rxq;
 	struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
-	unsigned long flags;
 	bool page_stolen = false;
 	int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
 	u32 offset = 0;
@@ -625,7 +615,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
 			rxcb._offset, get_cmd_string(trans_pcie, pkt->hdr.cmd),
 			pkt->hdr.cmd);
 
-		len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
+		len = iwl_rx_packet_len(pkt);
 		len += sizeof(u32); /* account for status word */
 		trace_iwlwifi_dev_rx(trans->dev, trans, pkt, len);
 		trace_iwlwifi_dev_rx_data(trans->dev, trans, pkt, len);
@@ -694,7 +684,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
 	/* Reuse the page if possible. For notification packets and
 	 * SKBs that fail to Rx correctly, add them back into the
 	 * rx_free list for reuse later. */
-	spin_lock_irqsave(&rxq->lock, flags);
+	spin_lock(&rxq->lock);
 	if (rxb->page != NULL) {
 		rxb->page_dma =
 			dma_map_page(trans->dev, rxb->page, 0,
@@ -715,7 +705,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
 		}
 	} else
 		list_add_tail(&rxb->list, &rxq->rx_used);
-	spin_unlock_irqrestore(&rxq->lock, flags);
+	spin_unlock(&rxq->lock);
 }
 
 /*
@@ -791,7 +781,7 @@ static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
 			     APMS_CLK_VAL_MRB_FUNC_MODE) ||
 	     (iwl_read_prph(trans, APMG_PS_CTRL_REG) &
 			    APMG_PS_CTRL_VAL_RESET_REQ))) {
-		clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
+		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
 		iwl_op_mode_wimax_active(trans->op_mode);
 		wake_up(&trans_pcie->wait_command_queue);
 		return;
@@ -800,14 +790,95 @@ static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
 	iwl_pcie_dump_csr(trans);
 	iwl_dump_fh(trans, NULL);
 
-	/* set the ERROR bit before we wake up the caller */
-	set_bit(STATUS_FW_ERROR, &trans_pcie->status);
-	clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
-	wake_up(&trans_pcie->wait_command_queue);
-
 	local_bh_disable();
-	iwl_nic_error(trans);
+	/* The STATUS_FW_ERROR bit is set in this function. This must happen
+	 * before we wake up the command caller, to ensure a proper cleanup. */
+	iwl_trans_fw_error(trans);
 	local_bh_enable();
+
+	clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
+	wake_up(&trans_pcie->wait_command_queue);
+}
+
+static u32 iwl_pcie_int_cause_non_ict(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	u32 inta;
+
+	lockdep_assert_held(&trans_pcie->irq_lock);
+
+	trace_iwlwifi_dev_irq(trans->dev);
+
+	/* Discover which interrupts are active/pending */
+	inta = iwl_read32(trans, CSR_INT);
+
+	/* the thread will service interrupts and re-enable them */
+	return inta;
+}
+
+/* a device (PCI-E) page is 4096 bytes long */
+#define ICT_SHIFT	12
+#define ICT_SIZE	(1 << ICT_SHIFT)
+#define ICT_COUNT	(ICT_SIZE / sizeof(u32))
+
+/* Interrupt cause reading via the ICT table: with the ICT in use, the
+ * driver no longer reads the INTA register to learn the device's
+ * interrupt causes, since reading that register is expensive. Instead,
+ * the device writes interrupt causes into the ICT table in DRAM,
+ * increments its index and fires an interrupt. The driver then ORs all
+ * ICT entries from the current index up to the first entry holding 0;
+ * the result is the interrupt cause to service. It sets those entries
+ * back to 0 and updates the index.
+ */
+static u32 iwl_pcie_int_cause_ict(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	u32 inta;
+	u32 val = 0;
+	u32 read;
+
+	trace_iwlwifi_dev_irq(trans->dev);
+
+	/* Ignore interrupt if there's nothing in NIC to service.
+	 * This may be due to IRQ shared with another device,
+	 * or due to sporadic interrupts thrown from our NIC. */
+	read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
+	trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, read);
+	if (!read)
+		return 0;
+
+	/*
+	 * Collect all entries up to the first 0, starting from ict_index;
+	 * note we already read at ict_index.
+	 */
+	do {
+		val |= read;
+		IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n",
+				trans_pcie->ict_index, read);
+		trans_pcie->ict_tbl[trans_pcie->ict_index] = 0;
+		trans_pcie->ict_index =
+			iwl_queue_inc_wrap(trans_pcie->ict_index, ICT_COUNT);
+
+		read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
+		trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index,
+					   read);
+	} while (read);
+
+	/* We should not get this value, just ignore it. */
+	if (val == 0xffffffff)
+		val = 0;
+
+	/*
+	 * this is a w/a for a h/w bug. the h/w bug may cause the Rx bit
+	 * (bit 15 before shifting it to 31) to clear when using interrupt
+	 * coalescing. fortunately, bits 18 and 19 stay set when this happens
+	 * so we use them to decide on the real state of the Rx bit.
+	 * In other words, bit 15 is set if bit 18 or bit 19 is set.
+	 */
+	if (val & 0xC0000)
+		val |= 0x8000;
+
+	inta = (0xff & val) | ((0xff00 & val) << 16);
+	return inta;
 }
 
 irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
@@ -817,12 +888,61 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
 	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
 	u32 inta = 0;
 	u32 handled = 0;
-	unsigned long flags;
 	u32 i;
 
 	lock_map_acquire(&trans->sync_cmd_lockdep_map);
 
-	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+	spin_lock(&trans_pcie->irq_lock);
+
+	/* If the DRAM interrupt (ICT) table is not in use yet, fall back
+	 * to reading the interrupt cause from the INTA register.
+	 */
+	if (likely(trans_pcie->use_ict))
+		inta = iwl_pcie_int_cause_ict(trans);
+	else
+		inta = iwl_pcie_int_cause_non_ict(trans);
+
+	if (iwl_have_debug_level(IWL_DL_ISR)) {
+		IWL_DEBUG_ISR(trans,
+			      "ISR inta 0x%08x, enabled 0x%08x(sw), enabled(hw) 0x%08x, fh 0x%08x\n",
+			      inta, trans_pcie->inta_mask,
+			      iwl_read32(trans, CSR_INT_MASK),
+			      iwl_read32(trans, CSR_FH_INT_STATUS));
+		if (inta & (~trans_pcie->inta_mask))
+			IWL_DEBUG_ISR(trans,
+				      "We got a masked interrupt (0x%08x)\n",
+				      inta & (~trans_pcie->inta_mask));
+	}
+
+	inta &= trans_pcie->inta_mask;
+
+	/*
+	 * Ignore interrupt if there's nothing in NIC to service.
+	 * This may be due to IRQ shared with another device,
+	 * or due to sporadic interrupts thrown from our NIC.
+	 */
+	if (unlikely(!inta)) {
+		IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
+		/*
+		 * Re-enable interrupts here since we don't
+		 * have anything to service
+		 */
+		if (test_bit(STATUS_INT_ENABLED, &trans->status))
+			iwl_enable_interrupts(trans);
+		spin_unlock(&trans_pcie->irq_lock);
+		lock_map_release(&trans->sync_cmd_lockdep_map);
+		return IRQ_NONE;
+	}
+
+	if (unlikely(inta == 0xFFFFFFFF || (inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
+		/*
+		 * Hardware disappeared. It might have
+		 * already raised an interrupt.
+		 */
+		IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
+		spin_unlock(&trans_pcie->irq_lock);
+		goto out;
+	}
 
 	/* Ack/clear/reset pending uCode interrupts.
 	 * Note:  Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
@@ -835,19 +955,13 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
 	 * hardware bugs here by ACKing all the possible interrupts so that
 	 * interrupt coalescing can still be achieved.
 	 */
-	iwl_write32(trans, CSR_INT,
-		    trans_pcie->inta | ~trans_pcie->inta_mask);
-
-	inta = trans_pcie->inta;
+	iwl_write32(trans, CSR_INT, inta | ~trans_pcie->inta_mask);
 
 	if (iwl_have_debug_level(IWL_DL_ISR))
 		IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n",
 			      inta, iwl_read32(trans, CSR_INT_MASK));
 
-	/* saved interrupt in inta variable now we can reset trans_pcie->inta */
-	trans_pcie->inta = 0;
-
-	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+	spin_unlock(&trans_pcie->irq_lock);
 
 	/* Now service all interrupt bits discovered above. */
 	if (inta & CSR_INT_BIT_HW_ERR) {
@@ -894,14 +1008,14 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
 
 		iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
 		if (hw_rfkill) {
-			set_bit(STATUS_RFKILL, &trans_pcie->status);
-			if (test_and_clear_bit(STATUS_HCMD_ACTIVE,
-					       &trans_pcie->status))
+			set_bit(STATUS_RFKILL, &trans->status);
+			if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE,
+					       &trans->status))
 				IWL_DEBUG_RF_KILL(trans,
 						  "Rfkill while SYNC HCMD in flight\n");
 			wake_up(&trans_pcie->wait_command_queue);
 		} else {
-			clear_bit(STATUS_RFKILL, &trans_pcie->status);
+			clear_bit(STATUS_RFKILL, &trans->status);
 		}
 
 		handled |= CSR_INT_BIT_RF_KILL;
@@ -1005,7 +1119,7 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
 
 	/* Re-enable all interrupts */
 	/* only Re-enable if disabled by irq */
-	if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status))
+	if (test_bit(STATUS_INT_ENABLED, &trans->status))
 		iwl_enable_interrupts(trans);
 	/* Re-enable RF_KILL if it occurred */
 	else if (handled & CSR_INT_BIT_RF_KILL)
@@ -1022,11 +1136,6 @@ out:
  *
  ******************************************************************************/
 
-/* a device (PCI-E) page is 4096 bytes long */
-#define ICT_SHIFT	12
-#define ICT_SIZE	(1 << ICT_SHIFT)
-#define ICT_COUNT	(ICT_SIZE / sizeof(u32))
-
 /* Free dram table */
 void iwl_pcie_free_ict(struct iwl_trans *trans)
 {
@@ -1051,7 +1160,7 @@ int iwl_pcie_alloc_ict(struct iwl_trans *trans)
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 
 	trans_pcie->ict_tbl =
-		dma_alloc_coherent(trans->dev, ICT_SIZE,
+		dma_zalloc_coherent(trans->dev, ICT_SIZE,
 				   &trans_pcie->ict_tbl_dma,
 				   GFP_KERNEL);
 	if (!trans_pcie->ict_tbl)
@@ -1063,17 +1172,10 @@ int iwl_pcie_alloc_ict(struct iwl_trans *trans)
 		return -EINVAL;
 	}
 
-	IWL_DEBUG_ISR(trans, "ict dma addr %Lx\n",
-		      (unsigned long long)trans_pcie->ict_tbl_dma);
-
-	IWL_DEBUG_ISR(trans, "ict vir addr %p\n", trans_pcie->ict_tbl);
-
-	/* reset table and index to all 0 */
-	memset(trans_pcie->ict_tbl, 0, ICT_SIZE);
-	trans_pcie->ict_index = 0;
+	IWL_DEBUG_ISR(trans, "ict dma addr %Lx ict vir addr %p\n",
+		      (unsigned long long)trans_pcie->ict_tbl_dma,
+		      trans_pcie->ict_tbl);
 
-	/* add periodic RX interrupt */
-	trans_pcie->inta_mask |= CSR_INT_BIT_RX_PERIODIC;
 	return 0;
 }
 
@@ -1084,12 +1186,11 @@ void iwl_pcie_reset_ict(struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	u32 val;
-	unsigned long flags;
 
 	if (!trans_pcie->ict_tbl)
 		return;
 
-	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+	spin_lock(&trans_pcie->irq_lock);
 	iwl_disable_interrupts(trans);
 
 	memset(trans_pcie->ict_tbl, 0, ICT_SIZE);
@@ -1106,124 +1207,26 @@ void iwl_pcie_reset_ict(struct iwl_trans *trans)
 	trans_pcie->ict_index = 0;
 	iwl_write32(trans, CSR_INT, trans_pcie->inta_mask);
 	iwl_enable_interrupts(trans);
-	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+	spin_unlock(&trans_pcie->irq_lock);
 }
 
 /* Device is going down disable ict interrupt usage */
 void iwl_pcie_disable_ict(struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	unsigned long flags;
 
-	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+	spin_lock(&trans_pcie->irq_lock);
 	trans_pcie->use_ict = false;
-	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+	spin_unlock(&trans_pcie->irq_lock);
 }
 
-/* legacy (non-ICT) ISR. Assumes that trans_pcie->irq_lock is held */
-static irqreturn_t iwl_pcie_isr(int irq, void *data)
+irqreturn_t iwl_pcie_isr(int irq, void *data)
 {
 	struct iwl_trans *trans = data;
-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	u32 inta, inta_mask;
-	irqreturn_t ret = IRQ_NONE;
-
-	lockdep_assert_held(&trans_pcie->irq_lock);
-
-	trace_iwlwifi_dev_irq(trans->dev);
-
-	/* Disable (but don't clear!) interrupts here to avoid
-	 *    back-to-back ISRs and sporadic interrupts from our NIC.
-	 * If we have something to service, the irq thread will re-enable ints.
-	 * If we *don't* have something, we'll re-enable before leaving here. */
-	inta_mask = iwl_read32(trans, CSR_INT_MASK);
-	iwl_write32(trans, CSR_INT_MASK, 0x00000000);
-
-	/* Discover which interrupts are active/pending */
-	inta = iwl_read32(trans, CSR_INT);
-
-	if (inta & (~inta_mask)) {
-		IWL_DEBUG_ISR(trans,
-			      "We got a masked interrupt (0x%08x)...Ack and ignore\n",
-			      inta & (~inta_mask));
-		iwl_write32(trans, CSR_INT, inta & (~inta_mask));
-		inta &= inta_mask;
-	}
-
-	/* Ignore interrupt if there's nothing in NIC to service.
-	 * This may be due to IRQ shared with another device,
-	 * or due to sporadic interrupts thrown from our NIC. */
-	if (!inta) {
-		IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
-		goto none;
-	}
-
-	if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
-		/* Hardware disappeared. It might have already raised
-		 * an interrupt */
-		IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
-		return IRQ_HANDLED;
-	}
-
-	if (iwl_have_debug_level(IWL_DL_ISR))
-		IWL_DEBUG_ISR(trans,
-			      "ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
-			      inta, inta_mask,
-			      iwl_read32(trans, CSR_FH_INT_STATUS));
-
-	trans_pcie->inta |= inta;
-	/* the thread will service interrupts and re-enable them */
-	if (likely(inta))
-		return IRQ_WAKE_THREAD;
-
-	ret = IRQ_HANDLED;
-
-none:
-	/* re-enable interrupts here since we don't have anything to service. */
-	/* only Re-enable if disabled by irq  and no schedules tasklet. */
-	if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
-	    !trans_pcie->inta)
-		iwl_enable_interrupts(trans);
-
-	return ret;
-}
-
-/* interrupt handler using ict table, with this interrupt driver will
- * stop using INTA register to get device's interrupt, reading this register
- * is expensive, device will write interrupts in ICT dram table, increment
- * index then will fire interrupt to driver, driver will OR all ICT table
- * entries from current index up to table entry with 0 value. the result is
- * the interrupt we need to service, driver will set the entries back to 0 and
- * set index.
- */
-irqreturn_t iwl_pcie_isr_ict(int irq, void *data)
-{
-	struct iwl_trans *trans = data;
-	struct iwl_trans_pcie *trans_pcie;
-	u32 inta;
-	u32 val = 0;
-	u32 read;
-	unsigned long flags;
-	irqreturn_t ret = IRQ_NONE;
 
 	if (!trans)
 		return IRQ_NONE;
 
-	trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
-	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
-
-	/* dram interrupt table not set yet,
-	 * use legacy interrupt.
-	 */
-	if (unlikely(!trans_pcie->use_ict)) {
-		ret = iwl_pcie_isr(irq, data);
-		spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
-		return ret;
-	}
-
-	trace_iwlwifi_dev_irq(trans->dev);
-
 	/* Disable (but don't clear!) interrupts here to avoid
 	 * back-to-back ISRs and sporadic interrupts from our NIC.
 	 * If we have something to service, the tasklet will re-enable ints.
@@ -1231,73 +1234,5 @@ irqreturn_t iwl_pcie_isr_ict(int irq, void *data)
 	 */
 	iwl_write32(trans, CSR_INT_MASK, 0x00000000);
 
-	/* Ignore interrupt if there's nothing in NIC to service.
-	 * This may be due to IRQ shared with another device,
-	 * or due to sporadic interrupts thrown from our NIC. */
-	read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
-	trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, read);
-	if (!read) {
-		IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
-		goto none;
-	}
-
-	/*
-	 * Collect all entries up to the first 0, starting from ict_index;
-	 * note we already read at ict_index.
-	 */
-	do {
-		val |= read;
-		IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n",
-				trans_pcie->ict_index, read);
-		trans_pcie->ict_tbl[trans_pcie->ict_index] = 0;
-		trans_pcie->ict_index =
-			iwl_queue_inc_wrap(trans_pcie->ict_index, ICT_COUNT);
-
-		read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
-		trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index,
-					   read);
-	} while (read);
-
-	/* We should not get this value, just ignore it. */
-	if (val == 0xffffffff)
-		val = 0;
-
-	/*
-	 * this is a w/a for a h/w bug. the h/w bug may cause the Rx bit
-	 * (bit 15 before shifting it to 31) to clear when using interrupt
-	 * coalescing. fortunately, bits 18 and 19 stay set when this happens
-	 * so we use them to decide on the real state of the Rx bit.
-	 * In order words, bit 15 is set if bit 18 or bit 19 are set.
-	 */
-	if (val & 0xC0000)
-		val |= 0x8000;
-
-	inta = (0xff & val) | ((0xff00 & val) << 16);
-	IWL_DEBUG_ISR(trans, "ISR inta 0x%08x, enabled(sw) 0x%08x ict 0x%08x\n",
-		      inta, trans_pcie->inta_mask, val);
-	if (iwl_have_debug_level(IWL_DL_ISR))
-		IWL_DEBUG_ISR(trans, "enabled(hw) 0x%08x\n",
-			      iwl_read32(trans, CSR_INT_MASK));
-
-	inta &= trans_pcie->inta_mask;
-	trans_pcie->inta |= inta;
-
-	/* iwl_pcie_tasklet() will service interrupts and re-enable them */
-	if (likely(inta)) {
-		spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
-		return IRQ_WAKE_THREAD;
-	}
-
-	ret = IRQ_HANDLED;
-
- none:
-	/* re-enable interrupts here since we don't have anything to service.
-	 * only Re-enable if disabled by irq.
-	 */
-	if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
-	    !trans_pcie->inta)
-		iwl_enable_interrupts(trans);
-
-	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
-	return ret;
+	return IRQ_WAKE_THREAD;
 }
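
The rx.c changes above split interrupt handling into a minimal hard handler (iwl_pcie_isr(), which only masks interrupts and wakes the IRQ thread) and a threaded handler that reads the interrupt cause either from the INTA register or from the ICT table. A toy sketch of the ICT collection loop only; it uses invented names, no hardware, and omits the Rx-bit workaround and cause-bit remapping done by the real code:

#include <stdint.h>
#include <stdio.h>

#define ICT_COUNT_DEMO 8

static uint32_t ict_tbl[ICT_COUNT_DEMO];
static unsigned int ict_index;

/* OR together non-zero entries from the current index, zeroing each one
 * and wrapping around, until a zero entry is reached. */
static uint32_t collect_ict_causes(void)
{
	uint32_t val = 0;
	uint32_t read = ict_tbl[ict_index];

	if (!read)		/* nothing to service (shared/spurious IRQ) */
		return 0;

	do {
		val |= read;
		ict_tbl[ict_index] = 0;
		ict_index = (ict_index + 1) % ICT_COUNT_DEMO;
		read = ict_tbl[ict_index];
	} while (read);

	return val;
}

int main(void)
{
	ict_tbl[0] = 0x0001;	/* pretend the device wrote two entries */
	ict_tbl[1] = 0x0080;

	printf("causes = 0x%04x, next index = %u\n",
	       collect_ict_causes(), ict_index);	/* 0x0081, index 2 */
	return 0;
}
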
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index cde9c16f6e4f..f9507807b486 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -75,33 +75,6 @@
 #include "iwl-agn-hw.h"
 #include "internal.h"
 
-static void __iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans,
-						  u32 reg, u32 mask, u32 value)
-{
-	u32 v;
-
-#ifdef CONFIG_IWLWIFI_DEBUG
-	WARN_ON_ONCE(value & ~mask);
-#endif
-
-	v = iwl_read32(trans, reg);
-	v &= ~mask;
-	v |= value;
-	iwl_write32(trans, reg, v);
-}
-
-static inline void __iwl_trans_pcie_clear_bit(struct iwl_trans *trans,
-					      u32 reg, u32 mask)
-{
-	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, 0);
-}
-
-static inline void __iwl_trans_pcie_set_bit(struct iwl_trans *trans,
-					    u32 reg, u32 mask)
-{
-	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, mask);
-}
-
 static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
 {
 	if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold))
@@ -150,7 +123,6 @@ static void iwl_pcie_apm_config(struct iwl_trans *trans)
  */
 static int iwl_pcie_apm_init(struct iwl_trans *trans)
 {
-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	int ret = 0;
 	IWL_DEBUG_INFO(trans, "Init card's basic functions\n");
 
@@ -206,6 +178,28 @@ static int iwl_pcie_apm_init(struct iwl_trans *trans)
 		goto out;
 	}
 
+	if (trans->cfg->host_interrupt_operation_mode) {
+		/*
+		 * This is a bit of an abuse: the workaround below is only
+		 * needed on 7260 / 3160, so we key it off
+		 * host_interrupt_operation_mode even though it is not really
+		 * related to that flag.
+		 *
+		 * Enable the oscillator to count wake-up time for L1 exit.
+		 * This consumes slightly more power (100uA), but makes sure
+		 * that we wake up from L1 on time.
+		 *
+		 * This looks weird: read twice the same register, discard the
+		 * value, set a bit, and yet again, read that same register
+		 * just to discard the value. But that's the way the hardware
+		 * seems to like it.
+		 */
+		iwl_read_prph(trans, OSC_CLK);
+		iwl_read_prph(trans, OSC_CLK);
+		iwl_set_bits_prph(trans, OSC_CLK, OSC_CLK_FORCE_CONTROL);
+		iwl_read_prph(trans, OSC_CLK);
+		iwl_read_prph(trans, OSC_CLK);
+	}
+
 	/*
 	 * Enable DMA clock and wait for it to stabilize.
 	 *
@@ -223,7 +217,7 @@ static int iwl_pcie_apm_init(struct iwl_trans *trans)
 	/* Clear the interrupt in APMG if the NIC is in RFKILL */
 	iwl_write_prph(trans, APMG_RTC_INT_STT_REG, APMG_RTC_INT_STT_RFKILL);
 
-	set_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status);
+	set_bit(STATUS_DEVICE_ENABLED, &trans->status);
 
 out:
 	return ret;
@@ -249,10 +243,9 @@ static int iwl_pcie_apm_stop_master(struct iwl_trans *trans)
 
 static void iwl_pcie_apm_stop(struct iwl_trans *trans)
 {
-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n");
 
-	clear_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status);
+	clear_bit(STATUS_DEVICE_ENABLED, &trans->status);
 
 	/* Stop device's DMA activity */
 	iwl_pcie_apm_stop_master(trans);
@@ -273,13 +266,12 @@ static void iwl_pcie_apm_stop(struct iwl_trans *trans)
 static int iwl_pcie_nic_init(struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	unsigned long flags;
 
 	/* nic_init */
-	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+	spin_lock(&trans_pcie->irq_lock);
 	iwl_pcie_apm_init(trans);
 
-	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+	spin_unlock(&trans_pcie->irq_lock);
 
 	iwl_pcie_set_pwr(trans, false);
 
@@ -582,7 +574,6 @@ static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
 static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
 				   const struct fw_img *fw, bool run_in_rfkill)
 {
-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	int ret;
 	bool hw_rfkill;
 
@@ -592,16 +583,14 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
 		return -EIO;
 	}
 
-	clear_bit(STATUS_FW_ERROR, &trans_pcie->status);
-
 	iwl_enable_rfkill_int(trans);
 
 	/* If platform's RF_KILL switch is NOT set to KILL */
 	hw_rfkill = iwl_is_rfkill_set(trans);
 	if (hw_rfkill)
-		set_bit(STATUS_RFKILL, &trans_pcie->status);
+		set_bit(STATUS_RFKILL, &trans->status);
 	else
-		clear_bit(STATUS_RFKILL, &trans_pcie->status);
+		clear_bit(STATUS_RFKILL, &trans->status);
 	iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
 	if (hw_rfkill && !run_in_rfkill)
 		return -ERFKILL;
@@ -640,12 +629,14 @@ static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr)
 static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	unsigned long flags;
+	bool hw_rfkill, was_hw_rfkill;
+
+	was_hw_rfkill = iwl_is_rfkill_set(trans);
 
 	/* tell the device to stop sending interrupts */
-	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+	spin_lock(&trans_pcie->irq_lock);
 	iwl_disable_interrupts(trans);
-	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+	spin_unlock(&trans_pcie->irq_lock);
 
 	/* device going down, Stop using ICT table */
 	iwl_pcie_disable_ict(trans);
@@ -657,7 +648,7 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
 	 * restart. So don't process again if the device is
 	 * already dead.
 	 */
-	if (test_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status)) {
+	if (test_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
 		iwl_pcie_tx_stop(trans);
 		iwl_pcie_rx_stop(trans);
 
@@ -677,21 +668,45 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
 	/* Upon stop, the APM issues an interrupt if HW RF kill is set.
 	 * Clean again the interrupt here
 	 */
-	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+	spin_lock(&trans_pcie->irq_lock);
 	iwl_disable_interrupts(trans);
-	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
-
-	iwl_enable_rfkill_int(trans);
+	spin_unlock(&trans_pcie->irq_lock);
 
 	/* stop and reset the on-board processor */
 	iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
 
 	/* clear all status bits */
-	clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
-	clear_bit(STATUS_INT_ENABLED, &trans_pcie->status);
-	clear_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status);
-	clear_bit(STATUS_TPOWER_PMI, &trans_pcie->status);
-	clear_bit(STATUS_RFKILL, &trans_pcie->status);
+	clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
+	clear_bit(STATUS_INT_ENABLED, &trans->status);
+	clear_bit(STATUS_DEVICE_ENABLED, &trans->status);
+	clear_bit(STATUS_TPOWER_PMI, &trans->status);
+	clear_bit(STATUS_RFKILL, &trans->status);
+
+	/*
+	 * Even if we stop the HW, we still want the RF kill
+	 * interrupt
+	 */
+	iwl_enable_rfkill_int(trans);
+
+	/*
+	 * Check again since the RF kill state may have changed while
+	 * all the interrupts were disabled, in this case we couldn't
+	 * receive the RF kill interrupt and update the state in the
+	 * op_mode.
+	 * Don't call the op_mode if the rfkill state hasn't changed.
+	 * This allows the op_mode to call stop_device from the rfkill
+	 * notification without endless recursion. Under very rare
+	 * circumstances, we might have a small recursion if the rfkill
+	 * state changed exactly now while we were called from stop_device.
+	 * This is very unlikely but can happen and is supported.
+	 */
+	hw_rfkill = iwl_is_rfkill_set(trans);
+	if (hw_rfkill)
+		set_bit(STATUS_RFKILL, &trans->status);
+	else
+		clear_bit(STATUS_RFKILL, &trans->status);
+	if (hw_rfkill != was_hw_rfkill)
+		iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
 }
 
 static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test)
@@ -776,7 +791,6 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
 
 static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
 {
-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	bool hw_rfkill;
 	int err;
 
@@ -787,7 +801,7 @@ static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
 	}
 
 	/* Reset the entire device */
-	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
+	iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
 
 	usleep_range(10, 15);
 
@@ -798,53 +812,30 @@ static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
 
 	hw_rfkill = iwl_is_rfkill_set(trans);
 	if (hw_rfkill)
-		set_bit(STATUS_RFKILL, &trans_pcie->status);
+		set_bit(STATUS_RFKILL, &trans->status);
 	else
-		clear_bit(STATUS_RFKILL, &trans_pcie->status);
+		clear_bit(STATUS_RFKILL, &trans->status);
 	iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
 
 	return 0;
 }
 
-static void iwl_trans_pcie_stop_hw(struct iwl_trans *trans,
-				   bool op_mode_leaving)
+static void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	bool hw_rfkill;
-	unsigned long flags;
 
-	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+	/* disable interrupts - don't enable HW RF kill interrupt */
+	spin_lock(&trans_pcie->irq_lock);
 	iwl_disable_interrupts(trans);
-	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+	spin_unlock(&trans_pcie->irq_lock);
 
 	iwl_pcie_apm_stop(trans);
 
-	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+	spin_lock(&trans_pcie->irq_lock);
 	iwl_disable_interrupts(trans);
-	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+	spin_unlock(&trans_pcie->irq_lock);
 
 	iwl_pcie_disable_ict(trans);
-
-	if (!op_mode_leaving) {
-		/*
-		 * Even if we stop the HW, we still want the RF kill
-		 * interrupt
-		 */
-		iwl_enable_rfkill_int(trans);
-
-		/*
-		 * Check again since the RF kill state may have changed while
-		 * all the interrupts were disabled, in this case we couldn't
-		 * receive the RF kill interrupt and update the state in the
-		 * op_mode.
-		 */
-		hw_rfkill = iwl_is_rfkill_set(trans);
-		if (hw_rfkill)
-			set_bit(STATUS_RFKILL, &trans_pcie->status);
-		else
-			clear_bit(STATUS_RFKILL, &trans_pcie->status);
-		iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
-	}
 }
 
 static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val)
@@ -928,12 +919,10 @@ void iwl_trans_pcie_free(struct iwl_trans *trans)
 
 static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state)
 {
-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
 	if (state)
-		set_bit(STATUS_TPOWER_PMI, &trans_pcie->status);
+		set_bit(STATUS_TPOWER_PMI, &trans->status);
 	else
-		clear_bit(STATUS_TPOWER_PMI, &trans_pcie->status);
+		clear_bit(STATUS_TPOWER_PMI, &trans->status);
 }
 
 static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent,
@@ -944,6 +933,9 @@ static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent,
 
 	spin_lock_irqsave(&trans_pcie->reg_lock, *flags);
 
+	if (trans_pcie->cmd_in_flight)
+		goto out;
+
 	/* this bit wakes up the NIC */
 	__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
 				 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
@@ -983,6 +975,7 @@ static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent,
 		}
 	}
 
+out:
 	/*
 	 * Fool sparse by faking we release the lock - sparse will
 	 * track nic_access anyway.
@@ -1004,6 +997,9 @@ static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans,
 	 */
 	__acquire(&trans_pcie->reg_lock);
 
+	if (trans_pcie->cmd_in_flight)
+		goto out;
+
 	__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
 				   CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
 	/*
@@ -1013,6 +1009,7 @@ static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans,
 	 * scheduled on different CPUs (after we drop reg_lock).
 	 */
 	mmiowb();
+out:
 	spin_unlock_irqrestore(&trans_pcie->reg_lock, *flags);
 }
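
Both the grab and release paths now short-circuit while a host command is in flight, because the enqueue path in tx.c (later in this patch) already holds MAC_ACCESS_REQ for the whole lifetime of the command. A hedged usage sketch of a caller, only to illustrate the pairing; example_read_csr is a hypothetical name, the grab/release calls are the same ones used elsewhere in this patch:

	/* Hedged sketch: read a register with the NIC guaranteed awake.
	 * While cmd_in_flight is set, grab/release are effectively no-ops
	 * since the MAC access request is already asserted. */
	static u32 example_read_csr(struct iwl_trans *trans, u32 ofs)
	{
		unsigned long flags;
		u32 val = 0;

		if (iwl_trans_grab_nic_access(trans, false, &flags)) {
			val = iwl_read32(trans, ofs);
			iwl_trans_release_nic_access(trans, &flags);
		}
		return val;
	}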
 
@@ -1457,7 +1454,7 @@ static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
 
 static const struct iwl_trans_ops trans_ops_pcie = {
 	.start_hw = iwl_trans_pcie_start_hw,
-	.stop_hw = iwl_trans_pcie_stop_hw,
+	.op_mode_leave = iwl_trans_pcie_op_mode_leave,
 	.fw_alive = iwl_trans_pcie_fw_alive,
 	.start_fw = iwl_trans_pcie_start_fw,
 	.stop_device = iwl_trans_pcie_stop_device,
@@ -1609,7 +1606,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 	if (iwl_pcie_alloc_ict(trans))
 		goto out_free_cmd_pool;
 
-	err = request_threaded_irq(pdev->irq, iwl_pcie_isr_ict,
+	err = request_threaded_irq(pdev->irq, iwl_pcie_isr,
 				   iwl_pcie_irq_handler,
 				   IRQF_SHARED, DRV_NAME, trans);
 	if (err) {
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
index 0adde919a258..3d549008b3e2 100644
--- a/drivers/net/wireless/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -1,6 +1,6 @@
 /******************************************************************************
  *
- * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
  *
  * Portions of this file are derived from the ipw3945 project, as well
  * as portions of the ieee80211 subsystem header files.
@@ -207,7 +207,7 @@ static void iwl_pcie_txq_stuck_timer(unsigned long data)
 		IWL_ERR(trans, "scratch %d = 0x%08x\n", i,
 			le32_to_cpu(txq->scratchbufs[i].scratch));
 
-	iwl_nic_error(trans);
+	iwl_trans_fw_error(trans);
 }
 
 /*
@@ -289,21 +289,21 @@ static void iwl_pcie_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
  */
 void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq)
 {
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	u32 reg = 0;
 	int txq_id = txq->q.id;
 
 	if (txq->need_update == 0)
 		return;
 
-	if (trans->cfg->base_params->shadow_reg_enable) {
+	if (trans->cfg->base_params->shadow_reg_enable ||
+	    txq_id == trans_pcie->cmd_queue) {
 		/* shadow register enabled */
 		iwl_write32(trans, HBUS_TARG_WRPTR,
 			    txq->q.write_ptr | (txq_id << 8));
 	} else {
-		struct iwl_trans_pcie *trans_pcie =
-			IWL_TRANS_GET_PCIE_TRANS(trans);
 		/* if we're trying to save power */
-		if (test_bit(STATUS_TPOWER_PMI, &trans_pcie->status)) {
+		if (test_bit(STATUS_TPOWER_PMI, &trans->status)) {
 			/* wake up nic if it's powered down ...
 			 * uCode will wake up, and interrupt us again, so next
 			 * time we'll skip this part. */
@@ -739,10 +739,9 @@ int iwl_pcie_tx_stop(struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	int ch, txq_id, ret;
-	unsigned long flags;
 
 	/* Turn off all Tx DMA fifos */
-	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+	spin_lock(&trans_pcie->irq_lock);
 
 	iwl_pcie_txq_set_sched(trans, 0);
 
@@ -759,13 +758,19 @@ int iwl_pcie_tx_stop(struct iwl_trans *trans)
 				iwl_read_direct32(trans,
 						  FH_TSSR_TX_STATUS_REG));
 	}
-	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+	spin_unlock(&trans_pcie->irq_lock);
 
-	if (!trans_pcie->txq) {
-		IWL_WARN(trans,
-			 "Stopping tx queues that aren't allocated...\n");
+	/*
+	 * This function can be called before the op_mode has disabled
+	 * the queues, which happens when we get an rfkill interrupt.
+	 * Since we stop Tx altogether, mark all queues as stopped.
+	 */
+	memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
+	memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
+
+	/* This can happen: start_hw, stop_device */
+	if (!trans_pcie->txq)
 		return 0;
-	}
 
 	/* Unmap DMA from host system and free skb's */
 	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
@@ -867,7 +872,6 @@ int iwl_pcie_tx_init(struct iwl_trans *trans)
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	int ret;
 	int txq_id, slots_num;
-	unsigned long flags;
 	bool alloc = false;
 
 	if (!trans_pcie->txq) {
@@ -877,7 +881,7 @@ int iwl_pcie_tx_init(struct iwl_trans *trans)
 		alloc = true;
 	}
 
-	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+	spin_lock(&trans_pcie->irq_lock);
 
 	/* Turn off all Tx DMA fifos */
 	iwl_write_prph(trans, SCD_TXFACT, 0);
@@ -886,7 +890,7 @@ int iwl_pcie_tx_init(struct iwl_trans *trans)
 	iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
 			   trans_pcie->kw.dma >> 4);
 
-	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+	spin_unlock(&trans_pcie->irq_lock);
 
 	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
 	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
@@ -1005,6 +1009,7 @@ static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_txq *txq = &trans_pcie->txq[txq_id];
 	struct iwl_queue *q = &txq->q;
+	unsigned long flags;
 	int nfreed = 0;
 
 	lockdep_assert_held(&txq->lock);
@@ -1023,10 +1028,20 @@ static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
 		if (nfreed++ > 0) {
 			IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n",
 				idx, q->write_ptr, q->read_ptr);
-			iwl_nic_error(trans);
+			iwl_trans_fw_error(trans);
 		}
 	}
 
+	if (q->read_ptr == q->write_ptr) {
+		spin_lock_irqsave(&trans_pcie->reg_lock, flags);
+		WARN_ON(!trans_pcie->cmd_in_flight);
+		trans_pcie->cmd_in_flight = false;
+		__iwl_trans_pcie_clear_bit(trans,
+					   CSR_GP_CNTRL,
+					   CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+		spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
+	}
+
 	iwl_pcie_txq_progress(trans_pcie, txq);
 }
 
@@ -1143,8 +1158,15 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id)
 			SCD_TX_STTS_QUEUE_OFFSET(txq_id);
 	static const u32 zero_val[4] = {};
 
+	/*
+	 * Upon HW RF-kill we stop the device and then stop the queues
+	 * in the op_mode. To keep the op_mode simple, allow the op_mode
+	 * to call txq_disable even after it has already called
+	 * stop_device.
+	 */
 	if (!test_and_clear_bit(txq_id, trans_pcie->queue_used)) {
-		WARN_ONCE(1, "queue %d not used", txq_id);
+		WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status),
+			  "queue %d not used", txq_id);
 		return;
 	}
 
@@ -1178,12 +1200,13 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
 	struct iwl_queue *q = &txq->q;
 	struct iwl_device_cmd *out_cmd;
 	struct iwl_cmd_meta *out_meta;
+	unsigned long flags;
 	void *dup_buf = NULL;
 	dma_addr_t phys_addr;
 	int idx;
 	u16 copy_size, cmd_size, scratch_size;
 	bool had_nocopy = false;
-	int i;
+	int i, ret;
 	u32 cmd_pos;
 	const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
 	u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
@@ -1381,10 +1404,38 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
 	if (q->read_ptr == q->write_ptr && trans_pcie->wd_timeout)
 		mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);
 
+	spin_lock_irqsave(&trans_pcie->reg_lock, flags);
+
+	/*
+	 * Wake up the NIC to make sure that the firmware will see the host
+	 * command - we will let the NIC sleep once all the host commands
+	 * have returned.
+	 */
+	if (!trans_pcie->cmd_in_flight) {
+		trans_pcie->cmd_in_flight = true;
+		__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
+					 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+		ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
+				   CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
+				   (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
+				    CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP),
+				   15000);
+		if (ret < 0) {
+			__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
+				   CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+			spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
+			trans_pcie->cmd_in_flight = false;
+			idx = -EIO;
+			goto out;
+		}
+	}
+
 	/* Increment and update queue's write index */
 	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
 	iwl_pcie_txq_inc_wr_ptr(trans, txq);
 
+	spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
+
  out:
 	spin_unlock_bh(&txq->lock);
  free_dup_buf:
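
For context, a hedged sketch of how an op_mode issues a synchronous host command through the transport. With this change the first in-flight command asserts MAC_ACCESS_REQ so the firmware is guaranteed to see it, and iwl_pcie_cmdq_reclaim() above releases it once the command queue drains. The command id REPLY_ECHO is only an illustrative choice, not something this patch touches:

	/* Hedged usage sketch, not part of this patch. */
	static int example_send_sync_cmd(struct iwl_trans *trans)
	{
		struct iwl_host_cmd cmd = {
			.id = REPLY_ECHO,	/* assumed, illustrative command id */
			.flags = 0,		/* synchronous: waits on STATUS_SYNC_HCMD_ACTIVE */
		};

		return iwl_trans_send_cmd(trans, &cmd);
	}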
@@ -1449,12 +1500,12 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
 	iwl_pcie_cmdq_reclaim(trans, txq_id, index);
 
 	if (!(meta->flags & CMD_ASYNC)) {
-		if (!test_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status)) {
+		if (!test_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status)) {
 			IWL_WARN(trans,
 				 "HCMD_ACTIVE already clear for command %s\n",
 				 get_cmd_string(trans_pcie, cmd->hdr.cmd));
 		}
-		clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
+		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
 		IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
 			       get_cmd_string(trans_pcie, cmd->hdr.cmd));
 		wake_up(&trans_pcie->wait_command_queue);
@@ -1466,7 +1517,6 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
 }
 
 #define HOST_COMPLETE_TIMEOUT	(2 * HZ)
-#define COMMAND_POKE_TIMEOUT	(HZ / 10)
 
 static int iwl_pcie_send_hcmd_async(struct iwl_trans *trans,
 				    struct iwl_host_cmd *cmd)
@@ -1494,13 +1544,12 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	int cmd_idx;
 	int ret;
-	int timeout = HOST_COMPLETE_TIMEOUT;
 
 	IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n",
 		       get_cmd_string(trans_pcie, cmd->id));
 
-	if (WARN(test_and_set_bit(STATUS_HCMD_ACTIVE,
-				  &trans_pcie->status),
+	if (WARN(test_and_set_bit(STATUS_SYNC_HCMD_ACTIVE,
+				  &trans->status),
 		 "Command %s: a command is already active!\n",
 		 get_cmd_string(trans_pcie, cmd->id)))
 		return -EIO;
@@ -1511,64 +1560,39 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
 	cmd_idx = iwl_pcie_enqueue_hcmd(trans, cmd);
 	if (cmd_idx < 0) {
 		ret = cmd_idx;
-		clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
+		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
 		IWL_ERR(trans,
 			"Error sending %s: enqueue_hcmd failed: %d\n",
 			get_cmd_string(trans_pcie, cmd->id), ret);
 		return ret;
 	}
 
-	while (timeout > 0) {
-		unsigned long flags;
-
-		timeout -= COMMAND_POKE_TIMEOUT;
-		ret = wait_event_timeout(trans_pcie->wait_command_queue,
-					 !test_bit(STATUS_HCMD_ACTIVE,
-						   &trans_pcie->status),
-					 COMMAND_POKE_TIMEOUT);
-		if (ret)
-			break;
-		/* poke the device - it may have lost the command */
-		if (iwl_trans_grab_nic_access(trans, true, &flags)) {
-			iwl_trans_release_nic_access(trans, &flags);
-			IWL_DEBUG_INFO(trans,
-				       "Tried to wake NIC for command %s\n",
-				       get_cmd_string(trans_pcie, cmd->id));
-		} else {
-			IWL_ERR(trans, "Failed to poke NIC for command %s\n",
-				get_cmd_string(trans_pcie, cmd->id));
-			break;
-		}
-	}
-
+	ret = wait_event_timeout(trans_pcie->wait_command_queue,
+				 !test_bit(STATUS_SYNC_HCMD_ACTIVE,
+					   &trans->status),
+				 HOST_COMPLETE_TIMEOUT);
 	if (!ret) {
-		if (test_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status)) {
-			struct iwl_txq *txq =
-				&trans_pcie->txq[trans_pcie->cmd_queue];
-			struct iwl_queue *q = &txq->q;
+		struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
+		struct iwl_queue *q = &txq->q;
 
-			IWL_ERR(trans,
-				"Error sending %s: time out after %dms.\n",
-				get_cmd_string(trans_pcie, cmd->id),
-				jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
+		IWL_ERR(trans, "Error sending %s: time out after %dms.\n",
+			get_cmd_string(trans_pcie, cmd->id),
+			jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
 
-			IWL_ERR(trans,
-				"Current CMD queue read_ptr %d write_ptr %d\n",
-				q->read_ptr, q->write_ptr);
+		IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n",
+			q->read_ptr, q->write_ptr);
 
-			clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
-			IWL_DEBUG_INFO(trans,
-				       "Clearing HCMD_ACTIVE for command %s\n",
-				       get_cmd_string(trans_pcie, cmd->id));
-			ret = -ETIMEDOUT;
+		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
+		IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
+			       get_cmd_string(trans_pcie, cmd->id));
+		ret = -ETIMEDOUT;
 
-			iwl_nic_error(trans);
+		iwl_trans_fw_error(trans);
 
-			goto cancel;
-		}
+		goto cancel;
 	}
 
-	if (test_bit(STATUS_FW_ERROR, &trans_pcie->status)) {
+	if (test_bit(STATUS_FW_ERROR, &trans->status)) {
 		IWL_ERR(trans, "FW error in SYNC CMD %s\n",
 			get_cmd_string(trans_pcie, cmd->id));
 		dump_stack();
@@ -1577,7 +1601,7 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
 	}
 
 	if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
-	    test_bit(STATUS_RFKILL, &trans_pcie->status)) {
+	    test_bit(STATUS_RFKILL, &trans->status)) {
 		IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n");
 		ret = -ERFKILL;
 		goto cancel;
@@ -1614,13 +1638,8 @@ cancel:
 
 int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
 {
-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
-	if (test_bit(STATUS_FW_ERROR, &trans_pcie->status))
-		return -EIO;
-
 	if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
-	    test_bit(STATUS_RFKILL, &trans_pcie->status)) {
+	    test_bit(STATUS_RFKILL, &trans->status)) {
 		IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n",
 				  cmd->id);
 		return -ERFKILL;
@@ -1674,7 +1693,6 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 	txq->entries[q->write_ptr].skb = skb;
 	txq->entries[q->write_ptr].cmd = dev_cmd;
 
-	dev_cmd->hdr.cmd = REPLY_TX;
 	dev_cmd->hdr.sequence =
 		cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
 			    INDEX_TO_SEQ(q->write_ptr)));
diff --git a/drivers/net/wireless/libertas/README b/drivers/net/wireless/libertas/README
index 91f2ca90c70f..1a554a685e91 100644
--- a/drivers/net/wireless/libertas/README
+++ b/drivers/net/wireless/libertas/README
@@ -8,9 +8,8 @@
  Ltd. under the terms of the GNU General Public License Version 2, June 1991
  (the "License").  You may use, redistribute and/or modify this File in
  accordance with the terms and conditions of the License, a copy of which
- is available along with the File in the license.txt file or by writing to
- the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- 02111-1307 or on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+ is available along with the File in the license.txt file or on the worldwide
+ web at http://www.gnu.org/licenses/gpl.txt.
 
  THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
  IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c
index 116f4aba08d6..32f75007a825 100644
--- a/drivers/net/wireless/libertas/cfg.c
+++ b/drivers/net/wireless/libertas/cfg.c
@@ -1268,14 +1268,9 @@ static struct cfg80211_scan_request *
 _new_connect_scan_req(struct wiphy *wiphy, struct cfg80211_connect_params *sme)
 {
 	struct cfg80211_scan_request *creq = NULL;
-	int i, n_channels = 0;
+	int i, n_channels = ieee80211_get_num_supported_channels(wiphy);
 	enum ieee80211_band band;
 
-	for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
-		if (wiphy->bands[band])
-			n_channels += wiphy->bands[band]->n_channels;
-	}
-
 	creq = kzalloc(sizeof(*creq) + sizeof(struct cfg80211_ssid) +
 		       n_channels * sizeof(void *),
 		       GFP_ATOMIC);
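
The open-coded band walk removed here is replaced by the cfg80211 helper. A hedged reconstruction of an equivalent helper, built only from the removed loop (the in-tree ieee80211_get_num_supported_channels() presumably does essentially this):

	/* Hedged reconstruction from the removed loop; see cfg80211 for the
	 * real ieee80211_get_num_supported_channels(). */
	static int example_num_supported_channels(struct wiphy *wiphy)
	{
		enum ieee80211_band band;
		int n_channels = 0;

		for (band = 0; band < IEEE80211_NUM_BANDS; band++)
			if (wiphy->bands[band])
				n_channels += wiphy->bands[band]->n_channels;

		return n_channels;
	}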
diff --git a/drivers/net/wireless/libertas/if_sdio.c b/drivers/net/wireless/libertas/if_sdio.c
index 991238afd1b6..58c6ee5de98f 100644
--- a/drivers/net/wireless/libertas/if_sdio.c
+++ b/drivers/net/wireless/libertas/if_sdio.c
@@ -849,7 +849,7 @@ static void if_sdio_finish_power_on(struct if_sdio_card *card)
 			card->started = true;
 			/* Tell PM core that we don't need the card to be
 			 * powered now */
-			pm_runtime_put_noidle(&func->dev);
+			pm_runtime_put(&func->dev);
 		}
 	}
 
@@ -907,8 +907,8 @@ static int if_sdio_power_on(struct if_sdio_card *card)
 	sdio_release_host(func);
 	ret = if_sdio_prog_firmware(card);
 	if (ret) {
-		sdio_disable_func(func);
-		return ret;
+		sdio_claim_host(func);
+		goto disable;
 	}
 
 	return 0;
diff --git a/drivers/net/wireless/libertas/if_spi.c b/drivers/net/wireless/libertas/if_spi.c
index 83669151bb82..f11728a866ff 100644
--- a/drivers/net/wireless/libertas/if_spi.c
+++ b/drivers/net/wireless/libertas/if_spi.c
@@ -93,7 +93,6 @@ static void free_if_spi_card(struct if_spi_card *card)
 		list_del(&packet->list);
 		kfree(packet);
 	}
-	spi_set_drvdata(card->spi, NULL);
 	kfree(card);
 }
 
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index a1b32ee9594a..69d4c3179d04 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -159,10 +159,15 @@ static const struct ieee80211_regdomain hwsim_world_regdom_custom_02 = {
 	.reg_rules = {
 		REG_RULE(2412-10, 2462+10, 40, 0, 20, 0),
 		REG_RULE(5725-10, 5850+10, 40, 0, 30,
-			NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS),
+			 NL80211_RRF_NO_IR),
 	}
 };
 
+static const struct ieee80211_regdomain *hwsim_world_regdom_custom[] = {
+	&hwsim_world_regdom_custom_01,
+	&hwsim_world_regdom_custom_02,
+};
+
 struct hwsim_vif_priv {
 	u32 magic;
 	u8 bssid[ETH_ALEN];
@@ -321,8 +326,52 @@ static const struct ieee80211_rate hwsim_rates[] = {
 	{ .bitrate = 540 }
 };
 
+static const struct ieee80211_iface_limit hwsim_if_limits[] = {
+	{ .max = 1, .types = BIT(NL80211_IFTYPE_ADHOC) },
+	{ .max = 2048,  .types = BIT(NL80211_IFTYPE_STATION) |
+				 BIT(NL80211_IFTYPE_P2P_CLIENT) |
+#ifdef CONFIG_MAC80211_MESH
+				 BIT(NL80211_IFTYPE_MESH_POINT) |
+#endif
+				 BIT(NL80211_IFTYPE_AP) |
+				 BIT(NL80211_IFTYPE_P2P_GO) },
+	{ .max = 1, .types = BIT(NL80211_IFTYPE_P2P_DEVICE) },
+};
+
+static const struct ieee80211_iface_limit hwsim_if_dfs_limits[] = {
+	{ .max = 8, .types = BIT(NL80211_IFTYPE_AP) },
+};
+
+static const struct ieee80211_iface_combination hwsim_if_comb[] = {
+	{
+		.limits = hwsim_if_limits,
+		.n_limits = ARRAY_SIZE(hwsim_if_limits),
+		.max_interfaces = 2048,
+		.num_different_channels = 1,
+	},
+	{
+		.limits = hwsim_if_dfs_limits,
+		.n_limits = ARRAY_SIZE(hwsim_if_dfs_limits),
+		.max_interfaces = 8,
+		.num_different_channels = 1,
+		.radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
+				       BIT(NL80211_CHAN_WIDTH_20) |
+				       BIT(NL80211_CHAN_WIDTH_40) |
+				       BIT(NL80211_CHAN_WIDTH_80) |
+				       BIT(NL80211_CHAN_WIDTH_160),
+	}
+};
+
 static spinlock_t hwsim_radio_lock;
 static struct list_head hwsim_radios;
+static int hwsim_radio_idx;
+
+static struct platform_driver mac80211_hwsim_driver = {
+	.driver = {
+		.name = "mac80211_hwsim",
+		.owner = THIS_MODULE,
+	},
+};
 
 struct mac80211_hwsim_data {
 	struct list_head list;
@@ -332,8 +381,10 @@ struct mac80211_hwsim_data {
 	struct ieee80211_channel channels_2ghz[ARRAY_SIZE(hwsim_channels_2ghz)];
 	struct ieee80211_channel channels_5ghz[ARRAY_SIZE(hwsim_channels_5ghz)];
 	struct ieee80211_rate rates[ARRAY_SIZE(hwsim_rates)];
+	struct ieee80211_iface_combination if_combination;
 
 	struct mac_address addresses[2];
+	int channels, idx;
 
 	struct ieee80211_channel *tmp_chan;
 	struct delayed_work roc_done;
@@ -353,7 +404,6 @@ struct mac80211_hwsim_data {
 	} ps;
 	bool ps_poll_pending;
 	struct dentry *debugfs;
-	struct dentry *debugfs_ps;
 
 	struct sk_buff_head pending;	/* packets pending */
 	/*
@@ -362,7 +412,6 @@ struct mac80211_hwsim_data {
 	 * radio can be in more then one group.
 	 */
 	u64 group;
-	struct dentry *debugfs_group;
 
 	int power_level;
 
@@ -403,21 +452,179 @@ static struct genl_family hwsim_genl_family = {
 /* MAC80211_HWSIM netlink policy */
 
 static struct nla_policy hwsim_genl_policy[HWSIM_ATTR_MAX + 1] = {
-	[HWSIM_ATTR_ADDR_RECEIVER] = { .type = NLA_UNSPEC,
-				       .len = 6*sizeof(u8) },
-	[HWSIM_ATTR_ADDR_TRANSMITTER] = { .type = NLA_UNSPEC,
-					  .len = 6*sizeof(u8) },
+	[HWSIM_ATTR_ADDR_RECEIVER] = { .type = NLA_UNSPEC, .len = ETH_ALEN },
+	[HWSIM_ATTR_ADDR_TRANSMITTER] = { .type = NLA_UNSPEC, .len = ETH_ALEN },
 	[HWSIM_ATTR_FRAME] = { .type = NLA_BINARY,
 			       .len = IEEE80211_MAX_DATA_LEN },
 	[HWSIM_ATTR_FLAGS] = { .type = NLA_U32 },
 	[HWSIM_ATTR_RX_RATE] = { .type = NLA_U32 },
 	[HWSIM_ATTR_SIGNAL] = { .type = NLA_U32 },
 	[HWSIM_ATTR_TX_INFO] = { .type = NLA_UNSPEC,
-				 .len = IEEE80211_TX_MAX_RATES*sizeof(
-					struct hwsim_tx_rate)},
+				 .len = IEEE80211_TX_MAX_RATES *
+					sizeof(struct hwsim_tx_rate)},
 	[HWSIM_ATTR_COOKIE] = { .type = NLA_U64 },
+	[HWSIM_ATTR_CHANNELS] = { .type = NLA_U32 },
+	[HWSIM_ATTR_RADIO_ID] = { .type = NLA_U32 },
+	[HWSIM_ATTR_REG_HINT_ALPHA2] = { .type = NLA_STRING, .len = 2 },
+	[HWSIM_ATTR_REG_CUSTOM_REG] = { .type = NLA_U32 },
+	[HWSIM_ATTR_REG_STRICT_REG] = { .type = NLA_FLAG },
 };
 
+static void mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
+				    struct sk_buff *skb,
+				    struct ieee80211_channel *chan);
+
+/* debugfs attributes and helpers */
+static void hwsim_send_ps_poll(void *dat, u8 *mac, struct ieee80211_vif *vif)
+{
+	struct mac80211_hwsim_data *data = dat;
+	struct hwsim_vif_priv *vp = (void *)vif->drv_priv;
+	struct sk_buff *skb;
+	struct ieee80211_pspoll *pspoll;
+
+	if (!vp->assoc)
+		return;
+
+	wiphy_debug(data->hw->wiphy,
+		    "%s: send PS-Poll to %pM for aid %d\n",
+		    __func__, vp->bssid, vp->aid);
+
+	skb = dev_alloc_skb(sizeof(*pspoll));
+	if (!skb)
+		return;
+	pspoll = (void *) skb_put(skb, sizeof(*pspoll));
+	pspoll->frame_control = cpu_to_le16(IEEE80211_FTYPE_CTL |
+					    IEEE80211_STYPE_PSPOLL |
+					    IEEE80211_FCTL_PM);
+	pspoll->aid = cpu_to_le16(0xc000 | vp->aid);
+	memcpy(pspoll->bssid, vp->bssid, ETH_ALEN);
+	memcpy(pspoll->ta, mac, ETH_ALEN);
+
+	rcu_read_lock();
+	mac80211_hwsim_tx_frame(data->hw, skb,
+				rcu_dereference(vif->chanctx_conf)->def.chan);
+	rcu_read_unlock();
+}
+
+static void hwsim_send_nullfunc(struct mac80211_hwsim_data *data, u8 *mac,
+				struct ieee80211_vif *vif, int ps)
+{
+	struct hwsim_vif_priv *vp = (void *)vif->drv_priv;
+	struct sk_buff *skb;
+	struct ieee80211_hdr *hdr;
+
+	if (!vp->assoc)
+		return;
+
+	wiphy_debug(data->hw->wiphy,
+		    "%s: send data::nullfunc to %pM ps=%d\n",
+		    __func__, vp->bssid, ps);
+
+	skb = dev_alloc_skb(sizeof(*hdr));
+	if (!skb)
+		return;
+	hdr = (void *) skb_put(skb, sizeof(*hdr) - ETH_ALEN);
+	hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
+					 IEEE80211_STYPE_NULLFUNC |
+					 (ps ? IEEE80211_FCTL_PM : 0));
+	hdr->duration_id = cpu_to_le16(0);
+	memcpy(hdr->addr1, vp->bssid, ETH_ALEN);
+	memcpy(hdr->addr2, mac, ETH_ALEN);
+	memcpy(hdr->addr3, vp->bssid, ETH_ALEN);
+
+	rcu_read_lock();
+	mac80211_hwsim_tx_frame(data->hw, skb,
+				rcu_dereference(vif->chanctx_conf)->def.chan);
+	rcu_read_unlock();
+}
+
+
+static void hwsim_send_nullfunc_ps(void *dat, u8 *mac,
+				   struct ieee80211_vif *vif)
+{
+	struct mac80211_hwsim_data *data = dat;
+	hwsim_send_nullfunc(data, mac, vif, 1);
+}
+
+static void hwsim_send_nullfunc_no_ps(void *dat, u8 *mac,
+				      struct ieee80211_vif *vif)
+{
+	struct mac80211_hwsim_data *data = dat;
+	hwsim_send_nullfunc(data, mac, vif, 0);
+}
+
+static int hwsim_fops_ps_read(void *dat, u64 *val)
+{
+	struct mac80211_hwsim_data *data = dat;
+	*val = data->ps;
+	return 0;
+}
+
+static int hwsim_fops_ps_write(void *dat, u64 val)
+{
+	struct mac80211_hwsim_data *data = dat;
+	enum ps_mode old_ps;
+
+	if (val != PS_DISABLED && val != PS_ENABLED && val != PS_AUTO_POLL &&
+	    val != PS_MANUAL_POLL)
+		return -EINVAL;
+
+	old_ps = data->ps;
+	data->ps = val;
+
+	if (val == PS_MANUAL_POLL) {
+		ieee80211_iterate_active_interfaces(data->hw,
+						    IEEE80211_IFACE_ITER_NORMAL,
+						    hwsim_send_ps_poll, data);
+		data->ps_poll_pending = true;
+	} else if (old_ps == PS_DISABLED && val != PS_DISABLED) {
+		ieee80211_iterate_active_interfaces(data->hw,
+						    IEEE80211_IFACE_ITER_NORMAL,
+						    hwsim_send_nullfunc_ps,
+						    data);
+	} else if (old_ps != PS_DISABLED && val == PS_DISABLED) {
+		ieee80211_iterate_active_interfaces(data->hw,
+						    IEEE80211_IFACE_ITER_NORMAL,
+						    hwsim_send_nullfunc_no_ps,
+						    data);
+	}
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(hwsim_fops_ps, hwsim_fops_ps_read, hwsim_fops_ps_write,
+			"%llu\n");
+
+static int hwsim_write_simulate_radar(void *dat, u64 val)
+{
+	struct mac80211_hwsim_data *data = dat;
+
+	ieee80211_radar_detected(data->hw);
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(hwsim_simulate_radar, NULL,
+			hwsim_write_simulate_radar, "%llu\n");
+
+static int hwsim_fops_group_read(void *dat, u64 *val)
+{
+	struct mac80211_hwsim_data *data = dat;
+	*val = data->group;
+	return 0;
+}
+
+static int hwsim_fops_group_write(void *dat, u64 val)
+{
+	struct mac80211_hwsim_data *data = dat;
+	data->group = val;
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(hwsim_fops_group,
+			hwsim_fops_group_read, hwsim_fops_group_write,
+			"%llx\n");
+
 static netdev_tx_t hwsim_mon_xmit(struct sk_buff *skb,
 					struct net_device *dev)
 {
@@ -641,7 +848,7 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
 	}
 
 	if (nla_put(skb, HWSIM_ATTR_ADDR_TRANSMITTER,
-		    sizeof(struct mac_address), data->addresses[1].addr))
+		    ETH_ALEN, data->addresses[1].addr))
 		goto nla_put_failure;
 
 	/* We get the skb->data */
@@ -880,7 +1087,7 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw,
 		return;
 	}
 
-	if (channels == 1) {
+	if (data->channels == 1) {
 		channel = data->channel;
 	} else if (txi->hw_queue == 4) {
 		channel = data->tmp_chan;
@@ -908,7 +1115,7 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw,
 	if (control->sta)
 		hwsim_check_sta_magic(control->sta);
 
-	if (rctbl)
+	if (hw->flags & IEEE80211_HW_SUPPORTS_RC_TABLE)
 		ieee80211_get_tx_rates(txi->control.vif, control->sta, skb,
 				       txi->control.rates,
 				       ARRAY_SIZE(txi->control.rates));
@@ -1015,7 +1222,7 @@ static void mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
 {
 	u32 _pid = ACCESS_ONCE(wmediumd_portid);
 
-	if (rctbl) {
+	if (hw->flags & IEEE80211_HW_SUPPORTS_RC_TABLE) {
 		struct ieee80211_tx_info *txi = IEEE80211_SKB_CB(skb);
 		ieee80211_get_tx_rates(txi->control.vif, NULL, skb,
 				       txi->control.rates,
@@ -1052,7 +1259,7 @@ static void mac80211_hwsim_beacon_tx(void *arg, u8 *mac,
 	if (skb == NULL)
 		return;
 	info = IEEE80211_SKB_CB(skb);
-	if (rctbl)
+	if (hw->flags & IEEE80211_HW_SUPPORTS_RC_TABLE)
 		ieee80211_get_tx_rates(vif, NULL, skb,
 				       info->control.rates,
 				       ARRAY_SIZE(info->control.rates));
@@ -1143,7 +1350,7 @@ static int mac80211_hwsim_config(struct ieee80211_hw *hw, u32 changed)
 
 	data->channel = conf->chandef.chan;
 
-	WARN_ON(data->channel && channels > 1);
+	WARN_ON(data->channel && data->channels > 1);
 
 	data->power_level = conf->power_level;
 	if (!data->started || !data->beacon_int)
@@ -1390,8 +1597,6 @@ static const struct nla_policy hwsim_testmode_policy[HWSIM_TM_ATTR_MAX + 1] = {
 	[HWSIM_TM_ATTR_PS] = { .type = NLA_U32 },
 };
 
-static int hwsim_fops_ps_write(void *dat, u64 val);
-
 static int mac80211_hwsim_testmode_cmd(struct ieee80211_hw *hw,
 				       struct ieee80211_vif *vif,
 				       void *data, int len)
@@ -1493,7 +1698,7 @@ static void hw_scan_work(struct work_struct *work)
 		    req->channels[hwsim->scan_chan_idx]->center_freq);
 
 	hwsim->tmp_chan = req->channels[hwsim->scan_chan_idx];
-	if (hwsim->tmp_chan->flags & IEEE80211_CHAN_PASSIVE_SCAN ||
+	if (hwsim->tmp_chan->flags & IEEE80211_CHAN_NO_IR ||
 	    !req->n_ssids) {
 		dwell = 120;
 	} else {
@@ -1702,8 +1907,7 @@ static void mac80211_hwsim_unassign_vif_chanctx(struct ieee80211_hw *hw,
 	hwsim_check_chanctx_magic(ctx);
 }
 
-static struct ieee80211_ops mac80211_hwsim_ops =
-{
+static const struct ieee80211_ops mac80211_hwsim_ops = {
 	.tx = mac80211_hwsim_tx,
 	.start = mac80211_hwsim_start,
 	.stop = mac80211_hwsim_stop,
@@ -1728,208 +1932,290 @@ static struct ieee80211_ops mac80211_hwsim_ops =
 	.set_tsf = mac80211_hwsim_set_tsf,
 };
 
+static struct ieee80211_ops mac80211_hwsim_mchan_ops;
 
-static void mac80211_hwsim_free(void)
+static int mac80211_hwsim_create_radio(int channels, const char *reg_alpha2,
+				       const struct ieee80211_regdomain *regd,
+				       bool reg_strict)
 {
-	struct list_head tmplist, *i, *tmp;
-	struct mac80211_hwsim_data *data, *tmpdata;
-
-	INIT_LIST_HEAD(&tmplist);
+	int err;
+	u8 addr[ETH_ALEN];
+	struct mac80211_hwsim_data *data;
+	struct ieee80211_hw *hw;
+	enum ieee80211_band band;
+	const struct ieee80211_ops *ops = &mac80211_hwsim_ops;
+	int idx;
 
 	spin_lock_bh(&hwsim_radio_lock);
-	list_for_each_safe(i, tmp, &hwsim_radios)
-		list_move(i, &tmplist);
+	idx = hwsim_radio_idx++;
 	spin_unlock_bh(&hwsim_radio_lock);
 
-	list_for_each_entry_safe(data, tmpdata, &tmplist, list) {
-		debugfs_remove(data->debugfs_group);
-		debugfs_remove(data->debugfs_ps);
-		debugfs_remove(data->debugfs);
-		ieee80211_unregister_hw(data->hw);
-		device_release_driver(data->dev);
-		device_unregister(data->dev);
-		ieee80211_free_hw(data->hw);
+	if (channels > 1)
+		ops = &mac80211_hwsim_mchan_ops;
+	hw = ieee80211_alloc_hw(sizeof(*data), ops);
+	if (!hw) {
+		printk(KERN_DEBUG "mac80211_hwsim: ieee80211_alloc_hw failed\n");
+		err = -ENOMEM;
+		goto failed;
+	}
+	data = hw->priv;
+	data->hw = hw;
+
+	data->dev = device_create(hwsim_class, NULL, 0, hw, "hwsim%d", idx);
+	if (IS_ERR(data->dev)) {
+		printk(KERN_DEBUG
+		       "mac80211_hwsim: device_create failed (%ld)\n",
+		       PTR_ERR(data->dev));
+		err = -ENOMEM;
+		goto failed_drvdata;
+	}
+	data->dev->driver = &mac80211_hwsim_driver.driver;
+	err = device_bind_driver(data->dev);
+	if (err != 0) {
+		printk(KERN_DEBUG "mac80211_hwsim: device_bind_driver failed (%d)\n",
+		       err);
+		goto failed_hw;
 	}
-	class_destroy(hwsim_class);
-}
-
-static struct platform_driver mac80211_hwsim_driver = {
-	.driver = {
-		.name = "mac80211_hwsim",
-		.owner = THIS_MODULE,
-	},
-};
-
-static const struct net_device_ops hwsim_netdev_ops = {
-	.ndo_start_xmit 	= hwsim_mon_xmit,
-	.ndo_change_mtu		= eth_change_mtu,
-	.ndo_set_mac_address 	= eth_mac_addr,
-	.ndo_validate_addr	= eth_validate_addr,
-};
-
-static void hwsim_mon_setup(struct net_device *dev)
-{
-	dev->netdev_ops = &hwsim_netdev_ops;
-	dev->destructor = free_netdev;
-	ether_setup(dev);
-	dev->tx_queue_len = 0;
-	dev->type = ARPHRD_IEEE80211_RADIOTAP;
-	memset(dev->dev_addr, 0, ETH_ALEN);
-	dev->dev_addr[0] = 0x12;
-}
 
+	skb_queue_head_init(&data->pending);
 
-static void hwsim_send_ps_poll(void *dat, u8 *mac, struct ieee80211_vif *vif)
-{
-	struct mac80211_hwsim_data *data = dat;
-	struct hwsim_vif_priv *vp = (void *)vif->drv_priv;
-	struct sk_buff *skb;
-	struct ieee80211_pspoll *pspoll;
+	SET_IEEE80211_DEV(hw, data->dev);
+	memset(addr, 0, ETH_ALEN);
+	addr[0] = 0x02;
+	addr[3] = idx >> 8;
+	addr[4] = idx;
+	memcpy(data->addresses[0].addr, addr, ETH_ALEN);
+	memcpy(data->addresses[1].addr, addr, ETH_ALEN);
+	data->addresses[1].addr[0] |= 0x40;
+	hw->wiphy->n_addresses = 2;
+	hw->wiphy->addresses = data->addresses;
+
+	data->channels = channels;
+	data->idx = idx;
+
+	if (data->channels > 1) {
+		hw->wiphy->max_scan_ssids = 255;
+		hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN;
+		hw->wiphy->max_remain_on_channel_duration = 1000;
+		/* For channels > 1 DFS is not allowed */
+		hw->wiphy->n_iface_combinations = 1;
+		hw->wiphy->iface_combinations = &data->if_combination;
+		data->if_combination = hwsim_if_comb[0];
+		data->if_combination.num_different_channels = data->channels;
+	} else {
+		hw->wiphy->iface_combinations = hwsim_if_comb;
+		hw->wiphy->n_iface_combinations = ARRAY_SIZE(hwsim_if_comb);
+	}
 
-	if (!vp->assoc)
-		return;
+	INIT_DELAYED_WORK(&data->roc_done, hw_roc_done);
+	INIT_DELAYED_WORK(&data->hw_scan, hw_scan_work);
+
+	hw->queues = 5;
+	hw->offchannel_tx_hw_queue = 4;
+	hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
+				     BIT(NL80211_IFTYPE_AP) |
+				     BIT(NL80211_IFTYPE_P2P_CLIENT) |
+				     BIT(NL80211_IFTYPE_P2P_GO) |
+				     BIT(NL80211_IFTYPE_ADHOC) |
+				     BIT(NL80211_IFTYPE_MESH_POINT) |
+				     BIT(NL80211_IFTYPE_P2P_DEVICE);
+
+	hw->flags = IEEE80211_HW_MFP_CAPABLE |
+		    IEEE80211_HW_SIGNAL_DBM |
+		    IEEE80211_HW_SUPPORTS_STATIC_SMPS |
+		    IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
+		    IEEE80211_HW_AMPDU_AGGREGATION |
+		    IEEE80211_HW_WANT_MONITOR_VIF |
+		    IEEE80211_HW_QUEUE_CONTROL |
+		    IEEE80211_HW_SUPPORTS_HT_CCK_RATES;
+	if (rctbl)
+		hw->flags |= IEEE80211_HW_SUPPORTS_RC_TABLE;
+
+	hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS |
+			    WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
+			    WIPHY_FLAG_AP_UAPSD;
+	hw->wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR;
+
+	/* ask mac80211 to reserve space for magic */
+	hw->vif_data_size = sizeof(struct hwsim_vif_priv);
+	hw->sta_data_size = sizeof(struct hwsim_sta_priv);
+	hw->chanctx_data_size = sizeof(struct hwsim_chanctx_priv);
+
+	memcpy(data->channels_2ghz, hwsim_channels_2ghz,
+		sizeof(hwsim_channels_2ghz));
+	memcpy(data->channels_5ghz, hwsim_channels_5ghz,
+		sizeof(hwsim_channels_5ghz));
+	memcpy(data->rates, hwsim_rates, sizeof(hwsim_rates));
+
+	for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) {
+		struct ieee80211_supported_band *sband = &data->bands[band];
+		switch (band) {
+		case IEEE80211_BAND_2GHZ:
+			sband->channels = data->channels_2ghz;
+			sband->n_channels = ARRAY_SIZE(hwsim_channels_2ghz);
+			sband->bitrates = data->rates;
+			sband->n_bitrates = ARRAY_SIZE(hwsim_rates);
+			break;
+		case IEEE80211_BAND_5GHZ:
+			sband->channels = data->channels_5ghz;
+			sband->n_channels = ARRAY_SIZE(hwsim_channels_5ghz);
+			sband->bitrates = data->rates + 4;
+			sband->n_bitrates = ARRAY_SIZE(hwsim_rates) - 4;
+			break;
+		default:
+			continue;
+		}
 
-	wiphy_debug(data->hw->wiphy,
-		    "%s: send PS-Poll to %pM for aid %d\n",
-		    __func__, vp->bssid, vp->aid);
+		sband->ht_cap.ht_supported = true;
+		sband->ht_cap.cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
+				    IEEE80211_HT_CAP_GRN_FLD |
+				    IEEE80211_HT_CAP_SGI_40 |
+				    IEEE80211_HT_CAP_DSSSCCK40;
+		sband->ht_cap.ampdu_factor = 0x3;
+		sband->ht_cap.ampdu_density = 0x6;
+		memset(&sband->ht_cap.mcs, 0,
+		       sizeof(sband->ht_cap.mcs));
+		sband->ht_cap.mcs.rx_mask[0] = 0xff;
+		sband->ht_cap.mcs.rx_mask[1] = 0xff;
+		sband->ht_cap.mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
+
+		hw->wiphy->bands[band] = sband;
+
+		sband->vht_cap.vht_supported = true;
+		sband->vht_cap.cap =
+			IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 |
+			IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ |
+			IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ |
+			IEEE80211_VHT_CAP_RXLDPC |
+			IEEE80211_VHT_CAP_SHORT_GI_80 |
+			IEEE80211_VHT_CAP_SHORT_GI_160 |
+			IEEE80211_VHT_CAP_TXSTBC |
+			IEEE80211_VHT_CAP_RXSTBC_1 |
+			IEEE80211_VHT_CAP_RXSTBC_2 |
+			IEEE80211_VHT_CAP_RXSTBC_3 |
+			IEEE80211_VHT_CAP_RXSTBC_4 |
+			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
+		sband->vht_cap.vht_mcs.rx_mcs_map =
+			cpu_to_le16(IEEE80211_VHT_MCS_SUPPORT_0_8 << 0 |
+				    IEEE80211_VHT_MCS_SUPPORT_0_8 << 2 |
+				    IEEE80211_VHT_MCS_SUPPORT_0_9 << 4 |
+				    IEEE80211_VHT_MCS_SUPPORT_0_8 << 6 |
+				    IEEE80211_VHT_MCS_SUPPORT_0_8 << 8 |
+				    IEEE80211_VHT_MCS_SUPPORT_0_9 << 10 |
+				    IEEE80211_VHT_MCS_SUPPORT_0_9 << 12 |
+				    IEEE80211_VHT_MCS_SUPPORT_0_8 << 14);
+		sband->vht_cap.vht_mcs.tx_mcs_map =
+			sband->vht_cap.vht_mcs.rx_mcs_map;
+	}
 
-	skb = dev_alloc_skb(sizeof(*pspoll));
-	if (!skb)
-		return;
-	pspoll = (void *) skb_put(skb, sizeof(*pspoll));
-	pspoll->frame_control = cpu_to_le16(IEEE80211_FTYPE_CTL |
-					    IEEE80211_STYPE_PSPOLL |
-					    IEEE80211_FCTL_PM);
-	pspoll->aid = cpu_to_le16(0xc000 | vp->aid);
-	memcpy(pspoll->bssid, vp->bssid, ETH_ALEN);
-	memcpy(pspoll->ta, mac, ETH_ALEN);
+	/* By default all radios belong to the first group */
+	data->group = 1;
+	mutex_init(&data->mutex);
 
-	rcu_read_lock();
-	mac80211_hwsim_tx_frame(data->hw, skb,
-				rcu_dereference(vif->chanctx_conf)->def.chan);
-	rcu_read_unlock();
-}
+	/* Enable frame retransmissions for lossy channels */
+	hw->max_rates = 4;
+	hw->max_rate_tries = 11;
 
-static void hwsim_send_nullfunc(struct mac80211_hwsim_data *data, u8 *mac,
-				struct ieee80211_vif *vif, int ps)
-{
-	struct hwsim_vif_priv *vp = (void *)vif->drv_priv;
-	struct sk_buff *skb;
-	struct ieee80211_hdr *hdr;
+	if (reg_strict)
+		hw->wiphy->regulatory_flags |= REGULATORY_STRICT_REG;
+	if (regd) {
+		hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG;
+		wiphy_apply_custom_regulatory(hw->wiphy, regd);
+		/* give the regulatory workqueue a chance to run */
+		schedule_timeout_interruptible(1);
+	}
 
-	if (!vp->assoc)
-		return;
+	err = ieee80211_register_hw(hw);
+	if (err < 0) {
+		printk(KERN_DEBUG "mac80211_hwsim: ieee80211_register_hw failed (%d)\n",
+		       err);
+		goto failed_hw;
+	}
 
-	wiphy_debug(data->hw->wiphy,
-		    "%s: send data::nullfunc to %pM ps=%d\n",
-		    __func__, vp->bssid, ps);
+	wiphy_debug(hw->wiphy, "hwaddr %pM registered\n", hw->wiphy->perm_addr);
 
-	skb = dev_alloc_skb(sizeof(*hdr));
-	if (!skb)
-		return;
-	hdr = (void *) skb_put(skb, sizeof(*hdr) - ETH_ALEN);
-	hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
-					 IEEE80211_STYPE_NULLFUNC |
-					 (ps ? IEEE80211_FCTL_PM : 0));
-	hdr->duration_id = cpu_to_le16(0);
-	memcpy(hdr->addr1, vp->bssid, ETH_ALEN);
-	memcpy(hdr->addr2, mac, ETH_ALEN);
-	memcpy(hdr->addr3, vp->bssid, ETH_ALEN);
+	if (reg_alpha2)
+		regulatory_hint(hw->wiphy, reg_alpha2);
 
-	rcu_read_lock();
-	mac80211_hwsim_tx_frame(data->hw, skb,
-				rcu_dereference(vif->chanctx_conf)->def.chan);
-	rcu_read_unlock();
-}
+	data->debugfs = debugfs_create_dir("hwsim", hw->wiphy->debugfsdir);
+	debugfs_create_file("ps", 0666, data->debugfs, data, &hwsim_fops_ps);
+	debugfs_create_file("group", 0666, data->debugfs, data,
+			    &hwsim_fops_group);
+	if (data->channels == 1)
+		debugfs_create_file("dfs_simulate_radar", 0222,
+				    data->debugfs,
+				    data, &hwsim_simulate_radar);
 
+	tasklet_hrtimer_init(&data->beacon_timer,
+			     mac80211_hwsim_beacon,
+			     CLOCK_MONOTONIC_RAW, HRTIMER_MODE_ABS);
 
-static void hwsim_send_nullfunc_ps(void *dat, u8 *mac,
-				   struct ieee80211_vif *vif)
-{
-	struct mac80211_hwsim_data *data = dat;
-	hwsim_send_nullfunc(data, mac, vif, 1);
-}
+	spin_lock_bh(&hwsim_radio_lock);
+	list_add_tail(&data->list, &hwsim_radios);
+	spin_unlock_bh(&hwsim_radio_lock);
 
+	return idx;
 
-static void hwsim_send_nullfunc_no_ps(void *dat, u8 *mac,
-				      struct ieee80211_vif *vif)
-{
-	struct mac80211_hwsim_data *data = dat;
-	hwsim_send_nullfunc(data, mac, vif, 0);
+failed_hw:
+	device_unregister(data->dev);
+failed_drvdata:
+	ieee80211_free_hw(hw);
+failed:
+	return err;
 }
 
-
-static int hwsim_fops_ps_read(void *dat, u64 *val)
+static void mac80211_hwsim_destroy_radio(struct mac80211_hwsim_data *data)
 {
-	struct mac80211_hwsim_data *data = dat;
-	*val = data->ps;
-	return 0;
+	debugfs_remove_recursive(data->debugfs);
+	ieee80211_unregister_hw(data->hw);
+	device_release_driver(data->dev);
+	device_unregister(data->dev);
+	ieee80211_free_hw(data->hw);
 }
 
-static int hwsim_fops_ps_write(void *dat, u64 val)
+static void mac80211_hwsim_free(void)
 {
-	struct mac80211_hwsim_data *data = dat;
-	enum ps_mode old_ps;
-
-	if (val != PS_DISABLED && val != PS_ENABLED && val != PS_AUTO_POLL &&
-	    val != PS_MANUAL_POLL)
-		return -EINVAL;
-
-	old_ps = data->ps;
-	data->ps = val;
+	struct mac80211_hwsim_data *data;
 
-	if (val == PS_MANUAL_POLL) {
-		ieee80211_iterate_active_interfaces(data->hw,
-						    IEEE80211_IFACE_ITER_NORMAL,
-						    hwsim_send_ps_poll, data);
-		data->ps_poll_pending = true;
-	} else if (old_ps == PS_DISABLED && val != PS_DISABLED) {
-		ieee80211_iterate_active_interfaces(data->hw,
-						    IEEE80211_IFACE_ITER_NORMAL,
-						    hwsim_send_nullfunc_ps,
-						    data);
-	} else if (old_ps != PS_DISABLED && val == PS_DISABLED) {
-		ieee80211_iterate_active_interfaces(data->hw,
-						    IEEE80211_IFACE_ITER_NORMAL,
-						    hwsim_send_nullfunc_no_ps,
-						    data);
+	spin_lock_bh(&hwsim_radio_lock);
+	while ((data = list_first_entry_or_null(&hwsim_radios,
+						struct mac80211_hwsim_data,
+						list))) {
+		list_del(&data->list);
+		spin_unlock_bh(&hwsim_radio_lock);
+		mac80211_hwsim_destroy_radio(data);
+		spin_lock_bh(&hwsim_radio_lock);
 	}
-
-	return 0;
+	spin_unlock_bh(&hwsim_radio_lock);
+	class_destroy(hwsim_class);
 }
 
-DEFINE_SIMPLE_ATTRIBUTE(hwsim_fops_ps, hwsim_fops_ps_read, hwsim_fops_ps_write,
-			"%llu\n");
-
-
-static int hwsim_fops_group_read(void *dat, u64 *val)
-{
-	struct mac80211_hwsim_data *data = dat;
-	*val = data->group;
-	return 0;
-}
+static const struct net_device_ops hwsim_netdev_ops = {
+	.ndo_start_xmit 	= hwsim_mon_xmit,
+	.ndo_change_mtu		= eth_change_mtu,
+	.ndo_set_mac_address 	= eth_mac_addr,
+	.ndo_validate_addr	= eth_validate_addr,
+};
 
-static int hwsim_fops_group_write(void *dat, u64 val)
+static void hwsim_mon_setup(struct net_device *dev)
 {
-	struct mac80211_hwsim_data *data = dat;
-	data->group = val;
-	return 0;
+	dev->netdev_ops = &hwsim_netdev_ops;
+	dev->destructor = free_netdev;
+	ether_setup(dev);
+	dev->tx_queue_len = 0;
+	dev->type = ARPHRD_IEEE80211_RADIOTAP;
+	memset(dev->dev_addr, 0, ETH_ALEN);
+	dev->dev_addr[0] = 0x12;
 }
 
-DEFINE_SIMPLE_ATTRIBUTE(hwsim_fops_group,
-			hwsim_fops_group_read, hwsim_fops_group_write,
-			"%llx\n");
-
-static struct mac80211_hwsim_data *get_hwsim_data_ref_from_addr(
-			     struct mac_address *addr)
+static struct mac80211_hwsim_data *get_hwsim_data_ref_from_addr(const u8 *addr)
 {
 	struct mac80211_hwsim_data *data;
 	bool _found = false;
 
 	spin_lock_bh(&hwsim_radio_lock);
 	list_for_each_entry(data, &hwsim_radios, list) {
-		if (memcmp(data->addresses[1].addr, addr,
-			  sizeof(struct mac_address)) == 0) {
+		if (memcmp(data->addresses[1].addr, addr, ETH_ALEN) == 0) {
 			_found = true;
 			break;
 		}
@@ -1952,27 +2238,26 @@ static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2,
 	struct hwsim_tx_rate *tx_attempts;
 	unsigned long ret_skb_ptr;
 	struct sk_buff *skb, *tmp;
-	struct mac_address *src;
+	const u8 *src;
 	unsigned int hwsim_flags;
-
 	int i;
 	bool found = false;
 
+	if (info->snd_portid != wmediumd_portid)
+		return -EINVAL;
+
 	if (!info->attrs[HWSIM_ATTR_ADDR_TRANSMITTER] ||
-	   !info->attrs[HWSIM_ATTR_FLAGS] ||
-	   !info->attrs[HWSIM_ATTR_COOKIE] ||
-	   !info->attrs[HWSIM_ATTR_TX_INFO])
+	    !info->attrs[HWSIM_ATTR_FLAGS] ||
+	    !info->attrs[HWSIM_ATTR_COOKIE] ||
+	    !info->attrs[HWSIM_ATTR_TX_INFO])
 		goto out;
 
-	src = (struct mac_address *)nla_data(
-				   info->attrs[HWSIM_ATTR_ADDR_TRANSMITTER]);
+	src = (void *)nla_data(info->attrs[HWSIM_ATTR_ADDR_TRANSMITTER]);
 	hwsim_flags = nla_get_u32(info->attrs[HWSIM_ATTR_FLAGS]);
-
 	ret_skb_ptr = nla_get_u64(info->attrs[HWSIM_ATTR_COOKIE]);
 
 	data2 = get_hwsim_data_ref_from_addr(src);
-
-	if (data2 == NULL)
+	if (!data2)
 		goto out;
 
 	/* look for the skb matching the cookie passed back from user */
@@ -2029,38 +2314,37 @@ static int hwsim_cloned_frame_received_nl(struct sk_buff *skb_2,
 
 	struct mac80211_hwsim_data *data2;
 	struct ieee80211_rx_status rx_status;
-	struct mac_address *dst;
+	const u8 *dst;
 	int frame_data_len;
-	char *frame_data;
+	void *frame_data;
 	struct sk_buff *skb = NULL;
 
+	if (info->snd_portid != wmediumd_portid)
+		return -EINVAL;
+
 	if (!info->attrs[HWSIM_ATTR_ADDR_RECEIVER] ||
 	    !info->attrs[HWSIM_ATTR_FRAME] ||
 	    !info->attrs[HWSIM_ATTR_RX_RATE] ||
 	    !info->attrs[HWSIM_ATTR_SIGNAL])
 		goto out;
 
-	dst = (struct mac_address *)nla_data(
-				   info->attrs[HWSIM_ATTR_ADDR_RECEIVER]);
-
+	dst = (void *)nla_data(info->attrs[HWSIM_ATTR_ADDR_RECEIVER]);
 	frame_data_len = nla_len(info->attrs[HWSIM_ATTR_FRAME]);
-	frame_data = (char *)nla_data(info->attrs[HWSIM_ATTR_FRAME]);
+	frame_data = (void *)nla_data(info->attrs[HWSIM_ATTR_FRAME]);
 
 	/* Allocate new skb here */
 	skb = alloc_skb(frame_data_len, GFP_KERNEL);
 	if (skb == NULL)
 		goto err;
 
-	if (frame_data_len <= IEEE80211_MAX_DATA_LEN) {
-		/* Copy the data */
-		memcpy(skb_put(skb, frame_data_len), frame_data,
-		       frame_data_len);
-	} else
+	if (frame_data_len > IEEE80211_MAX_DATA_LEN)
 		goto err;
 
-	data2 = get_hwsim_data_ref_from_addr(dst);
+	/* Copy the data */
+	memcpy(skb_put(skb, frame_data_len), frame_data, frame_data_len);
 
-	if (data2 == NULL)
+	data2 = get_hwsim_data_ref_from_addr(dst);
+	if (!data2)
 		goto out;
 
 	/* check if radio is configured properly */
@@ -2068,7 +2352,7 @@ static int hwsim_cloned_frame_received_nl(struct sk_buff *skb_2,
 	if (data2->idle || !data2->started)
 		goto out;
 
-	/*A frame is received from user space*/
+	/* A frame is received from user space */
 	memset(&rx_status, 0, sizeof(rx_status));
 	rx_status.freq = data2->channel->center_freq;
 	rx_status.band = data2->channel->band;
@@ -2090,8 +2374,24 @@ out:
 static int hwsim_register_received_nl(struct sk_buff *skb_2,
 				      struct genl_info *info)
 {
-	if (info == NULL)
-		goto out;
+	struct mac80211_hwsim_data *data;
+	int chans = 1;
+
+	spin_lock_bh(&hwsim_radio_lock);
+	list_for_each_entry(data, &hwsim_radios, list)
+		chans = max(chans, data->channels);
+	spin_unlock_bh(&hwsim_radio_lock);
+
+	/* In the future we should revise the userspace API and allow it
+	 * to set a flag indicating that it supports multi-channel; then
+	 * we can let this pass conditionally on that flag.
+	 * For current userspace, prohibit it since it won't work right.
+	 */
+	if (chans > 1)
+		return -EOPNOTSUPP;
+
+	if (wmediumd_portid)
+		return -EBUSY;
 
 	wmediumd_portid = info->snd_portid;
 
@@ -2099,9 +2399,53 @@ static int hwsim_register_received_nl(struct sk_buff *skb_2,
 	       "switching to wmediumd mode with pid %d\n", info->snd_portid);
 
 	return 0;
-out:
-	printk(KERN_DEBUG "mac80211_hwsim: error occurred in %s\n", __func__);
-	return -EINVAL;
+}
+
+static int hwsim_create_radio_nl(struct sk_buff *msg, struct genl_info *info)
+{
+	unsigned int chans = channels;
+	const char *alpha2 = NULL;
+	const struct ieee80211_regdomain *regd = NULL;
+	bool reg_strict = info->attrs[HWSIM_ATTR_REG_STRICT_REG];
+
+	if (info->attrs[HWSIM_ATTR_CHANNELS])
+		chans = nla_get_u32(info->attrs[HWSIM_ATTR_CHANNELS]);
+
+	if (info->attrs[HWSIM_ATTR_REG_HINT_ALPHA2])
+		alpha2 = nla_data(info->attrs[HWSIM_ATTR_REG_HINT_ALPHA2]);
+
+	if (info->attrs[HWSIM_ATTR_REG_CUSTOM_REG]) {
+		u32 idx = nla_get_u32(info->attrs[HWSIM_ATTR_REG_CUSTOM_REG]);
+
+		if (idx >= ARRAY_SIZE(hwsim_world_regdom_custom))
+			return -EINVAL;
+		regd = hwsim_world_regdom_custom[idx];
+	}
+
+	return mac80211_hwsim_create_radio(chans, alpha2, regd, reg_strict);
+}
+
+static int hwsim_destroy_radio_nl(struct sk_buff *msg, struct genl_info *info)
+{
+	struct mac80211_hwsim_data *data;
+	int idx;
+
+	if (!info->attrs[HWSIM_ATTR_RADIO_ID])
+		return -EINVAL;
+	idx = nla_get_u32(info->attrs[HWSIM_ATTR_RADIO_ID]);
+
+	spin_lock_bh(&hwsim_radio_lock);
+	list_for_each_entry(data, &hwsim_radios, list) {
+		if (data->idx != idx)
+			continue;
+		list_del(&data->list);
+		spin_unlock_bh(&hwsim_radio_lock);
+		mac80211_hwsim_destroy_radio(data);
+		return 0;
+	}
+	spin_unlock_bh(&hwsim_radio_lock);
+
+	return -ENODEV;
 }
 
 /* Generic Netlink operations array */
@@ -2122,6 +2466,18 @@ static const struct genl_ops hwsim_ops[] = {
 		.policy = hwsim_genl_policy,
 		.doit = hwsim_tx_info_frame_received_nl,
 	},
+	{
+		.cmd = HWSIM_CMD_CREATE_RADIO,
+		.policy = hwsim_genl_policy,
+		.doit = hwsim_create_radio_nl,
+		.flags = GENL_ADMIN_PERM,
+	},
+	{
+		.cmd = HWSIM_CMD_DESTROY_RADIO,
+		.policy = hwsim_genl_policy,
+		.doit = hwsim_destroy_radio_nl,
+		.flags = GENL_ADMIN_PERM,
+	},
 };
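
The two new commands are root-only (GENL_ADMIN_PERM) and let userspace add or remove radios at runtime. A hedged userspace sketch using libnl-genl-3; the HWSIM_CMD_* and HWSIM_ATTR_* values are assumed to come from the driver's mac80211_hwsim.h header, the family name is assumed to be "MAC80211_HWSIM", and error handling is kept minimal:

	/* Hedged sketch: request a new 2-channel radio via generic netlink.
	 * Build: link against libnl-3 and libnl-genl-3. */
	#include <netlink/netlink.h>
	#include <netlink/genl/genl.h>
	#include <netlink/genl/ctrl.h>
	#include "mac80211_hwsim.h"	/* HWSIM_CMD_*, HWSIM_ATTR_* (assumed include) */

	int main(void)
	{
		struct nl_sock *sk = nl_socket_alloc();
		struct nl_msg *msg;
		int family, err;

		if (!sk || genl_connect(sk))
			return 1;

		family = genl_ctrl_resolve(sk, "MAC80211_HWSIM");
		if (family < 0)
			return 1;

		msg = nlmsg_alloc();
		genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
			    HWSIM_CMD_CREATE_RADIO, 0);
		nla_put_u32(msg, HWSIM_ATTR_CHANNELS, 2);	/* multi-channel radio */

		err = nl_send_auto(sk, msg);
		nlmsg_free(msg);
		nl_socket_free(sk);
		return err < 0;
	}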
 
 static int mac80211_hwsim_netlink_notify(struct notifier_block *nb,
@@ -2150,10 +2506,6 @@ static int hwsim_init_netlink(void)
 {
 	int rc;
 
-	/* userspace test API hasn't been adjusted for multi-channel */
-	if (channels > 1)
-		return 0;
-
 	printk(KERN_INFO "mac80211_hwsim: initializing netlink\n");
 
 	rc = genl_register_family_with_ops(&hwsim_genl_family, hwsim_ops);
@@ -2173,77 +2525,36 @@ failure:
 
 static void hwsim_exit_netlink(void)
 {
-	int ret;
-
-	/* userspace test API hasn't been adjusted for multi-channel */
-	if (channels > 1)
-		return;
-
-	printk(KERN_INFO "mac80211_hwsim: closing netlink\n");
 	/* unregister the notifier */
 	netlink_unregister_notifier(&hwsim_netlink_notifier);
 	/* unregister the family */
-	ret = genl_unregister_family(&hwsim_genl_family);
-	if (ret)
-		printk(KERN_DEBUG "mac80211_hwsim: "
-		       "unregister family %i\n", ret);
+	genl_unregister_family(&hwsim_genl_family);
 }
 
-static const struct ieee80211_iface_limit hwsim_if_limits[] = {
-	{ .max = 1, .types = BIT(NL80211_IFTYPE_ADHOC) },
-	{ .max = 2048,  .types = BIT(NL80211_IFTYPE_STATION) |
-				 BIT(NL80211_IFTYPE_P2P_CLIENT) |
-#ifdef CONFIG_MAC80211_MESH
-				 BIT(NL80211_IFTYPE_MESH_POINT) |
-#endif
-				 BIT(NL80211_IFTYPE_AP) |
-				 BIT(NL80211_IFTYPE_P2P_GO) },
-	{ .max = 1, .types = BIT(NL80211_IFTYPE_P2P_DEVICE) },
-};
-
-static struct ieee80211_iface_combination hwsim_if_comb = {
-	.limits = hwsim_if_limits,
-	.n_limits = ARRAY_SIZE(hwsim_if_limits),
-	.max_interfaces = 2048,
-	.num_different_channels = 1,
-};
-
 static int __init init_mac80211_hwsim(void)
 {
-	int i, err = 0;
-	u8 addr[ETH_ALEN];
-	struct mac80211_hwsim_data *data;
-	struct ieee80211_hw *hw;
-	enum ieee80211_band band;
+	int i, err;
 
-	if (radios < 1 || radios > 100)
+	if (radios < 0 || radios > 100)
 		return -EINVAL;
 
 	if (channels < 1)
 		return -EINVAL;
 
-	if (channels > 1) {
-		hwsim_if_comb.num_different_channels = channels;
-		mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
-		mac80211_hwsim_ops.cancel_hw_scan =
-			mac80211_hwsim_cancel_hw_scan;
-		mac80211_hwsim_ops.sw_scan_start = NULL;
-		mac80211_hwsim_ops.sw_scan_complete = NULL;
-		mac80211_hwsim_ops.remain_on_channel =
-			mac80211_hwsim_roc;
-		mac80211_hwsim_ops.cancel_remain_on_channel =
-			mac80211_hwsim_croc;
-		mac80211_hwsim_ops.add_chanctx =
-			mac80211_hwsim_add_chanctx;
-		mac80211_hwsim_ops.remove_chanctx =
-			mac80211_hwsim_remove_chanctx;
-		mac80211_hwsim_ops.change_chanctx =
-			mac80211_hwsim_change_chanctx;
-		mac80211_hwsim_ops.assign_vif_chanctx =
-			mac80211_hwsim_assign_vif_chanctx;
-		mac80211_hwsim_ops.unassign_vif_chanctx =
-			mac80211_hwsim_unassign_vif_chanctx;
-	}
+	mac80211_hwsim_mchan_ops = mac80211_hwsim_ops;
+	mac80211_hwsim_mchan_ops.hw_scan = mac80211_hwsim_hw_scan;
+	mac80211_hwsim_mchan_ops.cancel_hw_scan = mac80211_hwsim_cancel_hw_scan;
+	mac80211_hwsim_mchan_ops.sw_scan_start = NULL;
+	mac80211_hwsim_mchan_ops.sw_scan_complete = NULL;
+	mac80211_hwsim_mchan_ops.remain_on_channel = mac80211_hwsim_roc;
+	mac80211_hwsim_mchan_ops.cancel_remain_on_channel = mac80211_hwsim_croc;
+	mac80211_hwsim_mchan_ops.add_chanctx = mac80211_hwsim_add_chanctx;
+	mac80211_hwsim_mchan_ops.remove_chanctx = mac80211_hwsim_remove_chanctx;
+	mac80211_hwsim_mchan_ops.change_chanctx = mac80211_hwsim_change_chanctx;
+	mac80211_hwsim_mchan_ops.assign_vif_chanctx =
+		mac80211_hwsim_assign_vif_chanctx;
+	mac80211_hwsim_mchan_ops.unassign_vif_chanctx =
+		mac80211_hwsim_unassign_vif_chanctx;
 
 	spin_lock_init(&hwsim_radio_lock);
 	INIT_LIST_HEAD(&hwsim_radios);
@@ -2255,348 +2566,116 @@ static int __init init_mac80211_hwsim(void)
 	hwsim_class = class_create(THIS_MODULE, "mac80211_hwsim");
 	if (IS_ERR(hwsim_class)) {
 		err = PTR_ERR(hwsim_class);
-		goto failed_unregister_driver;
+		goto out_unregister_driver;
 	}
 
-	memset(addr, 0, ETH_ALEN);
-	addr[0] = 0x02;
-
 	for (i = 0; i < radios; i++) {
-		printk(KERN_DEBUG "mac80211_hwsim: Initializing radio %d\n",
-		       i);
-		hw = ieee80211_alloc_hw(sizeof(*data), &mac80211_hwsim_ops);
-		if (!hw) {
-			printk(KERN_DEBUG "mac80211_hwsim: ieee80211_alloc_hw "
-			       "failed\n");
-			err = -ENOMEM;
-			goto failed;
-		}
-		data = hw->priv;
-		data->hw = hw;
-
-		data->dev = device_create(hwsim_class, NULL, 0, hw,
-					  "hwsim%d", i);
-		if (IS_ERR(data->dev)) {
-			printk(KERN_DEBUG
-			       "mac80211_hwsim: device_create failed (%ld)\n",
-			       PTR_ERR(data->dev));
-			err = -ENOMEM;
-			goto failed_drvdata;
-		}
-		data->dev->driver = &mac80211_hwsim_driver.driver;
-		err = device_bind_driver(data->dev);
-		if (err != 0) {
-			printk(KERN_DEBUG
-			       "mac80211_hwsim: device_bind_driver failed (%d)\n",
-			       err);
-			goto failed_hw;
-		}
-
-		skb_queue_head_init(&data->pending);
+		const char *reg_alpha2 = NULL;
+		const struct ieee80211_regdomain *regd = NULL;
+		bool reg_strict = false;
 
-		SET_IEEE80211_DEV(hw, data->dev);
-		addr[3] = i >> 8;
-		addr[4] = i;
-		memcpy(data->addresses[0].addr, addr, ETH_ALEN);
-		memcpy(data->addresses[1].addr, addr, ETH_ALEN);
-		data->addresses[1].addr[0] |= 0x40;
-		hw->wiphy->n_addresses = 2;
-		hw->wiphy->addresses = data->addresses;
-
-		hw->wiphy->iface_combinations = &hwsim_if_comb;
-		hw->wiphy->n_iface_combinations = 1;
-
-		if (channels > 1) {
-			hw->wiphy->max_scan_ssids = 255;
-			hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN;
-			hw->wiphy->max_remain_on_channel_duration = 1000;
-		}
-
-		INIT_DELAYED_WORK(&data->roc_done, hw_roc_done);
-		INIT_DELAYED_WORK(&data->hw_scan, hw_scan_work);
-
-		hw->channel_change_time = 1;
-		hw->queues = 5;
-		hw->offchannel_tx_hw_queue = 4;
-		hw->wiphy->interface_modes =
-			BIT(NL80211_IFTYPE_STATION) |
-			BIT(NL80211_IFTYPE_AP) |
-			BIT(NL80211_IFTYPE_P2P_CLIENT) |
-			BIT(NL80211_IFTYPE_P2P_GO) |
-			BIT(NL80211_IFTYPE_ADHOC) |
-			BIT(NL80211_IFTYPE_MESH_POINT) |
-			BIT(NL80211_IFTYPE_P2P_DEVICE);
-
-		hw->flags = IEEE80211_HW_MFP_CAPABLE |
-			    IEEE80211_HW_SIGNAL_DBM |
-			    IEEE80211_HW_SUPPORTS_STATIC_SMPS |
-			    IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
-			    IEEE80211_HW_AMPDU_AGGREGATION |
-			    IEEE80211_HW_WANT_MONITOR_VIF |
-			    IEEE80211_HW_QUEUE_CONTROL;
-		if (rctbl)
-			hw->flags |= IEEE80211_HW_SUPPORTS_RC_TABLE;
-
-		hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS |
-				    WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
-				    WIPHY_FLAG_AP_UAPSD;
-		hw->wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR;
-
-		/* ask mac80211 to reserve space for magic */
-		hw->vif_data_size = sizeof(struct hwsim_vif_priv);
-		hw->sta_data_size = sizeof(struct hwsim_sta_priv);
-		hw->chanctx_data_size = sizeof(struct hwsim_chanctx_priv);
-
-		memcpy(data->channels_2ghz, hwsim_channels_2ghz,
-			sizeof(hwsim_channels_2ghz));
-		memcpy(data->channels_5ghz, hwsim_channels_5ghz,
-			sizeof(hwsim_channels_5ghz));
-		memcpy(data->rates, hwsim_rates, sizeof(hwsim_rates));
-
-		for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) {
-			struct ieee80211_supported_band *sband = &data->bands[band];
-			switch (band) {
-			case IEEE80211_BAND_2GHZ:
-				sband->channels = data->channels_2ghz;
-				sband->n_channels =
-					ARRAY_SIZE(hwsim_channels_2ghz);
-				sband->bitrates = data->rates;
-				sband->n_bitrates = ARRAY_SIZE(hwsim_rates);
-				break;
-			case IEEE80211_BAND_5GHZ:
-				sband->channels = data->channels_5ghz;
-				sband->n_channels =
-					ARRAY_SIZE(hwsim_channels_5ghz);
-				sband->bitrates = data->rates + 4;
-				sband->n_bitrates = ARRAY_SIZE(hwsim_rates) - 4;
-				break;
-			default:
-				continue;
-			}
-
-			sband->ht_cap.ht_supported = true;
-			sband->ht_cap.cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
-				IEEE80211_HT_CAP_GRN_FLD |
-				IEEE80211_HT_CAP_SGI_40 |
-				IEEE80211_HT_CAP_DSSSCCK40;
-			sband->ht_cap.ampdu_factor = 0x3;
-			sband->ht_cap.ampdu_density = 0x6;
-			memset(&sband->ht_cap.mcs, 0,
-			       sizeof(sband->ht_cap.mcs));
-			sband->ht_cap.mcs.rx_mask[0] = 0xff;
-			sband->ht_cap.mcs.rx_mask[1] = 0xff;
-			sband->ht_cap.mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
-
-			hw->wiphy->bands[band] = sband;
-
-			sband->vht_cap.vht_supported = true;
-			sband->vht_cap.cap =
-				IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 |
-				IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ |
-				IEEE80211_VHT_CAP_RXLDPC |
-				IEEE80211_VHT_CAP_SHORT_GI_80 |
-				IEEE80211_VHT_CAP_SHORT_GI_160 |
-				IEEE80211_VHT_CAP_TXSTBC |
-				IEEE80211_VHT_CAP_RXSTBC_1 |
-				IEEE80211_VHT_CAP_RXSTBC_2 |
-				IEEE80211_VHT_CAP_RXSTBC_3 |
-				IEEE80211_VHT_CAP_RXSTBC_4 |
-				IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
-			sband->vht_cap.vht_mcs.rx_mcs_map =
-				cpu_to_le16(IEEE80211_VHT_MCS_SUPPORT_0_8 << 0 |
-					    IEEE80211_VHT_MCS_SUPPORT_0_8 << 2 |
-					    IEEE80211_VHT_MCS_SUPPORT_0_9 << 4 |
-					    IEEE80211_VHT_MCS_SUPPORT_0_8 << 6 |
-					    IEEE80211_VHT_MCS_SUPPORT_0_8 << 8 |
-					    IEEE80211_VHT_MCS_SUPPORT_0_9 << 10 |
-					    IEEE80211_VHT_MCS_SUPPORT_0_9 << 12 |
-					    IEEE80211_VHT_MCS_SUPPORT_0_8 << 14);
-			sband->vht_cap.vht_mcs.tx_mcs_map =
-				sband->vht_cap.vht_mcs.rx_mcs_map;
-		}
-		/* By default all radios are belonging to the first group */
-		data->group = 1;
-		mutex_init(&data->mutex);
-
-		/* Enable frame retransmissions for lossy channels */
-		hw->max_rates = 4;
-		hw->max_rate_tries = 11;
-
-		/* Work to be done prior to ieee80211_register_hw() */
 		switch (regtest) {
-		case HWSIM_REGTEST_DISABLED:
-		case HWSIM_REGTEST_DRIVER_REG_FOLLOW:
-		case HWSIM_REGTEST_DRIVER_REG_ALL:
 		case HWSIM_REGTEST_DIFF_COUNTRY:
-			/*
-			 * Nothing to be done for driver regulatory domain
-			 * hints prior to ieee80211_register_hw()
-			 */
-			break;
-		case HWSIM_REGTEST_WORLD_ROAM:
-			if (i == 0) {
-				hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY;
-				wiphy_apply_custom_regulatory(hw->wiphy,
-					&hwsim_world_regdom_custom_01);
-			}
-			break;
-		case HWSIM_REGTEST_CUSTOM_WORLD:
-			hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY;
-			wiphy_apply_custom_regulatory(hw->wiphy,
-				&hwsim_world_regdom_custom_01);
-			break;
-		case HWSIM_REGTEST_CUSTOM_WORLD_2:
-			if (i == 0) {
-				hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY;
-				wiphy_apply_custom_regulatory(hw->wiphy,
-					&hwsim_world_regdom_custom_01);
-			} else if (i == 1) {
-				hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY;
-				wiphy_apply_custom_regulatory(hw->wiphy,
-					&hwsim_world_regdom_custom_02);
-			}
-			break;
-		case HWSIM_REGTEST_STRICT_ALL:
-			hw->wiphy->flags |= WIPHY_FLAG_STRICT_REGULATORY;
-			break;
-		case HWSIM_REGTEST_STRICT_FOLLOW:
-		case HWSIM_REGTEST_STRICT_AND_DRIVER_REG:
-			if (i == 0)
-				hw->wiphy->flags |= WIPHY_FLAG_STRICT_REGULATORY;
-			break;
-		case HWSIM_REGTEST_ALL:
-			if (i == 0) {
-				hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY;
-				wiphy_apply_custom_regulatory(hw->wiphy,
-					&hwsim_world_regdom_custom_01);
-			} else if (i == 1) {
-				hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY;
-				wiphy_apply_custom_regulatory(hw->wiphy,
-					&hwsim_world_regdom_custom_02);
-			} else if (i == 4)
-				hw->wiphy->flags |= WIPHY_FLAG_STRICT_REGULATORY;
-			break;
-		default:
-			break;
-		}
-
-		/* give the regulatory workqueue a chance to run */
-		if (regtest)
-			schedule_timeout_interruptible(1);
-		err = ieee80211_register_hw(hw);
-		if (err < 0) {
-			printk(KERN_DEBUG "mac80211_hwsim: "
-			       "ieee80211_register_hw failed (%d)\n", err);
-			goto failed_hw;
-		}
-
-		/* Work to be done after to ieee80211_register_hw() */
-		switch (regtest) {
-		case HWSIM_REGTEST_WORLD_ROAM:
-		case HWSIM_REGTEST_DISABLED:
+			if (i < ARRAY_SIZE(hwsim_alpha2s))
+				reg_alpha2 = hwsim_alpha2s[i];
 			break;
 		case HWSIM_REGTEST_DRIVER_REG_FOLLOW:
 			if (!i)
-				regulatory_hint(hw->wiphy, hwsim_alpha2s[0]);
+				reg_alpha2 = hwsim_alpha2s[0];
 			break;
-		case HWSIM_REGTEST_DRIVER_REG_ALL:
 		case HWSIM_REGTEST_STRICT_ALL:
-			regulatory_hint(hw->wiphy, hwsim_alpha2s[0]);
+			reg_strict = true;
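+			/* fall through */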
+		case HWSIM_REGTEST_DRIVER_REG_ALL:
+			reg_alpha2 = hwsim_alpha2s[0];
 			break;
-		case HWSIM_REGTEST_DIFF_COUNTRY:
-			if (i < ARRAY_SIZE(hwsim_alpha2s))
-				regulatory_hint(hw->wiphy, hwsim_alpha2s[i]);
+		case HWSIM_REGTEST_WORLD_ROAM:
+			if (i == 0)
+				regd = &hwsim_world_regdom_custom_01;
 			break;
 		case HWSIM_REGTEST_CUSTOM_WORLD:
+			regd = &hwsim_world_regdom_custom_01;
+			break;
 		case HWSIM_REGTEST_CUSTOM_WORLD_2:
-			/*
-			 * Nothing to be done for custom world regulatory
-			 * domains after to ieee80211_register_hw
-			 */
+			if (i == 0)
+				regd = &hwsim_world_regdom_custom_01;
+			else if (i == 1)
+				regd = &hwsim_world_regdom_custom_02;
 			break;
 		case HWSIM_REGTEST_STRICT_FOLLOW:
-			if (i == 0)
-				regulatory_hint(hw->wiphy, hwsim_alpha2s[0]);
+			if (i == 0) {
+				reg_strict = true;
+				reg_alpha2 = hwsim_alpha2s[0];
+			}
 			break;
 		case HWSIM_REGTEST_STRICT_AND_DRIVER_REG:
-			if (i == 0)
-				regulatory_hint(hw->wiphy, hwsim_alpha2s[0]);
-			else if (i == 1)
-				regulatory_hint(hw->wiphy, hwsim_alpha2s[1]);
+			if (i == 0) {
+				reg_strict = true;
+				reg_alpha2 = hwsim_alpha2s[0];
+			} else if (i == 1) {
+				reg_alpha2 = hwsim_alpha2s[1];
+			}
 			break;
 		case HWSIM_REGTEST_ALL:
-			if (i == 2)
-				regulatory_hint(hw->wiphy, hwsim_alpha2s[0]);
-			else if (i == 3)
-				regulatory_hint(hw->wiphy, hwsim_alpha2s[1]);
-			else if (i == 4)
-				regulatory_hint(hw->wiphy, hwsim_alpha2s[2]);
+			switch (i) {
+			case 0:
+				regd = &hwsim_world_regdom_custom_01;
+				break;
+			case 1:
+				regd = &hwsim_world_regdom_custom_02;
+				break;
+			case 2:
+				reg_alpha2 = hwsim_alpha2s[0];
+				break;
+			case 3:
+				reg_alpha2 = hwsim_alpha2s[1];
+				break;
+			case 4:
+				reg_strict = true;
+				reg_alpha2 = hwsim_alpha2s[2];
+				break;
+			}
 			break;
 		default:
 			break;
 		}
 
-		wiphy_debug(hw->wiphy, "hwaddr %pm registered\n",
-			    hw->wiphy->perm_addr);
-
-		data->debugfs = debugfs_create_dir("hwsim",
-						   hw->wiphy->debugfsdir);
-		data->debugfs_ps = debugfs_create_file("ps", 0666,
-						       data->debugfs, data,
-						       &hwsim_fops_ps);
-		data->debugfs_group = debugfs_create_file("group", 0666,
-							data->debugfs, data,
-							&hwsim_fops_group);
-
-		tasklet_hrtimer_init(&data->beacon_timer,
-				     mac80211_hwsim_beacon,
-				     CLOCK_REALTIME, HRTIMER_MODE_ABS);
-
-		list_add_tail(&data->list, &hwsim_radios);
+		err = mac80211_hwsim_create_radio(channels, reg_alpha2,
+						  regd, reg_strict);
+		if (err < 0)
+			goto out_free_radios;
 	}
 
 	hwsim_mon = alloc_netdev(0, "hwsim%d", hwsim_mon_setup);
 	if (hwsim_mon == NULL) {
 		err = -ENOMEM;
-		goto failed;
+		goto out_free_radios;
 	}
 
 	rtnl_lock();
-
 	err = dev_alloc_name(hwsim_mon, hwsim_mon->name);
-	if (err < 0)
-		goto failed_mon;
-
+	if (err < 0) {
+		rtnl_unlock();
+		goto out_free_radios;
+	}
 
 	err = register_netdevice(hwsim_mon);
-	if (err < 0)
-		goto failed_mon;
-
+	if (err < 0) {
+		rtnl_unlock();
+		goto out_free_mon;
+	}
 	rtnl_unlock();
 
 	err = hwsim_init_netlink();
 	if (err < 0)
-		goto failed_nl;
+		goto out_free_mon;
 
 	return 0;
 
-failed_nl:
-	printk(KERN_DEBUG "mac_80211_hwsim: failed initializing netlink\n");
-	return err;
-
-failed_mon:
-	rtnl_unlock();
+out_free_mon:
 	free_netdev(hwsim_mon);
+out_free_radios:
 	mac80211_hwsim_free();
-	return err;
-
-failed_hw:
-	device_unregister(data->dev);
-failed_drvdata:
-	ieee80211_free_hw(hw);
-failed:
-	mac80211_hwsim_free();
-failed_unregister_driver:
+out_unregister_driver:
 	platform_driver_unregister(&mac80211_hwsim_driver);
 	return err;
 }
diff --git a/drivers/net/wireless/mac80211_hwsim.h b/drivers/net/wireless/mac80211_hwsim.h
index afaad5a443b6..2747cce5a269 100644
--- a/drivers/net/wireless/mac80211_hwsim.h
+++ b/drivers/net/wireless/mac80211_hwsim.h
@@ -65,6 +65,9 @@ enum hwsim_tx_control_flags {
  * kernel, uses:
  *	%HWSIM_ATTR_ADDR_TRANSMITTER, %HWSIM_ATTR_FLAGS,
  *	%HWSIM_ATTR_TX_INFO, %HWSIM_ATTR_SIGNAL, %HWSIM_ATTR_COOKIE
+ * @HWSIM_CMD_CREATE_RADIO: create a new radio with the given parameters,
+ *	returns the radio ID (>= 0) or a negative error code
+ * @HWSIM_CMD_DESTROY_RADIO: destroy a radio
  * @__HWSIM_CMD_MAX: enum limit
  */
 enum {
@@ -72,6 +75,8 @@ enum {
 	HWSIM_CMD_REGISTER,
 	HWSIM_CMD_FRAME,
 	HWSIM_CMD_TX_INFO_FRAME,
+	HWSIM_CMD_CREATE_RADIO,
+	HWSIM_CMD_DESTROY_RADIO,
 	__HWSIM_CMD_MAX,
 };
 #define HWSIM_CMD_MAX (__HWSIM_CMD_MAX - 1)
@@ -94,6 +99,14 @@ enum {
 	space
  * @HWSIM_ATTR_TX_INFO: ieee80211_tx_rate array
  * @HWSIM_ATTR_COOKIE: sk_buff cookie to identify the frame
+ * @HWSIM_ATTR_CHANNELS: u32 attribute used with the %HWSIM_CMD_CREATE_RADIO
+ *	command giving the number of channels supported by the new radio
+ * @HWSIM_ATTR_RADIO_ID: u32 attribute used with %HWSIM_CMD_DESTROY_RADIO
+ *	to identify the radio to destroy
+ * @HWSIM_ATTR_REG_HINT_ALPHA2: alpha2 for regulatory driver hint
+ *	(nla string, length 2)
+ * @HWSIM_ATTR_REG_CUSTOM_REG: custom regulatory domain index (u32 attribute)
+ * @HWSIM_ATTR_REG_STRICT_REG: request REGULATORY_STRICT_REG (flag attribute)
  * @__HWSIM_ATTR_MAX: enum limit
  */
 
@@ -108,6 +121,11 @@ enum {
 	HWSIM_ATTR_SIGNAL,
 	HWSIM_ATTR_TX_INFO,
 	HWSIM_ATTR_COOKIE,
+	HWSIM_ATTR_CHANNELS,
+	HWSIM_ATTR_RADIO_ID,
+	HWSIM_ATTR_REG_HINT_ALPHA2,
+	HWSIM_ATTR_REG_CUSTOM_REG,
+	HWSIM_ATTR_REG_STRICT_REG,
 	__HWSIM_ATTR_MAX,
 };
 #define HWSIM_ATTR_MAX (__HWSIM_ATTR_MAX - 1)
diff --git a/drivers/net/wireless/mwifiex/11n.c b/drivers/net/wireless/mwifiex/11n.c
index 0b803c05cab3..6261f8c53d44 100644
--- a/drivers/net/wireless/mwifiex/11n.c
+++ b/drivers/net/wireless/mwifiex/11n.c
@@ -483,7 +483,7 @@ mwifiex_get_ba_tbl(struct mwifiex_private *priv, int tid, u8 *ra)
 
 	spin_lock_irqsave(&priv->tx_ba_stream_tbl_lock, flags);
 	list_for_each_entry(tx_ba_tsr_tbl, &priv->tx_ba_stream_tbl_ptr, list) {
-		if (!memcmp(tx_ba_tsr_tbl->ra, ra, ETH_ALEN) &&
+		if (ether_addr_equal_unaligned(tx_ba_tsr_tbl->ra, ra) &&
 		    tx_ba_tsr_tbl->tid == tid) {
 			spin_unlock_irqrestore(&priv->tx_ba_stream_tbl_lock,
 					       flags);
diff --git a/drivers/net/wireless/mwifiex/11n_aggr.c b/drivers/net/wireless/mwifiex/11n_aggr.c
index 1214c587fd08..63211707f939 100644
--- a/drivers/net/wireless/mwifiex/11n_aggr.c
+++ b/drivers/net/wireless/mwifiex/11n_aggr.c
@@ -69,9 +69,9 @@ mwifiex_11n_form_amsdu_pkt(struct sk_buff *skb_aggr,
 	memcpy(&tx_header->eth803_hdr, skb_src->data, dt_offset);
 
 	/* Copy SNAP header */
-	snap.snap_type =
-		le16_to_cpu(*(__le16 *) ((u8 *)skb_src->data + dt_offset));
-	dt_offset += sizeof(u16);
+	snap.snap_type = ((struct ethhdr *)skb_src->data)->h_proto;
+
+	dt_offset += sizeof(__be16);
 
 	memcpy(&tx_header->rfc1042_hdr, &snap, sizeof(struct rfc_1042_hdr));
 
diff --git a/drivers/net/wireless/mwifiex/Kconfig b/drivers/net/wireless/mwifiex/Kconfig
index f7ff4725506a..ecdf34505b54 100644
--- a/drivers/net/wireless/mwifiex/Kconfig
+++ b/drivers/net/wireless/mwifiex/Kconfig
@@ -31,12 +31,12 @@ config MWIFIEX_PCIE
 	  mwifiex_pcie.
 
 config MWIFIEX_USB
-	tristate "Marvell WiFi-Ex Driver for USB8797"
+	tristate "Marvell WiFi-Ex Driver for USB8797/8897"
 	depends on MWIFIEX && USB
 	select FW_LOADER
 	---help---
 	  This adds support for wireless adapters based on Marvell
-	  Avastar 88W8797 chipset with USB interface.
+	  8797/8897 chipset with USB interface.
 
 	  If you choose to build it as a module, it will be called
 	  mwifiex_usb.
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
index aeaea0e3b4c4..8bfc07cd330e 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -50,24 +50,24 @@ static const struct ieee80211_regdomain mwifiex_world_regdom_custom = {
 		REG_RULE(2412-10, 2462+10, 40, 3, 20, 0),
 		/* Channel 12 - 13 */
 		REG_RULE(2467-10, 2472+10, 20, 3, 20,
-			 NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS),
+			 NL80211_RRF_NO_IR),
 		/* Channel 14 */
 		REG_RULE(2484-10, 2484+10, 20, 3, 20,
-			 NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS |
+			 NL80211_RRF_NO_IR |
 			 NL80211_RRF_NO_OFDM),
 		/* Channel 36 - 48 */
 		REG_RULE(5180-10, 5240+10, 40, 3, 20,
-			 NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS),
+			 NL80211_RRF_NO_IR),
 		/* Channel 149 - 165 */
 		REG_RULE(5745-10, 5825+10, 40, 3, 20,
-			 NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS),
+			 NL80211_RRF_NO_IR),
 		/* Channel 52 - 64 */
 		REG_RULE(5260-10, 5320+10, 40, 3, 30,
-			 NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS |
+			 NL80211_RRF_NO_IR |
 			 NL80211_RRF_DFS),
 		/* Channel 100 - 140 */
 		REG_RULE(5500-10, 5700+10, 40, 3, 30,
-			 NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS |
+			 NL80211_RRF_NO_IR |
 			 NL80211_RRF_DFS),
 	}
 };
@@ -184,10 +184,10 @@ mwifiex_form_mgmt_frame(struct sk_buff *skb, const u8 *buf, size_t len)
  */
 static int
 mwifiex_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
-			 struct ieee80211_channel *chan, bool offchan,
-			 unsigned int wait, const u8 *buf, size_t len,
-			 bool no_cck, bool dont_wait_for_ack, u64 *cookie)
+			 struct cfg80211_mgmt_tx_params *params, u64 *cookie)
 {
+	const u8 *buf = params->buf;
+	size_t len = params->len;
 	struct sk_buff *skb;
 	u16 pkt_len;
 	const struct ieee80211_mgmt *mgmt;
@@ -222,6 +222,7 @@ mwifiex_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
 	tx_info = MWIFIEX_SKB_TXCB(skb);
 	tx_info->bss_num = priv->bss_num;
 	tx_info->bss_type = priv->bss_type;
+	tx_info->pkt_len = pkt_len;
 
 	mwifiex_form_mgmt_frame(skb, buf, len);
 	mwifiex_queue_tx_pkt(priv, skb);
@@ -537,23 +538,33 @@ static void mwifiex_reg_notifier(struct wiphy *wiphy,
 				 struct regulatory_request *request)
 {
 	struct mwifiex_adapter *adapter = mwifiex_cfg80211_get_adapter(wiphy);
+	struct mwifiex_private *priv = mwifiex_get_priv(adapter,
+							MWIFIEX_BSS_ROLE_ANY);
 
 	wiphy_dbg(wiphy, "info: cfg80211 regulatory domain callback for %c%c\n",
 		  request->alpha2[0], request->alpha2[1]);
 
-	memcpy(adapter->country_code, request->alpha2, sizeof(request->alpha2));
-
 	switch (request->initiator) {
 	case NL80211_REGDOM_SET_BY_DRIVER:
 	case NL80211_REGDOM_SET_BY_CORE:
 	case NL80211_REGDOM_SET_BY_USER:
-		break;
-		/* Todo: apply driver specific changes in channel flags based
-		   on the request initiator if necessary. */
 	case NL80211_REGDOM_SET_BY_COUNTRY_IE:
 		break;
+	default:
+		wiphy_err(wiphy, "unknown regdom initiator: %d\n",
+			  request->initiator);
+		return;
+	}
+
+	/* Don't send world or same regdom info to firmware */
+	if (strncmp(request->alpha2, "00", 2) &&
+	    strncmp(request->alpha2, adapter->country_code,
+		    sizeof(request->alpha2))) {
+		memcpy(adapter->country_code, request->alpha2,
+		       sizeof(request->alpha2));
+		mwifiex_send_domain_info_cmd_fw(wiphy);
+		mwifiex_dnld_txpwr_table(priv);
 	}
-	mwifiex_send_domain_info_cmd_fw(wiphy);
 }
 
 /*
@@ -1170,10 +1181,10 @@ static int mwifiex_cfg80211_set_bitrate_mask(struct wiphy *wiphy,
 	else
 		bitmap_rates[1] = mask->control[band].legacy;
 
-	/* Fill MCS rates */
-	bitmap_rates[2] = mask->control[band].mcs[0];
+	/* Fill HT MCS rates */
+	bitmap_rates[2] = mask->control[band].ht_mcs[0];
 	if (priv->adapter->hw_dev_mcs_support == HT_STREAM_2X2)
-		bitmap_rates[2] |= mask->control[band].mcs[1] << 8;
+		bitmap_rates[2] |= mask->control[band].ht_mcs[1] << 8;
 
 	return mwifiex_send_cmd_sync(priv, HostCmd_CMD_TX_RATE_CFG,
 				     HostCmd_ACT_GEN_SET, 0, bitmap_rates);
@@ -1968,7 +1979,7 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy,
 		user_scan_cfg->chan_list[i].chan_number = chan->hw_value;
 		user_scan_cfg->chan_list[i].radio_type = chan->band;
 
-		if (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN)
+		if (chan->flags & IEEE80211_CHAN_NO_IR)
 			user_scan_cfg->chan_list[i].scan_type =
 						MWIFIEX_SCAN_TYPE_PASSIVE;
 		else
@@ -2438,7 +2449,7 @@ static int mwifiex_cfg80211_suspend(struct wiphy *wiphy,
 		       ETH_ALEN);
 		mef_entry->filter[filt_num].byte_seq[MWIFIEX_MEF_MAX_BYTESEQ] =
 								ETH_ALEN;
-		mef_entry->filter[filt_num].offset = 14;
+		mef_entry->filter[filt_num].offset = 28;
 		mef_entry->filter[filt_num].filt_type = TYPE_EQ;
 		if (filt_num)
 			mef_entry->filter[filt_num].filt_action = TYPE_OR;
@@ -2666,6 +2677,7 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
 	struct wiphy *wiphy;
 	struct mwifiex_private *priv = adapter->priv[MWIFIEX_BSS_TYPE_STA];
 	u8 *country_code;
+	u32 thr, retry;
 
 	/* create a new wiphy for use with cfg80211 */
 	wiphy = wiphy_new(&mwifiex_cfg80211_ops,
@@ -2702,9 +2714,10 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
 	wiphy->flags |= WIPHY_FLAG_HAVE_AP_SME |
 			WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD |
 			WIPHY_FLAG_AP_UAPSD |
-			WIPHY_FLAG_CUSTOM_REGULATORY |
-			WIPHY_FLAG_STRICT_REGULATORY |
 			WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
+	wiphy->regulatory_flags |=
+			REGULATORY_CUSTOM_REG |
+			REGULATORY_STRICT_REG;
 
 	wiphy_apply_custom_regulatory(wiphy, &mwifiex_world_regdom_custom);
 
@@ -2754,6 +2767,19 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
 				   country_code);
 	}
 
+	mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_SNMP_MIB,
+			      HostCmd_ACT_GEN_GET, FRAG_THRESH_I, &thr);
+	wiphy->frag_threshold = thr;
+	mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_SNMP_MIB,
+			      HostCmd_ACT_GEN_GET, RTS_THRESH_I, &thr);
+	wiphy->rts_threshold = thr;
+	mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_SNMP_MIB,
+			      HostCmd_ACT_GEN_GET, SHORT_RETRY_LIM_I, &retry);
+	wiphy->retry_short = (u8) retry;
+	mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_SNMP_MIB,
+			      HostCmd_ACT_GEN_GET, LONG_RETRY_LIM_I, &retry);
+	wiphy->retry_long = (u8) retry;
+
 	adapter->wiphy = wiphy;
 	return ret;
 }
diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c
index e47f4e3012b8..1ddc8b2e3722 100644
--- a/drivers/net/wireless/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/mwifiex/cmdevt.c
@@ -312,14 +312,14 @@ static int mwifiex_dnld_sleep_confirm_cmd(struct mwifiex_adapter *adapter)
 	}
 	if (GET_BSS_ROLE(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY))
 	    == MWIFIEX_BSS_ROLE_STA) {
-		if (!sleep_cfm_buf->resp_ctrl)
+		if (!le16_to_cpu(sleep_cfm_buf->resp_ctrl))
 			/* Response is not needed for sleep
 			   confirm command */
 			adapter->ps_state = PS_STATE_SLEEP;
 		else
 			adapter->ps_state = PS_STATE_SLEEP_CFM;
 
-		if (!sleep_cfm_buf->resp_ctrl &&
+		if (!le16_to_cpu(sleep_cfm_buf->resp_ctrl) &&
 		    (adapter->is_hs_configured &&
 		     !adapter->sleep_period.period)) {
 			adapter->pm_wakeup_card_req = true;
diff --git a/drivers/net/wireless/mwifiex/decl.h b/drivers/net/wireless/mwifiex/decl.h
index 5c85d7803d00..3a21bd03d6db 100644
--- a/drivers/net/wireless/mwifiex/decl.h
+++ b/drivers/net/wireless/mwifiex/decl.h
@@ -130,6 +130,7 @@ struct mwifiex_txinfo {
 	u8 flags;
 	u8 bss_num;
 	u8 bss_type;
+	u32 pkt_len;
 };
 
 enum mwifiex_wmm_ac_e {
diff --git a/drivers/net/wireless/mwifiex/fw.h b/drivers/net/wireless/mwifiex/fw.h
index c8385ec77a86..5fa932d5f905 100644
--- a/drivers/net/wireless/mwifiex/fw.h
+++ b/drivers/net/wireless/mwifiex/fw.h
@@ -30,7 +30,7 @@ struct rfc_1042_hdr {
 	u8 llc_ssap;
 	u8 llc_ctrl;
 	u8 snap_oui[3];
-	u16 snap_type;
+	__be16 snap_type;
 };
 
 struct rx_packet_hdr {
@@ -226,7 +226,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
 
 /* HW_SPEC fw_cap_info */
 
-#define ISSUPP_11ACENABLED(fw_cap_info) (fw_cap_info & (BIT(13)|BIT(14)))
+#define ISSUPP_11ACENABLED(fw_cap_info) (fw_cap_info & (BIT(12)|BIT(13)))
 
 #define GET_VHTCAP_CHWDSET(vht_cap_info)    ((vht_cap_info >> 2) & 0x3)
 #define GET_VHTNSSMCS(mcs_mapset, nss) ((mcs_mapset >> (2 * (nss - 1))) & 0x3)
@@ -468,8 +468,6 @@ enum P2P_MODES {
 #define MWIFIEX_CRITERIA_UNICAST	BIT(1)
 #define MWIFIEX_CRITERIA_MULTICAST	BIT(3)
 
-#define CFG_DATA_TYPE_CAL		2
-
 struct mwifiex_ie_types_header {
 	__le16 type;
 	__le16 len;
@@ -610,12 +608,12 @@ struct mwifiex_ie_types_tsf_timestamp {
 struct mwifiex_cf_param_set {
 	u8 cfp_cnt;
 	u8 cfp_period;
-	u16 cfp_max_duration;
-	u16 cfp_duration_remaining;
+	__le16 cfp_max_duration;
+	__le16 cfp_duration_remaining;
 } __packed;
 
 struct mwifiex_ibss_param_set {
-	u16 atim_window;
+	__le16 atim_window;
 } __packed;
 
 struct mwifiex_ie_types_ss_param_set {
@@ -627,7 +625,7 @@ struct mwifiex_ie_types_ss_param_set {
 } __packed;
 
 struct mwifiex_fh_param_set {
-	u16 dwell_time;
+	__le16 dwell_time;
 	u8 hop_set;
 	u8 hop_pattern;
 	u8 hop_index;
@@ -684,10 +682,10 @@ struct host_cmd_ds_802_11_key_material {
 } __packed;
 
 struct host_cmd_ds_gen {
-	u16 command;
-	u16 size;
-	u16 seq_num;
-	u16 result;
+	__le16 command;
+	__le16 size;
+	__le16 seq_num;
+	__le16 result;
 };
 
 #define S_DS_GEN        sizeof(struct host_cmd_ds_gen)
@@ -820,8 +818,8 @@ struct ieee_types_cf_param_set {
 	u8 len;
 	u8 cfp_cnt;
 	u8 cfp_period;
-	u16 cfp_max_duration;
-	u16 cfp_duration_remaining;
+	__le16 cfp_max_duration;
+	__le16 cfp_duration_remaining;
 } __packed;
 
 struct ieee_types_ibss_param_set {
@@ -957,7 +955,7 @@ struct mwifiex_hs_config_param {
 } __packed;
 
 struct hs_activate_param {
-	u16 resp_ctrl;
+	__le16 resp_ctrl;
 } __packed;
 
 struct host_cmd_ds_802_11_hs_cfg_enh {
@@ -1131,7 +1129,7 @@ struct host_cmd_ds_802_11_bg_scan_query {
 } __packed;
 
 struct host_cmd_ds_802_11_bg_scan_query_rsp {
-	u32 report_condition;
+	__le32 report_condition;
 	struct host_cmd_ds_802_11_scan_rsp scan_resp;
 } __packed;
 
@@ -1230,7 +1228,7 @@ struct mwifiex_ie_types_wmm_queue_status {
 	struct mwifiex_ie_types_header header;
 	u8 queue_index;
 	u8 disabled;
-	u16 medium_time;
+	__le16 medium_time;
 	u8 flow_required;
 	u8 flow_created;
 	u32 reserved;
@@ -1310,7 +1308,7 @@ struct mwifiex_ie_types_vht_oper {
 	u8 chan_center_freq_1;
 	u8 chan_center_freq_2;
 	/* Basic MCS set map, each 2 bits stands for a NSS */
-	u16 basic_mcs_map;
+	__le16 basic_mcs_map;
 } __packed;
 
 struct mwifiex_ie_types_wmmcap {
@@ -1592,12 +1590,6 @@ struct mwifiex_ie_list {
 	struct mwifiex_ie ie_list[MAX_MGMT_IE_INDEX];
 } __packed;
 
-struct host_cmd_ds_802_11_cfg_data {
-	__le16 action;
-	__le16 type;
-	__le16 data_len;
-} __packed;
-
 struct coalesce_filt_field_param {
 	u8 operation;
 	u8 operand_len;
@@ -1678,7 +1670,6 @@ struct host_cmd_ds_command {
 		struct host_cmd_ds_sys_config uap_sys_config;
 		struct host_cmd_ds_sta_deauth sta_deauth;
 		struct host_cmd_11ac_vht_cfg vht_cfg;
-		struct host_cmd_ds_802_11_cfg_data cfg_data;
 		struct host_cmd_ds_coalesce_cfg coalesce_cfg;
 	} params;
 } __packed;
diff --git a/drivers/net/wireless/mwifiex/init.c b/drivers/net/wireless/mwifiex/init.c
index 6499117fce43..1d0a817f2bf0 100644
--- a/drivers/net/wireless/mwifiex/init.c
+++ b/drivers/net/wireless/mwifiex/init.c
@@ -643,7 +643,8 @@ mwifiex_shutdown_drv(struct mwifiex_adapter *adapter)
 			if (priv)
 				priv->stats.rx_dropped++;
 
-			adapter->if_ops.data_complete(adapter, skb);
+			dev_kfree_skb_any(skb);
+			adapter->if_ops.data_complete(adapter);
 		}
 	}
 
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index 8bb8988c435c..4d79761b9c87 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -648,6 +648,7 @@ mwifiex_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	tx_info = MWIFIEX_SKB_TXCB(skb);
 	tx_info->bss_num = priv->bss_num;
 	tx_info->bss_type = priv->bss_type;
+	tx_info->pkt_len = skb->len;
 
 	/* Record the current time the packet was queued; used to
 	 * determine the amount of time the packet was queued in
@@ -749,7 +750,7 @@ static u16
 mwifiex_netdev_select_wmm_queue(struct net_device *dev, struct sk_buff *skb,
 				void *accel_priv)
 {
-	skb->priority = cfg80211_classify8021d(skb);
+	skb->priority = cfg80211_classify8021d(skb, NULL);
 	return mwifiex_1d_to_wmm_queue[skb->priority];
 }
 
@@ -992,12 +993,8 @@ int mwifiex_remove_card(struct mwifiex_adapter *adapter, struct semaphore *sem)
 		rtnl_unlock();
 	}
 
-	priv = adapter->priv[0];
-	if (!priv || !priv->wdev)
-		goto exit_remove;
-
-	wiphy_unregister(priv->wdev->wiphy);
-	wiphy_free(priv->wdev->wiphy);
+	wiphy_unregister(adapter->wiphy);
+	wiphy_free(adapter->wiphy);
 
 	mwifiex_terminate_workqueue(adapter);
 
diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
index 1d72f13adb9d..d8ad554ce39f 100644
--- a/drivers/net/wireless/mwifiex/main.h
+++ b/drivers/net/wireless/mwifiex/main.h
@@ -32,6 +32,7 @@
 #include <net/lib80211.h>
 #include <linux/firmware.h>
 #include <linux/ctype.h>
+#include <linux/of.h>
 
 #include "decl.h"
 #include "ioctl.h"
@@ -615,7 +616,7 @@ struct mwifiex_if_ops {
 	void (*cleanup_mpa_buf) (struct mwifiex_adapter *);
 	int (*cmdrsp_complete) (struct mwifiex_adapter *, struct sk_buff *);
 	int (*event_complete) (struct mwifiex_adapter *, struct sk_buff *);
-	int (*data_complete) (struct mwifiex_adapter *, struct sk_buff *);
+	int (*data_complete) (struct mwifiex_adapter *);
 	int (*init_fw_port) (struct mwifiex_adapter *);
 	int (*dnld_fw) (struct mwifiex_adapter *, struct mwifiex_fw_image *);
 	void (*card_reset) (struct mwifiex_adapter *);
@@ -739,6 +740,7 @@ struct mwifiex_adapter {
 	u8 scan_delay_cnt;
 	u8 empty_tx_q_cnt;
 	const struct firmware *cal_data;
+	struct device_node *dt_node;
 
 	/* 11AC */
 	u32 is_hw_11ac_capable;
@@ -1151,6 +1153,9 @@ void mwifiex_uap_del_sta_data(struct mwifiex_private *priv,
 void mwifiex_11h_process_join(struct mwifiex_private *priv, u8 **buffer,
 			      struct mwifiex_bssdescriptor *bss_desc);
 int mwifiex_11h_handle_event_chanswann(struct mwifiex_private *priv);
+int mwifiex_dnld_dt_cfgdata(struct mwifiex_private *priv,
+			    struct device_node *node, const char *prefix);
+void mwifiex_dnld_txpwr_table(struct mwifiex_private *priv);
 
 extern const struct ethtool_ops mwifiex_ethtool_ops;
 
diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c
index 8cf7d50a7603..0a8a26e10f01 100644
--- a/drivers/net/wireless/mwifiex/scan.c
+++ b/drivers/net/wireless/mwifiex/scan.c
@@ -515,14 +515,14 @@ mwifiex_scan_create_channel_list(struct mwifiex_private *priv,
 				scan_chan_list[chan_idx].max_scan_time =
 					cpu_to_le16((u16) user_scan_in->
 					chan_list[0].scan_time);
-			else if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN)
+			else if (ch->flags & IEEE80211_CHAN_NO_IR)
 				scan_chan_list[chan_idx].max_scan_time =
 					cpu_to_le16(adapter->passive_scan_time);
 			else
 				scan_chan_list[chan_idx].max_scan_time =
 					cpu_to_le16(adapter->active_scan_time);
 
-			if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN)
+			if (ch->flags & IEEE80211_CHAN_NO_IR)
 				scan_chan_list[chan_idx].chan_scan_mode_bitmap
 					|= MWIFIEX_PASSIVE_SCAN;
 			else
@@ -1681,7 +1681,7 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
 		const u8 *ie_buf;
 		size_t ie_len;
 		u16 channel = 0;
-		u64 fw_tsf = 0;
+		__le64 fw_tsf = 0;
 		u16 beacon_size = 0;
 		u32 curr_bcn_bytes;
 		u32 freq;
@@ -1815,7 +1815,7 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
 					      ie_buf, ie_len, rssi, GFP_KERNEL);
 				bss_priv = (struct mwifiex_bss_priv *)bss->priv;
 				bss_priv->band = band;
-				bss_priv->fw_tsf = fw_tsf;
+				bss_priv->fw_tsf = le64_to_cpu(fw_tsf);
 				if (priv->media_connected &&
 				    !memcmp(bssid,
 					    priv->curr_bss_params.bss_descriptor
diff --git a/drivers/net/wireless/mwifiex/sta_cmd.c b/drivers/net/wireless/mwifiex/sta_cmd.c
index 2181ee283d82..9208a8816b80 100644
--- a/drivers/net/wireless/mwifiex/sta_cmd.c
+++ b/drivers/net/wireless/mwifiex/sta_cmd.c
@@ -354,7 +354,7 @@ mwifiex_cmd_802_11_hs_cfg(struct mwifiex_private *priv,
 	}
 	if (hs_activate) {
 		hs_cfg->action = cpu_to_le16(HS_ACTIVATE);
-		hs_cfg->params.hs_activate.resp_ctrl = RESP_NEEDED;
+		hs_cfg->params.hs_activate.resp_ctrl = cpu_to_le16(RESP_NEEDED);
 	} else {
 		hs_cfg->action = cpu_to_le16(HS_CONFIGURE);
 		hs_cfg->params.hs_config.conditions = hscfg_param->conditions;
@@ -1156,30 +1156,62 @@ static u32 mwifiex_parse_cal_cfg(u8 *src, size_t len, u8 *dst)
 	return d - dst;
 }
 
+int mwifiex_dnld_dt_cfgdata(struct mwifiex_private *priv,
+			    struct device_node *node, const char *prefix)
+{
+#ifdef CONFIG_OF
+	struct property *prop;
+	size_t len = strlen(prefix);
+	int ret;
+
+	/* look for all matching property names */
+	for_each_property_of_node(node, prop) {
+		if (len > strlen(prop->name) ||
+		    strncmp(prop->name, prefix, len))
+			continue;
+
+		/* property header is 6 bytes, data must fit in cmd buffer */
+		if (prop && prop->value && prop->length > 6 &&
+		    prop->length <= MWIFIEX_SIZE_OF_CMD_BUFFER - S_DS_GEN) {
+			ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_CFG_DATA,
+						    HostCmd_ACT_GEN_SET, 0,
+						    prop);
+			if (ret)
+				return ret;
+		}
+	}
+#endif
+	return 0;
+}
+
 /* This function prepares command of set_cfg_data. */
 static int mwifiex_cmd_cfg_data(struct mwifiex_private *priv,
-				struct host_cmd_ds_command *cmd,
-				u16 cmd_action)
+				struct host_cmd_ds_command *cmd, void *data_buf)
 {
-	struct host_cmd_ds_802_11_cfg_data *cfg_data = &cmd->params.cfg_data;
 	struct mwifiex_adapter *adapter = priv->adapter;
-	u32 len, cal_data_offset;
-	u8 *tmp_cmd = (u8 *)cmd;
+	struct property *prop = data_buf;
+	u32 len;
+	u8 *data = (u8 *)cmd + S_DS_GEN;
+	int ret;
 
-	cal_data_offset = S_DS_GEN + sizeof(*cfg_data);
-	if ((adapter->cal_data->data) && (adapter->cal_data->size > 0))
+	if (prop) {
+		len = prop->length;
+		ret = of_property_read_u8_array(adapter->dt_node, prop->name,
+						data, len);
+		if (ret)
+			return ret;
+		dev_dbg(adapter->dev,
+			"download cfg_data from device tree: %s\n", prop->name);
+	} else if (adapter->cal_data->data && adapter->cal_data->size > 0) {
 		len = mwifiex_parse_cal_cfg((u8 *)adapter->cal_data->data,
-					    adapter->cal_data->size,
-					    (u8 *)(tmp_cmd + cal_data_offset));
-	else
+					    adapter->cal_data->size, data);
+		dev_dbg(adapter->dev, "download cfg_data from config file\n");
+	} else {
 		return -1;
-
-	cfg_data->action = cpu_to_le16(cmd_action);
-	cfg_data->type = cpu_to_le16(CFG_DATA_TYPE_CAL);
-	cfg_data->data_len = cpu_to_le16(len);
+	}
 
 	cmd->command = cpu_to_le16(HostCmd_CMD_CFG_DATA);
-	cmd->size = cpu_to_le16(S_DS_GEN + sizeof(*cfg_data) + len);
+	cmd->size = cpu_to_le16(S_DS_GEN + len);
 
 	return 0;
 }
@@ -1267,7 +1299,7 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
 		ret = mwifiex_cmd_get_hw_spec(priv, cmd_ptr);
 		break;
 	case HostCmd_CMD_CFG_DATA:
-		ret = mwifiex_cmd_cfg_data(priv, cmd_ptr, cmd_action);
+		ret = mwifiex_cmd_cfg_data(priv, cmd_ptr, data_buf);
 		break;
 	case HostCmd_CMD_MAC_CONTROL:
 		ret = mwifiex_cmd_mac_control(priv, cmd_ptr, cmd_action,
@@ -1527,7 +1559,19 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta)
 		if (ret)
 			return -1;
 
-		/* Download calibration data to firmware */
+		/* Download calibration data to firmware.
+		 * The cal-data can be read from device tree and/or
+		 * a configuration file and downloaded to firmware.
+		 */
+		adapter->dt_node =
+				of_find_node_by_name(NULL, "marvell_cfgdata");
+		if (adapter->dt_node) {
+			ret = mwifiex_dnld_dt_cfgdata(priv, adapter->dt_node,
+						      "marvell,caldata");
+			if (ret)
+				return -1;
+		}
+
 		if (adapter->cal_data) {
 			ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_CFG_DATA,
 						HostCmd_ACT_GEN_SET, 0, NULL);
diff --git a/drivers/net/wireless/mwifiex/sta_cmdresp.c b/drivers/net/wireless/mwifiex/sta_cmdresp.c
index 2675ca7f8d14..24523e4015cb 100644
--- a/drivers/net/wireless/mwifiex/sta_cmdresp.c
+++ b/drivers/net/wireless/mwifiex/sta_cmdresp.c
@@ -338,8 +338,7 @@ static int mwifiex_get_power_level(struct mwifiex_private *priv, void *data_buf)
 	if (!data_buf)
 		return -1;
 
-	pg_tlv_hdr = (struct mwifiex_types_power_group *)
-		((u8 *) data_buf + sizeof(struct host_cmd_ds_txpwr_cfg));
+	pg_tlv_hdr = (struct mwifiex_types_power_group *)((u8 *)data_buf);
 	pg = (struct mwifiex_power_group *)
 		((u8 *) pg_tlv_hdr + sizeof(struct mwifiex_types_power_group));
 	length = le16_to_cpu(pg_tlv_hdr->length);
@@ -383,19 +382,25 @@ static int mwifiex_ret_tx_power_cfg(struct mwifiex_private *priv,
 	struct mwifiex_types_power_group *pg_tlv_hdr;
 	struct mwifiex_power_group *pg;
 	u16 action = le16_to_cpu(txp_cfg->action);
+	u16 tlv_buf_left;
 
-	switch (action) {
-	case HostCmd_ACT_GEN_GET:
-		pg_tlv_hdr = (struct mwifiex_types_power_group *)
-			((u8 *) txp_cfg +
-			 sizeof(struct host_cmd_ds_txpwr_cfg));
+	pg_tlv_hdr = (struct mwifiex_types_power_group *)
+		((u8 *)txp_cfg +
+		 sizeof(struct host_cmd_ds_txpwr_cfg));
 
-		pg = (struct mwifiex_power_group *)
-			((u8 *) pg_tlv_hdr +
-			 sizeof(struct mwifiex_types_power_group));
+	pg = (struct mwifiex_power_group *)
+		((u8 *)pg_tlv_hdr +
+		 sizeof(struct mwifiex_types_power_group));
 
+	tlv_buf_left = le16_to_cpu(resp->size) - S_DS_GEN - sizeof(*txp_cfg);
+	if (tlv_buf_left <
+			le16_to_cpu(pg_tlv_hdr->length) + sizeof(*pg_tlv_hdr))
+		return 0;
+
+	switch (action) {
+	case HostCmd_ACT_GEN_GET:
 		if (adapter->hw_status == MWIFIEX_HW_STATUS_INITIALIZING)
-			mwifiex_get_power_level(priv, txp_cfg);
+			mwifiex_get_power_level(priv, pg_tlv_hdr);
 
 		priv->tx_power_level = (u16) pg->power_min;
 		break;
@@ -404,14 +409,6 @@ static int mwifiex_ret_tx_power_cfg(struct mwifiex_private *priv,
 		if (!le32_to_cpu(txp_cfg->mode))
 			break;
 
-		pg_tlv_hdr = (struct mwifiex_types_power_group *)
-			((u8 *) txp_cfg +
-			 sizeof(struct host_cmd_ds_txpwr_cfg));
-
-		pg = (struct mwifiex_power_group *)
-			((u8 *) pg_tlv_hdr +
-			 sizeof(struct mwifiex_types_power_group));
-
 		if (pg->power_max == pg->power_min)
 			priv->tx_power_level = (u16) pg->power_min;
 		break;
@@ -785,8 +782,7 @@ static int mwifiex_ret_ibss_coalescing_status(struct mwifiex_private *priv,
 	}
 
 	/* If BSSID is diff, modify current BSS parameters */
-	if (memcmp(priv->curr_bss_params.bss_descriptor.mac_address,
-		   ibss_coal_resp->bssid, ETH_ALEN)) {
+	if (!ether_addr_equal(priv->curr_bss_params.bss_descriptor.mac_address, ibss_coal_resp->bssid)) {
 		/* BSSID */
 		memcpy(priv->curr_bss_params.bss_descriptor.mac_address,
 		       ibss_coal_resp->bssid, ETH_ALEN);
diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c
index a09398fe9e2a..c5cb2ed19ec2 100644
--- a/drivers/net/wireless/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/mwifiex/sta_ioctl.c
@@ -184,6 +184,16 @@ int mwifiex_fill_new_bss_desc(struct mwifiex_private *priv,
 	return mwifiex_update_bss_desc_with_ie(priv->adapter, bss_desc);
 }
 
+void mwifiex_dnld_txpwr_table(struct mwifiex_private *priv)
+{
+	if (priv->adapter->dt_node) {
+		char txpwr[] = {"marvell,00_txpwrlimit"};
+
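+		/* replace the "00" placeholder with the current country code */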
+		memcpy(&txpwr[8], priv->adapter->country_code, 2);
+		mwifiex_dnld_dt_cfgdata(priv, priv->adapter->dt_node, txpwr);
+	}
+}
+
 static int mwifiex_process_country_ie(struct mwifiex_private *priv,
 				      struct cfg80211_bss *bss)
 {
@@ -205,6 +215,14 @@ static int mwifiex_process_country_ie(struct mwifiex_private *priv,
 		return 0;
 	}
 
+	if (!strncmp(priv->adapter->country_code, &country_ie[2], 2)) {
+		rcu_read_unlock();
+		wiphy_dbg(priv->wdev->wiphy,
+			  "11D: skip setting domain info in FW\n");
+		return 0;
+	}
+	memcpy(priv->adapter->country_code, &country_ie[2], 2);
+
 	domain_info->country_code[0] = country_ie[2];
 	domain_info->country_code[1] = country_ie[3];
 	domain_info->country_code[2] = ' ';
@@ -226,6 +244,8 @@ static int mwifiex_process_country_ie(struct mwifiex_private *priv,
 		return -1;
 	}
 
+	mwifiex_dnld_txpwr_table(priv);
+
 	return 0;
 }
 
diff --git a/drivers/net/wireless/mwifiex/sta_rx.c b/drivers/net/wireless/mwifiex/sta_rx.c
index bb22664923ef..4651d676df38 100644
--- a/drivers/net/wireless/mwifiex/sta_rx.c
+++ b/drivers/net/wireless/mwifiex/sta_rx.c
@@ -36,12 +36,12 @@ mwifiex_discard_gratuitous_arp(struct mwifiex_private *priv,
 			       struct sk_buff *skb)
 {
 	const struct mwifiex_arp_eth_header *arp;
-	struct ethhdr *eth_hdr;
+	struct ethhdr *eth;
 	struct ipv6hdr *ipv6;
 	struct icmp6hdr *icmpv6;
 
-	eth_hdr = (struct ethhdr *)skb->data;
-	switch (ntohs(eth_hdr->h_proto)) {
+	eth = (struct ethhdr *)skb->data;
+	switch (ntohs(eth->h_proto)) {
 	case ETH_P_ARP:
 		arp = (void *)(skb->data + sizeof(struct ethhdr));
 		if (arp->hdr.ar_op == htons(ARPOP_REPLY) ||
@@ -87,16 +87,19 @@ int mwifiex_process_rx_packet(struct mwifiex_private *priv,
 	struct rx_packet_hdr *rx_pkt_hdr;
 	struct rxpd *local_rx_pd;
 	int hdr_chop;
-	struct ethhdr *eth_hdr;
-	u8 rfc1042_eth_hdr[ETH_ALEN] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
+	struct ethhdr *eth;
 
 	local_rx_pd = (struct rxpd *) (skb->data);
 
 	rx_pkt_hdr = (void *)local_rx_pd +
 		     le16_to_cpu(local_rx_pd->rx_pkt_offset);
 
-	if (!memcmp(&rx_pkt_hdr->rfc1042_hdr,
-		    rfc1042_eth_hdr, sizeof(rfc1042_eth_hdr))) {
+	if ((!memcmp(&rx_pkt_hdr->rfc1042_hdr, bridge_tunnel_header,
+		     sizeof(bridge_tunnel_header))) ||
+	    (!memcmp(&rx_pkt_hdr->rfc1042_hdr, rfc1042_header,
+		     sizeof(rfc1042_header)) &&
+	     ntohs(rx_pkt_hdr->rfc1042_hdr.snap_type) != ETH_P_AARP &&
+	     ntohs(rx_pkt_hdr->rfc1042_hdr.snap_type) != ETH_P_IPX)) {
 		/*
 		 *  Replace the 803 header and rfc1042 header (llc/snap) with an
 		 *    EthernetII header, keep the src/dst and snap_type
@@ -106,7 +109,7 @@ int mwifiex_process_rx_packet(struct mwifiex_private *priv,
 		 *  To create the Ethernet II, just move the src, dst address
 		 *    right before the snap_type.
 		 */
-		eth_hdr = (struct ethhdr *)
+		eth = (struct ethhdr *)
 			((u8 *) &rx_pkt_hdr->eth803_hdr
 			 + sizeof(rx_pkt_hdr->eth803_hdr) +
 			 sizeof(rx_pkt_hdr->rfc1042_hdr)
@@ -114,14 +117,14 @@ int mwifiex_process_rx_packet(struct mwifiex_private *priv,
 			 - sizeof(rx_pkt_hdr->eth803_hdr.h_source)
 			 - sizeof(rx_pkt_hdr->rfc1042_hdr.snap_type));
 
-		memcpy(eth_hdr->h_source, rx_pkt_hdr->eth803_hdr.h_source,
-		       sizeof(eth_hdr->h_source));
-		memcpy(eth_hdr->h_dest, rx_pkt_hdr->eth803_hdr.h_dest,
-		       sizeof(eth_hdr->h_dest));
+		memcpy(eth->h_source, rx_pkt_hdr->eth803_hdr.h_source,
+		       sizeof(eth->h_source));
+		memcpy(eth->h_dest, rx_pkt_hdr->eth803_hdr.h_dest,
+		       sizeof(eth->h_dest));
 
 		/* Chop off the rxpd + the excess memory from the 802.2/llc/snap
 		   header that was removed. */
-		hdr_chop = (u8 *) eth_hdr - (u8 *) local_rx_pd;
+		hdr_chop = (u8 *) eth - (u8 *) local_rx_pd;
 	} else {
 		/* Chop off the rxpd */
 		hdr_chop = (u8 *) &rx_pkt_hdr->eth803_hdr -
@@ -185,12 +188,7 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_private *priv,
 			"wrong rx packet: len=%d, rx_pkt_offset=%d, rx_pkt_length=%d\n",
 			skb->len, rx_pkt_offset, rx_pkt_length);
 		priv->stats.rx_dropped++;
-
-		if (adapter->if_ops.data_complete)
-			adapter->if_ops.data_complete(adapter, skb);
-		else
-			dev_kfree_skb_any(skb);
-
+		dev_kfree_skb_any(skb);
 		return ret;
 	}
 
@@ -226,7 +224,7 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_private *priv,
 	 * directly to os. Don't pass thru rx reordering
 	 */
 	if (!IS_11N_ENABLED(priv) ||
-	    memcmp(priv->curr_addr, rx_pkt_hdr->eth803_hdr.h_dest, ETH_ALEN)) {
+	    !ether_addr_equal_unaligned(priv->curr_addr, rx_pkt_hdr->eth803_hdr.h_dest)) {
 		mwifiex_process_rx_packet(priv, skb);
 		return ret;
 	}
@@ -244,12 +242,8 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_private *priv,
 	ret = mwifiex_11n_rx_reorder_pkt(priv, seq_num, local_rx_pd->priority,
 					 ta, (u8) rx_pkt_type, skb);
 
-	if (ret || (rx_pkt_type == PKT_TYPE_BAR)) {
-		if (adapter->if_ops.data_complete)
-			adapter->if_ops.data_complete(adapter, skb);
-		else
-			dev_kfree_skb_any(skb);
-	}
+	if (ret || (rx_pkt_type == PKT_TYPE_BAR))
+		dev_kfree_skb_any(skb);
 
 	if (ret)
 		priv->stats.rx_dropped++;
diff --git a/drivers/net/wireless/mwifiex/sta_tx.c b/drivers/net/wireless/mwifiex/sta_tx.c
index 7b581af24f5f..354d64c9606f 100644
--- a/drivers/net/wireless/mwifiex/sta_tx.c
+++ b/drivers/net/wireless/mwifiex/sta_tx.c
@@ -148,6 +148,7 @@ int mwifiex_send_null_packet(struct mwifiex_private *priv, u8 flags)
 	tx_info = MWIFIEX_SKB_TXCB(skb);
 	tx_info->bss_num = priv->bss_num;
 	tx_info->bss_type = priv->bss_type;
+	tx_info->pkt_len = data_len - (sizeof(struct txpd) + INTF_HEADER_LEN);
 	skb_reserve(skb, sizeof(struct txpd) + INTF_HEADER_LEN);
 	skb_push(skb, sizeof(struct txpd));
 
diff --git a/drivers/net/wireless/mwifiex/txrx.c b/drivers/net/wireless/mwifiex/txrx.c
index 8f923d0d2ba6..37f26afd4314 100644
--- a/drivers/net/wireless/mwifiex/txrx.c
+++ b/drivers/net/wireless/mwifiex/txrx.c
@@ -40,6 +40,7 @@ int mwifiex_handle_rx_packet(struct mwifiex_adapter *adapter,
 		mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
 	struct rxpd *local_rx_pd;
 	struct mwifiex_rxinfo *rx_info = MWIFIEX_SKB_RXCB(skb);
+	int ret;
 
 	local_rx_pd = (struct rxpd *) (skb->data);
 	/* Get the BSS number from rxpd, get corresponding priv */
@@ -58,9 +59,15 @@ int mwifiex_handle_rx_packet(struct mwifiex_adapter *adapter,
 	rx_info->bss_type = priv->bss_type;
 
 	if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP)
-		return mwifiex_process_uap_rx_packet(priv, skb);
+		ret = mwifiex_process_uap_rx_packet(priv, skb);
+	else
+		ret = mwifiex_process_sta_rx_packet(priv, skb);
+
+	/* Decrement RX pending counter for each packet */
+	if (adapter->if_ops.data_complete)
+		adapter->if_ops.data_complete(adapter);
 
-	return mwifiex_process_sta_rx_packet(priv, skb);
+	return ret;
 }
 EXPORT_SYMBOL_GPL(mwifiex_handle_rx_packet);
 
@@ -105,7 +112,7 @@ int mwifiex_process_tx(struct mwifiex_private *priv, struct sk_buff *skb,
 
 	switch (ret) {
 	case -ENOSR:
-		dev_err(adapter->dev, "data: -ENOSR is returned\n");
+		dev_dbg(adapter->dev, "data: -ENOSR is returned\n");
 		break;
 	case -EBUSY:
 		if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) &&
@@ -168,7 +175,7 @@ int mwifiex_write_data_complete(struct mwifiex_adapter *adapter,
 	mwifiex_set_trans_start(priv->netdev);
 	if (!status) {
 		priv->stats.tx_packets++;
-		priv->stats.tx_bytes += skb->len;
+		priv->stats.tx_bytes += tx_info->pkt_len;
 		if (priv->tx_timeout_cnt)
 			priv->tx_timeout_cnt = 0;
 	} else {
diff --git a/drivers/net/wireless/mwifiex/uap_txrx.c b/drivers/net/wireless/mwifiex/uap_txrx.c
index 92f76d655e6c..3c74eb254927 100644
--- a/drivers/net/wireless/mwifiex/uap_txrx.c
+++ b/drivers/net/wireless/mwifiex/uap_txrx.c
@@ -98,7 +98,6 @@ static void mwifiex_uap_queue_bridged_pkt(struct mwifiex_private *priv,
 	int hdr_chop;
 	struct timeval tv;
 	struct ethhdr *p_ethhdr;
-	u8 rfc1042_eth_hdr[ETH_ALEN] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
 
 	uap_rx_pd = (struct uap_rxpd *)(skb->data);
 	rx_pkt_hdr = (void *)uap_rx_pd + le16_to_cpu(uap_rx_pd->rx_pkt_offset);
@@ -112,8 +111,12 @@ static void mwifiex_uap_queue_bridged_pkt(struct mwifiex_private *priv,
 		return;
 	}
 
-	if (!memcmp(&rx_pkt_hdr->rfc1042_hdr,
-		    rfc1042_eth_hdr, sizeof(rfc1042_eth_hdr))) {
+	if ((!memcmp(&rx_pkt_hdr->rfc1042_hdr, bridge_tunnel_header,
+		     sizeof(bridge_tunnel_header))) ||
+	    (!memcmp(&rx_pkt_hdr->rfc1042_hdr, rfc1042_header,
+		     sizeof(rfc1042_header)) &&
+	     ntohs(rx_pkt_hdr->rfc1042_hdr.snap_type) != ETH_P_AARP &&
+	     ntohs(rx_pkt_hdr->rfc1042_hdr.snap_type) != ETH_P_IPX)) {
 		/* Replace the 803 header and rfc1042 header (llc/snap) with
 		 * an Ethernet II header, keep the src/dst and snap_type
 		 * (ethertype).
@@ -144,7 +147,7 @@ static void mwifiex_uap_queue_bridged_pkt(struct mwifiex_private *priv,
 		hdr_chop = (u8 *)&rx_pkt_hdr->eth803_hdr - (u8 *)uap_rx_pd;
 	}
 
-	/* Chop off the leading header bytes so the it points
+	/* Chop off the leading header bytes so that it points
 	 * to the start of either the reconstructed EthII frame
 	 * or the 802.2/llc/snap frame.
 	 */
@@ -176,6 +179,19 @@ static void mwifiex_uap_queue_bridged_pkt(struct mwifiex_private *priv,
 	tx_info->bss_type = priv->bss_type;
 	tx_info->flags |= MWIFIEX_BUF_FLAG_BRIDGED_PKT;
 
+	if (is_unicast_ether_addr(rx_pkt_hdr->eth803_hdr.h_dest)) {
+		/* Update bridge packet statistics as the
+		 * packet is not going to kernel/upper layer.
+		 */
+		priv->stats.rx_bytes += skb->len;
+		priv->stats.rx_packets++;
+
+		/* Sending bridge packet to TX queue, so save the packet
+		 * length in TXCB to update statistics in TX complete.
+		 */
+		tx_info->pkt_len = skb->len;
+	}
+
 	do_gettimeofday(&tv);
 	skb->tstamp = timeval_to_ktime(tv);
 	mwifiex_wmm_add_buf_txqueue(priv, skb);
@@ -264,12 +280,7 @@ int mwifiex_process_uap_rx_packet(struct mwifiex_private *priv,
 			skb->len, le16_to_cpu(uap_rx_pd->rx_pkt_offset),
 			le16_to_cpu(uap_rx_pd->rx_pkt_length));
 		priv->stats.rx_dropped++;
-
-		if (adapter->if_ops.data_complete)
-			adapter->if_ops.data_complete(adapter, skb);
-		else
-			dev_kfree_skb_any(skb);
-
+		dev_kfree_skb_any(skb);
 		return 0;
 	}
 
@@ -323,12 +334,8 @@ int mwifiex_process_uap_rx_packet(struct mwifiex_private *priv,
 					 uap_rx_pd->priority, ta, pkt_type,
 					 skb);
 
-	if (ret || (rx_pkt_type == PKT_TYPE_BAR)) {
-		if (adapter->if_ops.data_complete)
-			adapter->if_ops.data_complete(adapter, skb);
-		else
-			dev_kfree_skb_any(skb);
-	}
+	if (ret || (rx_pkt_type == PKT_TYPE_BAR))
+		dev_kfree_skb_any(skb);
 
 	if (ret)
 		priv->stats.rx_dropped++;
diff --git a/drivers/net/wireless/mwifiex/usb.c b/drivers/net/wireless/mwifiex/usb.c
index edf5b7a24900..e8ebbd4bc3cd 100644
--- a/drivers/net/wireless/mwifiex/usb.c
+++ b/drivers/net/wireless/mwifiex/usb.c
@@ -22,15 +22,21 @@
 
 #define USB_VERSION	"1.0"
 
-static const char usbdriver_name[] = "usb8797";
+static const char usbdriver_name[] = "usb8xxx";
 
 static struct mwifiex_if_ops usb_ops;
 static struct semaphore add_remove_card_sem;
 static struct usb_card_rec *usb_card;
 
 static struct usb_device_id mwifiex_usb_table[] = {
-	{USB_DEVICE(USB8797_VID, USB8797_PID_1)},
-	{USB_DEVICE_AND_INTERFACE_INFO(USB8797_VID, USB8797_PID_2,
+	/* 8797 */
+	{USB_DEVICE(USB8XXX_VID, USB8797_PID_1)},
+	{USB_DEVICE_AND_INTERFACE_INFO(USB8XXX_VID, USB8797_PID_2,
+				       USB_CLASS_VENDOR_SPEC,
+				       USB_SUBCLASS_VENDOR_SPEC, 0xff)},
+	/* 8897 */
+	{USB_DEVICE(USB8XXX_VID, USB8897_PID_1)},
+	{USB_DEVICE_AND_INTERFACE_INFO(USB8XXX_VID, USB8897_PID_2,
 				       USB_CLASS_VENDOR_SPEC,
 				       USB_SUBCLASS_VENDOR_SPEC, 0xff)},
 	{ }	/* Terminating entry */
@@ -343,10 +349,20 @@ static int mwifiex_usb_probe(struct usb_interface *intf,
 		 id_vendor, id_product, bcd_device);
 
 	/* PID_1 is used for firmware downloading only */
-	if (id_product == USB8797_PID_1)
-		card->usb_boot_state = USB8797_FW_DNLD;
-	else
-		card->usb_boot_state = USB8797_FW_READY;
+	switch (id_product) {
+	case USB8797_PID_1:
+	case USB8897_PID_1:
+		card->usb_boot_state = USB8XXX_FW_DNLD;
+		break;
+	case USB8797_PID_2:
+	case USB8897_PID_2:
+		card->usb_boot_state = USB8XXX_FW_READY;
+		break;
+	default:
+		pr_warning("unknown id_product %#x\n", id_product);
+		card->usb_boot_state = USB8XXX_FW_DNLD;
+		break;
+	}
 
 	card->udev = udev;
 	card->intf = intf;
@@ -755,9 +771,20 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
 
 	card->adapter = adapter;
 	adapter->dev = &card->udev->dev;
-	strcpy(adapter->fw_name, USB8797_DEFAULT_FW_NAME);
 	usb_card = card;
 
+	switch (le16_to_cpu(card->udev->descriptor.idProduct)) {
+	case USB8897_PID_1:
+	case USB8897_PID_2:
+		strcpy(adapter->fw_name, USB8897_DEFAULT_FW_NAME);
+		break;
+	case USB8797_PID_1:
+	case USB8797_PID_2:
+	default:
+		strcpy(adapter->fw_name, USB8797_DEFAULT_FW_NAME);
+		break;
+	}
+
 	return 0;
 }
 
@@ -773,7 +800,7 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
 {
 	int ret = 0;
 	u8 *firmware = fw->fw_buf, *recv_buff;
-	u32 retries = USB8797_FW_MAX_RETRY, dlen;
+	u32 retries = USB8XXX_FW_MAX_RETRY, dlen;
 	u32 fw_seqnum = 0, tlen = 0, dnld_cmd = 0;
 	struct fw_data *fwdata;
 	struct fw_sync_header sync_fw;
@@ -875,7 +902,7 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
 				continue;
 			}
 
-			retries = USB8797_FW_MAX_RETRY;
+			retries = USB8XXX_FW_MAX_RETRY;
 			break;
 		}
 		fw_seqnum++;
@@ -899,13 +926,13 @@ static int mwifiex_usb_dnld_fw(struct mwifiex_adapter *adapter,
 	int ret;
 	struct usb_card_rec *card = (struct usb_card_rec *)adapter->card;
 
-	if (card->usb_boot_state == USB8797_FW_DNLD) {
+	if (card->usb_boot_state == USB8XXX_FW_DNLD) {
 		ret = mwifiex_prog_fw_w_helper(adapter, fw);
 		if (ret)
 			return -1;
 
 		/* Boot state changes after successful firmware download */
-		if (card->usb_boot_state == USB8797_FW_DNLD)
+		if (card->usb_boot_state == USB8XXX_FW_DNLD)
 			return -1;
 	}
 
@@ -938,11 +965,9 @@ static int mwifiex_usb_cmd_event_complete(struct mwifiex_adapter *adapter,
 	return 0;
 }
 
-static int mwifiex_usb_data_complete(struct mwifiex_adapter *adapter,
-				     struct sk_buff *skb)
+static int mwifiex_usb_data_complete(struct mwifiex_adapter *adapter)
 {
 	atomic_dec(&adapter->rx_pending);
-	dev_kfree_skb_any(skb);
 
 	return 0;
 }
@@ -1041,4 +1066,5 @@ MODULE_AUTHOR("Marvell International Ltd.");
 MODULE_DESCRIPTION("Marvell WiFi-Ex USB Driver version" USB_VERSION);
 MODULE_VERSION(USB_VERSION);
 MODULE_LICENSE("GPL v2");
-MODULE_FIRMWARE("mrvl/usb8797_uapsta.bin");
+MODULE_FIRMWARE(USB8797_DEFAULT_FW_NAME);
+MODULE_FIRMWARE(USB8897_DEFAULT_FW_NAME);
diff --git a/drivers/net/wireless/mwifiex/usb.h b/drivers/net/wireless/mwifiex/usb.h
index 98c4316cd1a9..15b73d12e998 100644
--- a/drivers/net/wireless/mwifiex/usb.h
+++ b/drivers/net/wireless/mwifiex/usb.h
@@ -22,19 +22,23 @@
 
 #include <linux/usb.h>
 
-#define USB8797_VID		0x1286
+#define USB8XXX_VID		0x1286
+
 #define USB8797_PID_1		0x2043
 #define USB8797_PID_2		0x2044
+#define USB8897_PID_1		0x2045
+#define USB8897_PID_2		0x2046
 
-#define USB8797_FW_DNLD		1
-#define USB8797_FW_READY	2
-#define USB8797_FW_MAX_RETRY	3
+#define USB8XXX_FW_DNLD		1
+#define USB8XXX_FW_READY	2
+#define USB8XXX_FW_MAX_RETRY	3
 
 #define MWIFIEX_TX_DATA_URB	6
 #define MWIFIEX_RX_DATA_URB	6
 #define MWIFIEX_USB_TIMEOUT	100
 
 #define USB8797_DEFAULT_FW_NAME	"mrvl/usb8797_uapsta.bin"
+#define USB8897_DEFAULT_FW_NAME	"mrvl/usb8897_uapsta.bin"
 
 #define FW_DNLD_TX_BUF_SIZE	620
 #define FW_DNLD_RX_BUF_SIZE	2048
diff --git a/drivers/net/wireless/mwifiex/util.c b/drivers/net/wireless/mwifiex/util.c
index 5d9e150f4111..9b82e225880c 100644
--- a/drivers/net/wireless/mwifiex/util.c
+++ b/drivers/net/wireless/mwifiex/util.c
@@ -191,6 +191,9 @@ int mwifiex_recv_packet(struct mwifiex_private *priv, struct sk_buff *skb)
 	if (!skb)
 		return -1;
 
+	priv->stats.rx_bytes += skb->len;
+	priv->stats.rx_packets++;
+
 	skb->dev = priv->netdev;
 	skb->protocol = eth_type_trans(skb, priv->netdev);
 	skb->ip_summed = CHECKSUM_NONE;
@@ -217,8 +220,6 @@ int mwifiex_recv_packet(struct mwifiex_private *priv, struct sk_buff *skb)
 	    (skb->truesize > MWIFIEX_RX_DATA_BUF_SIZE))
 		skb->truesize += (skb->len - MWIFIEX_RX_DATA_BUF_SIZE);
 
-	priv->stats.rx_bytes += skb->len;
-	priv->stats.rx_packets++;
 	if (in_interrupt())
 		netif_rx(skb);
 	else
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index b953ad621e0b..4987c3f942ce 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -9,7 +9,6 @@
  * warranty of any kind, whether express or implied.
  */
 
-#include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
@@ -1258,7 +1257,7 @@ mwl8k_capture_bssid(struct mwl8k_priv *priv, struct ieee80211_hdr *wh)
 {
 	return priv->capture_beacon &&
 		ieee80211_is_beacon(wh->frame_control) &&
-		ether_addr_equal(wh->addr3, priv->capture_bssid);
+		ether_addr_equal_64bits(wh->addr3, priv->capture_bssid);
 }
 
 static inline void mwl8k_save_beacon(struct ieee80211_hw *hw,
@@ -5893,8 +5892,6 @@ static int mwl8k_firmware_load_success(struct mwl8k_priv *priv)
 
 	hw->extra_tx_headroom -= priv->ap_fw ? REDUCED_TX_HEADROOM : 0;
 
-	hw->channel_change_time = 10;
-
 	hw->queues = MWL8K_TX_WMM_QUEUES;
 
 	/* Set rssi values to dBm */
diff --git a/drivers/net/wireless/orinoco/hermes.c b/drivers/net/wireless/orinoco/hermes.c
index 75c15bc7b34c..43790fbea0e0 100644
--- a/drivers/net/wireless/orinoco/hermes.c
+++ b/drivers/net/wireless/orinoco/hermes.c
@@ -40,7 +40,6 @@
 
 #include <linux/module.h>
 #include <linux/kernel.h>
-#include <linux/init.h>
 #include <linux/delay.h>
 
 #include "hermes.h"
diff --git a/drivers/net/wireless/orinoco/orinoco_cs.c b/drivers/net/wireless/orinoco/orinoco_cs.c
index d21d95939316..c0a27377d9e2 100644
--- a/drivers/net/wireless/orinoco/orinoco_cs.c
+++ b/drivers/net/wireless/orinoco/orinoco_cs.c
@@ -15,7 +15,6 @@
 
 #include <linux/module.h>
 #include <linux/kernel.h>
-#include <linux/init.h>
 #include <linux/delay.h>
 #include <pcmcia/cistpl.h>
 #include <pcmcia/cisreg.h>
diff --git a/drivers/net/wireless/orinoco/orinoco_usb.c b/drivers/net/wireless/orinoco/orinoco_usb.c
index bdfe637953f4..f9805c9353d2 100644
--- a/drivers/net/wireless/orinoco/orinoco_usb.c
+++ b/drivers/net/wireless/orinoco/orinoco_usb.c
@@ -52,7 +52,6 @@
 #include <linux/signal.h>
 #include <linux/errno.h>
 #include <linux/poll.h>
-#include <linux/init.h>
 #include <linux/slab.h>
 #include <linux/fcntl.h>
 #include <linux/spinlock.h>
diff --git a/drivers/net/wireless/orinoco/spectrum_cs.c b/drivers/net/wireless/orinoco/spectrum_cs.c
index e2264bc12ebf..b60048c95e0a 100644
--- a/drivers/net/wireless/orinoco/spectrum_cs.c
+++ b/drivers/net/wireless/orinoco/spectrum_cs.c
@@ -23,7 +23,6 @@
 
 #include <linux/module.h>
 #include <linux/kernel.h>
-#include <linux/init.h>
 #include <linux/delay.h>
 #include <pcmcia/cistpl.h>
 #include <pcmcia/cisreg.h>
diff --git a/drivers/net/wireless/p54/eeprom.c b/drivers/net/wireless/p54/eeprom.c
index d43e3740e45d..0fe67d2da208 100644
--- a/drivers/net/wireless/p54/eeprom.c
+++ b/drivers/net/wireless/p54/eeprom.c
@@ -16,7 +16,6 @@
  * published by the Free Software Foundation.
  */
 
-#include <linux/init.h>
 #include <linux/firmware.h>
 #include <linux/etherdevice.h>
 #include <linux/sort.h>
diff --git a/drivers/net/wireless/p54/fwio.c b/drivers/net/wireless/p54/fwio.c
index b3879fbf5368..bc065e8e348b 100644
--- a/drivers/net/wireless/p54/fwio.c
+++ b/drivers/net/wireless/p54/fwio.c
@@ -16,7 +16,6 @@
  * published by the Free Software Foundation.
  */
 
-#include <linux/init.h>
 #include <linux/slab.h>
 #include <linux/firmware.h>
 #include <linux/etherdevice.h>
diff --git a/drivers/net/wireless/p54/led.c b/drivers/net/wireless/p54/led.c
index 3837e1eec5f4..1f6fd5ff5531 100644
--- a/drivers/net/wireless/p54/led.c
+++ b/drivers/net/wireless/p54/led.c
@@ -16,7 +16,6 @@
  * published by the Free Software Foundation.
  */
 
-#include <linux/init.h>
 #include <linux/firmware.h>
 #include <linux/etherdevice.h>
 
diff --git a/drivers/net/wireless/p54/main.c b/drivers/net/wireless/p54/main.c
index 067e6f2fd050..eede90b63f84 100644
--- a/drivers/net/wireless/p54/main.c
+++ b/drivers/net/wireless/p54/main.c
@@ -16,7 +16,6 @@
  * published by the Free Software Foundation.
  */
 
-#include <linux/init.h>
 #include <linux/slab.h>
 #include <linux/firmware.h>
 #include <linux/etherdevice.h>
@@ -757,7 +756,6 @@ struct ieee80211_hw *p54_init_common(size_t priv_data_len)
 				      BIT(NL80211_IFTYPE_AP) |
 				      BIT(NL80211_IFTYPE_MESH_POINT);
 
-	dev->channel_change_time = 1000;	/* TODO: find actual value */
 	priv->beacon_req_id = cpu_to_le32(0);
 	priv->tx_stats[P54_QUEUE_BEACON].limit = 1;
 	priv->tx_stats[P54_QUEUE_FWSCAN].limit = 1;
diff --git a/drivers/net/wireless/p54/net2280.h b/drivers/net/wireless/p54/net2280.h
index e3ed893b5aaf..aedfaf24f386 100644
--- a/drivers/net/wireless/p54/net2280.h
+++ b/drivers/net/wireless/p54/net2280.h
@@ -20,8 +20,7 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 /*-------------------------------------------------------------------------*/
diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c
index f9a07b0d83ac..d411de409050 100644
--- a/drivers/net/wireless/p54/p54pci.c
+++ b/drivers/net/wireless/p54/p54pci.c
@@ -13,7 +13,6 @@
  * published by the Free Software Foundation.
  */
 
-#include <linux/init.h>
 #include <linux/pci.h>
 #include <linux/slab.h>
 #include <linux/firmware.h>
diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c
index e328d3058c41..6e635cfa24c8 100644
--- a/drivers/net/wireless/p54/p54usb.c
+++ b/drivers/net/wireless/p54/p54usb.c
@@ -12,7 +12,6 @@
  * published by the Free Software Foundation.
  */
 
-#include <linux/init.h>
 #include <linux/usb.h>
 #include <linux/pci.h>
 #include <linux/slab.h>
diff --git a/drivers/net/wireless/p54/txrx.c b/drivers/net/wireless/p54/txrx.c
index f95de0d16216..153c61539ec8 100644
--- a/drivers/net/wireless/p54/txrx.c
+++ b/drivers/net/wireless/p54/txrx.c
@@ -17,7 +17,6 @@
  */
 
 #include <linux/export.h>
-#include <linux/init.h>
 #include <linux/firmware.h>
 #include <linux/etherdevice.h>
 #include <asm/div64.h>
@@ -308,7 +307,7 @@ static void p54_pspoll_workaround(struct p54_common *priv, struct sk_buff *skb)
 		return;
 
 	/* only consider beacons from the associated BSSID */
-	if (!ether_addr_equal(hdr->addr3, priv->bssid))
+	if (!ether_addr_equal_64bits(hdr->addr3, priv->bssid))
 		return;
 
 	tim = p54_find_ie(skb, WLAN_EID_TIM);
@@ -587,7 +586,7 @@ static void p54_rx_stats(struct p54_common *priv, struct sk_buff *skb)
 	chan = priv->curchan;
 	if (chan) {
 		struct survey_info *survey = &priv->survey[chan->hw_value];
-		survey->noise = clamp_t(s8, priv->noise, -128, 127);
+		survey->noise = clamp(priv->noise, -128, 127);
 		survey->channel_time = priv->survey_raw.active;
 		survey->channel_time_tx = priv->survey_raw.tx;
 		survey->channel_time_busy = priv->survey_raw.tx +
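
Several hunks in this series (here and later in rt2x00dev.c, rtlwifi/base.c and rtlwifi/ps.c) switch hot-path beacon filtering from ether_addr_equal() to ether_addr_equal_64bits(), while rtlwifi/cam.c moves to ether_addr_equal_unaligned() where the stored address alignment is not guaranteed. The following is a rough plain-C sketch of the trade-off, not the kernel implementation: the endianness mask and the requirement that two readable bytes follow each 6-byte address are the assumptions that make the wide-load variant safe only for addresses embedded in larger structures such as struct ieee80211_hdr.

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

/* Straightforward byte-wise comparison, safe for any 6-byte buffer. */
static bool mac_equal(const uint8_t a[6], const uint8_t b[6])
{
	return memcmp(a, b, 6) == 0;
}

/* Wide-load comparison: reads 8 bytes per side, so it assumes at least
 * two accessible bytes follow each address in memory.
 */
static bool mac_equal_64bits(const void *a, const void *b)
{
	uint64_t x, y;
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	const uint64_t mask = 0xffffffffffff0000ULL;	/* bytes 0..5 are most significant */
#else
	const uint64_t mask = 0x0000ffffffffffffULL;	/* bytes 0..5 are least significant */
#endif

	memcpy(&x, a, 8);	/* deliberately reads 2 bytes past the address */
	memcpy(&y, b, 8);
	return ((x ^ y) & mask) == 0;
}
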
diff --git a/drivers/net/wireless/prism54/isl_38xx.c b/drivers/net/wireless/prism54/isl_38xx.c
index 02fc67bccbd0..333c1a2f882e 100644
--- a/drivers/net/wireless/prism54/isl_38xx.c
+++ b/drivers/net/wireless/prism54/isl_38xx.c
@@ -12,8 +12,7 @@
  *  GNU General Public License for more details.
  *
  *  You should have received a copy of the GNU General Public License
- *  along with this program; if not, write to the Free Software
- *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *  along with this program; if not, see <http://www.gnu.org/licenses/>.
  *
  */
 
diff --git a/drivers/net/wireless/prism54/isl_38xx.h b/drivers/net/wireless/prism54/isl_38xx.h
index 19c33d313734..547ab885610b 100644
--- a/drivers/net/wireless/prism54/isl_38xx.h
+++ b/drivers/net/wireless/prism54/isl_38xx.h
@@ -11,8 +11,7 @@
  *  GNU General Public License for more details.
  *
  *  You should have received a copy of the GNU General Public License
- *  along with this program; if not, write to the Free Software
- *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *  along with this program; if not, see <http://www.gnu.org/licenses/>.
  *
  */
 
diff --git a/drivers/net/wireless/prism54/isl_ioctl.c b/drivers/net/wireless/prism54/isl_ioctl.c
index 8863a6cb2388..78fa64d3f223 100644
--- a/drivers/net/wireless/prism54/isl_ioctl.c
+++ b/drivers/net/wireless/prism54/isl_ioctl.c
@@ -14,8 +14,7 @@
  *  GNU General Public License for more details.
  *
  *  You should have received a copy of the GNU General Public License
- *  along with this program; if not, write to the Free Software
- *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *  along with this program; if not, see <http://www.gnu.org/licenses/>.
  *
  */
 
@@ -25,6 +24,7 @@
 #include <linux/if_arp.h>
 #include <linux/slab.h>
 #include <linux/pci.h>
+#include <linux/etherdevice.h>
 
 #include <asm/uaccess.h>
 
@@ -1861,7 +1861,7 @@ prism54_del_mac(struct net_device *ndev, struct iw_request_info *info,
 	if (mutex_lock_interruptible(&acl->lock))
 		return -ERESTARTSYS;
 	list_for_each_entry(entry, &acl->mac_list, _list) {
-		if (memcmp(entry->addr, addr->sa_data, ETH_ALEN) == 0) {
+		if (ether_addr_equal(entry->addr, addr->sa_data)) {
 			list_del(&entry->_list);
 			acl->size--;
 			kfree(entry);
diff --git a/drivers/net/wireless/prism54/isl_ioctl.h b/drivers/net/wireless/prism54/isl_ioctl.h
index a34bceb6e3cd..842a2549facc 100644
--- a/drivers/net/wireless/prism54/isl_ioctl.h
+++ b/drivers/net/wireless/prism54/isl_ioctl.h
@@ -13,8 +13,7 @@
  *  GNU General Public License for more details.
  *
  *  You should have received a copy of the GNU General Public License
- *  along with this program; if not, write to the Free Software
- *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *  along with this program; if not, see <http://www.gnu.org/licenses/>.
  *
  */
 
diff --git a/drivers/net/wireless/prism54/isl_oid.h b/drivers/net/wireless/prism54/isl_oid.h
index 59e31258d450..83fec557997e 100644
--- a/drivers/net/wireless/prism54/isl_oid.h
+++ b/drivers/net/wireless/prism54/isl_oid.h
@@ -13,8 +13,7 @@
  *  GNU General Public License for more details.
  *
  *  You should have received a copy of the GNU General Public License
- *  along with this program; if not, write to the Free Software
- *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *  along with this program; if not, see <http://www.gnu.org/licenses/>.
  *
  */
 
diff --git a/drivers/net/wireless/prism54/islpci_dev.c b/drivers/net/wireless/prism54/islpci_dev.c
index e05d9b4c8317..931cf440ff18 100644
--- a/drivers/net/wireless/prism54/islpci_dev.c
+++ b/drivers/net/wireless/prism54/islpci_dev.c
@@ -13,8 +13,7 @@
  *  GNU General Public License for more details.
  *
  *  You should have received a copy of the GNU General Public License
- *  along with this program; if not, write to the Free Software
- *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *  along with this program; if not, see <http://www.gnu.org/licenses/>.
  *
  */
 
@@ -914,7 +913,6 @@ islpci_setup(struct pci_dev *pdev)
       do_islpci_free_memory:
 	islpci_free_memory(priv);
       do_free_netdev:
-	pci_set_drvdata(pdev, NULL);
 	free_netdev(ndev);
 	priv = NULL;
 	return NULL;
diff --git a/drivers/net/wireless/prism54/islpci_dev.h b/drivers/net/wireless/prism54/islpci_dev.h
index c40403877f97..f6f088e05fe4 100644
--- a/drivers/net/wireless/prism54/islpci_dev.h
+++ b/drivers/net/wireless/prism54/islpci_dev.h
@@ -14,8 +14,7 @@
  *  GNU General Public License for more details.
  *
  *  You should have received a copy of the GNU General Public License
- *  along with this program; if not, write to the Free Software
- *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *  along with this program; if not, see <http://www.gnu.org/licenses/>.
  *
  */
 
diff --git a/drivers/net/wireless/prism54/islpci_eth.c b/drivers/net/wireless/prism54/islpci_eth.c
index 799e148d0370..674658f2e6ef 100644
--- a/drivers/net/wireless/prism54/islpci_eth.c
+++ b/drivers/net/wireless/prism54/islpci_eth.c
@@ -11,8 +11,7 @@
  *  GNU General Public License for more details.
  *
  *  You should have received a copy of the GNU General Public License
- *  along with this program; if not, write to the Free Software
- *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *  along with this program; if not, see <http://www.gnu.org/licenses/>.
  *
  */
 
diff --git a/drivers/net/wireless/prism54/islpci_eth.h b/drivers/net/wireless/prism54/islpci_eth.h
index 6ca30a5b7bfb..80f50f1bc6f2 100644
--- a/drivers/net/wireless/prism54/islpci_eth.h
+++ b/drivers/net/wireless/prism54/islpci_eth.h
@@ -11,8 +11,7 @@
  *  GNU General Public License for more details.
  *
  *  You should have received a copy of the GNU General Public License
- *  along with this program; if not, write to the Free Software
- *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *  along with this program; if not, see <http://www.gnu.org/licenses/>.
  *
  */
 
diff --git a/drivers/net/wireless/prism54/islpci_hotplug.c b/drivers/net/wireless/prism54/islpci_hotplug.c
index 9e68e0cb718e..1105a12dbde8 100644
--- a/drivers/net/wireless/prism54/islpci_hotplug.c
+++ b/drivers/net/wireless/prism54/islpci_hotplug.c
@@ -12,8 +12,7 @@
  *  GNU General Public License for more details.
  *
  *  You should have received a copy of the GNU General Public License
- *  along with this program; if not, write to the Free Software
- *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *  along with this program; if not, see <http://www.gnu.org/licenses/>.
  *
  */
 
@@ -199,7 +198,6 @@ prism54_probe(struct pci_dev *pdev, const struct pci_device_id *id)
       do_unregister_netdev:
 	unregister_netdev(ndev);
 	islpci_free_memory(priv);
-	pci_set_drvdata(pdev, NULL);
 	free_netdev(ndev);
 	priv = NULL;
       do_pci_clear_mwi:
@@ -247,7 +245,6 @@ prism54_remove(struct pci_dev *pdev)
 	/* free the PCI memory and unmap the remapped page */
 	islpci_free_memory(priv);
 
-	pci_set_drvdata(pdev, NULL);
 	free_netdev(ndev);
 	priv = NULL;
 
diff --git a/drivers/net/wireless/prism54/islpci_mgt.c b/drivers/net/wireless/prism54/islpci_mgt.c
index 9f19cceab487..0de14dfa68cc 100644
--- a/drivers/net/wireless/prism54/islpci_mgt.c
+++ b/drivers/net/wireless/prism54/islpci_mgt.c
@@ -12,8 +12,7 @@
  *  GNU General Public License for more details.
  *
  *  You should have received a copy of the GNU General Public License
- *  along with this program; if not, write to the Free Software
- *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *  along with this program; if not, see <http://www.gnu.org/licenses/>.
  *
  */
 
diff --git a/drivers/net/wireless/prism54/islpci_mgt.h b/drivers/net/wireless/prism54/islpci_mgt.h
index 0db93db9b675..700c434c8803 100644
--- a/drivers/net/wireless/prism54/islpci_mgt.h
+++ b/drivers/net/wireless/prism54/islpci_mgt.h
@@ -12,8 +12,7 @@
  *  GNU General Public License for more details.
  *
  *  You should have received a copy of the GNU General Public License
- *  along with this program; if not, write to the Free Software
- *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *  along with this program; if not, see <http://www.gnu.org/licenses/>.
  *
  */
 
diff --git a/drivers/net/wireless/prism54/oid_mgt.c b/drivers/net/wireless/prism54/oid_mgt.c
index 056af38e72e3..47b34bfe890a 100644
--- a/drivers/net/wireless/prism54/oid_mgt.c
+++ b/drivers/net/wireless/prism54/oid_mgt.c
@@ -11,8 +11,7 @@
  *  GNU General Public License for more details.
  *
  *  You should have received a copy of the GNU General Public License
- *  along with this program; if not, write to the Free Software
- *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *  along with this program; if not, see <http://www.gnu.org/licenses/>.
  *
  */
 
diff --git a/drivers/net/wireless/prism54/oid_mgt.h b/drivers/net/wireless/prism54/oid_mgt.h
index 92c8a2d4acd8..cf5141df8474 100644
--- a/drivers/net/wireless/prism54/oid_mgt.h
+++ b/drivers/net/wireless/prism54/oid_mgt.h
@@ -11,8 +11,7 @@
  *  GNU General Public License for more details.
  *
  *  You should have received a copy of the GNU General Public License
- *  along with this program; if not, write to the Free Software
- *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *  along with this program; if not, see <http://www.gnu.org/licenses/>.
  *
  */
 
diff --git a/drivers/net/wireless/prism54/prismcompat.h b/drivers/net/wireless/prism54/prismcompat.h
index aa1d1747784f..bc1401eb4b9d 100644
--- a/drivers/net/wireless/prism54/prismcompat.h
+++ b/drivers/net/wireless/prism54/prismcompat.h
@@ -11,8 +11,7 @@
  *  GNU General Public License for more details.
  *
  *  You should have received a copy of the GNU General Public License
- *  along with this program; if not, write to the Free Software
- *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *  along with this program; if not, see <http://www.gnu.org/licenses/>.
  *
  */
 
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index 9b557a1bb7f8..cbf0a589d32a 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -17,8 +17,7 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
  *
  * Changes:
  * Arnaldo Carvalho de Melo <acme@conectiva.com.br> - 08/08/2000
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index 8169a85c4498..5028557aa18a 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -15,8 +15,7 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
  *
  *  Portions of this file are based on NDISwrapper project,
  *  Copyright (C) 2003-2005 Pontus Fuchs, Giridhar Pemmasani
@@ -27,7 +26,6 @@
 // #define	VERBOSE			// more; success messages
 
 #include <linux/module.h>
-#include <linux/init.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/ethtool.h>
diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c
index 38ed9a3e44c8..4ccfef5094e0 100644
--- a/drivers/net/wireless/rt2x00/rt2400pci.c
+++ b/drivers/net/wireless/rt2x00/rt2400pci.c
@@ -13,9 +13,7 @@
 	GNU General Public License for more details.
 
 	You should have received a copy of the GNU General Public License
-	along with this program; if not, write to the
-	Free Software Foundation, Inc.,
-	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+	along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 /*
@@ -26,7 +24,6 @@
 
 #include <linux/delay.h>
 #include <linux/etherdevice.h>
-#include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/pci.h>
diff --git a/drivers/net/wireless/rt2x00/rt2400pci.h b/drivers/net/wireless/rt2x00/rt2400pci.h
index e4b07f0aa3cc..0fd3a9d01a60 100644
--- a/drivers/net/wireless/rt2x00/rt2400pci.h
+++ b/drivers/net/wireless/rt2x00/rt2400pci.h
@@ -13,9 +13,7 @@
 	GNU General Public License for more details.
 
 	You should have received a copy of the GNU General Public License
-	along with this program; if not, write to the
-	Free Software Foundation, Inc.,
-	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+	along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 /*
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c
index 0ac5c589ddce..abc5f56f29fe 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.c
+++ b/drivers/net/wireless/rt2x00/rt2500pci.c
@@ -13,9 +13,7 @@
 	GNU General Public License for more details.
 
 	You should have received a copy of the GNU General Public License
-	along with this program; if not, write to the
-	Free Software Foundation, Inc.,
-	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+	along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 /*
@@ -26,7 +24,6 @@
 
 #include <linux/delay.h>
 #include <linux/etherdevice.h>
-#include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/pci.h>
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.h b/drivers/net/wireless/rt2x00/rt2500pci.h
index 9c10068e4987..573e87bcc553 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.h
+++ b/drivers/net/wireless/rt2x00/rt2500pci.h
@@ -13,9 +13,7 @@
 	GNU General Public License for more details.
 
 	You should have received a copy of the GNU General Public License
-	along with this program; if not, write to the
-	Free Software Foundation, Inc.,
-	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+	along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 /*
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c
index 85acc79f68b8..9f16824cd1bc 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.c
+++ b/drivers/net/wireless/rt2x00/rt2500usb.c
@@ -13,9 +13,7 @@
 	GNU General Public License for more details.
 
 	You should have received a copy of the GNU General Public License
-	along with this program; if not, write to the
-	Free Software Foundation, Inc.,
-	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+	along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 /*
@@ -26,7 +24,6 @@
 
 #include <linux/delay.h>
 #include <linux/etherdevice.h>
-#include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/slab.h>
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.h b/drivers/net/wireless/rt2x00/rt2500usb.h
index 1b91a4cef965..afba0739c3b8 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.h
+++ b/drivers/net/wireless/rt2x00/rt2500usb.h
@@ -13,9 +13,7 @@
 	GNU General Public License for more details.
 
 	You should have received a copy of the GNU General Public License
-	along with this program; if not, write to the
-	Free Software Foundation, Inc.,
-	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+	along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 /*
diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h
index aab6b5e4f5dd..a394a9a95919 100644
--- a/drivers/net/wireless/rt2x00/rt2800.h
+++ b/drivers/net/wireless/rt2x00/rt2800.h
@@ -21,9 +21,7 @@
 	GNU General Public License for more details.
 
 	You should have received a copy of the GNU General Public License
-	along with this program; if not, write to the
-	Free Software Foundation, Inc.,
-	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+	along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 /*
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index 776aff3678ff..b8f5b06006c4 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -24,9 +24,7 @@
 	GNU General Public License for more details.
 
 	You should have received a copy of the GNU General Public License
-	along with this program; if not, write to the
-	Free Software Foundation, Inc.,
-	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+	along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 /*
@@ -5462,15 +5460,14 @@ static void rt2800_init_bbp_53xx(struct rt2x00_dev *rt2x00dev)
 
 	rt2800_bbp_write(rt2x00dev, 68, 0x0b);
 
-	rt2800_bbp_write(rt2x00dev, 69, 0x12);
+	rt2800_bbp_write(rt2x00dev, 69, 0x0d);
+	rt2800_bbp_write(rt2x00dev, 70, 0x06);
 	rt2800_bbp_write(rt2x00dev, 73, 0x13);
 	rt2800_bbp_write(rt2x00dev, 75, 0x46);
 	rt2800_bbp_write(rt2x00dev, 76, 0x28);
 
 	rt2800_bbp_write(rt2x00dev, 77, 0x59);
 
-	rt2800_bbp_write(rt2x00dev, 70, 0x0a);
-
 	rt2800_bbp_write(rt2x00dev, 79, 0x13);
 	rt2800_bbp_write(rt2x00dev, 80, 0x05);
 	rt2800_bbp_write(rt2x00dev, 81, 0x33);
@@ -5513,6 +5510,7 @@ static void rt2800_init_bbp_53xx(struct rt2x00_dev *rt2x00dev)
 	if (rt2x00_rt(rt2x00dev, RT5392)) {
 		rt2800_bbp_write(rt2x00dev, 134, 0xd0);
 		rt2800_bbp_write(rt2x00dev, 135, 0xf6);
+		rt2800_bbp_write(rt2x00dev, 148, 0x84);
 	}
 
 	rt2800_disable_unused_dac_adc(rt2x00dev);
@@ -6453,7 +6451,7 @@ static void rt2800_init_rfcsr_5390(struct rt2x00_dev *rt2x00dev)
 	rt2800_rfcsr_write(rt2x00dev, 7, 0x00);
 	rt2800_rfcsr_write(rt2x00dev, 10, 0x53);
 	rt2800_rfcsr_write(rt2x00dev, 11, 0x4a);
-	rt2800_rfcsr_write(rt2x00dev, 12, 0xc6);
+	rt2800_rfcsr_write(rt2x00dev, 12, 0x46);
 	rt2800_rfcsr_write(rt2x00dev, 13, 0x9f);
 	rt2800_rfcsr_write(rt2x00dev, 14, 0x00);
 	rt2800_rfcsr_write(rt2x00dev, 15, 0x00);
@@ -6466,7 +6464,8 @@ static void rt2800_init_rfcsr_5390(struct rt2x00_dev *rt2x00dev)
 	rt2800_rfcsr_write(rt2x00dev, 22, 0x20);
 	rt2800_rfcsr_write(rt2x00dev, 23, 0x00);
 	rt2800_rfcsr_write(rt2x00dev, 24, 0x00);
-	if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F))
+	if (rt2x00_is_usb(rt2x00dev) &&
+	    rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F))
 		rt2800_rfcsr_write(rt2x00dev, 25, 0x80);
 	else
 		rt2800_rfcsr_write(rt2x00dev, 25, 0xc0);
@@ -6486,10 +6485,7 @@ static void rt2800_init_rfcsr_5390(struct rt2x00_dev *rt2x00dev)
 	rt2800_rfcsr_write(rt2x00dev, 38, 0x85);
 	rt2800_rfcsr_write(rt2x00dev, 39, 0x1b);
 
-	if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F))
-		rt2800_rfcsr_write(rt2x00dev, 40, 0x0b);
-	else
-		rt2800_rfcsr_write(rt2x00dev, 40, 0x4b);
+	rt2800_rfcsr_write(rt2x00dev, 40, 0x0b);
 	rt2800_rfcsr_write(rt2x00dev, 41, 0xbb);
 	rt2800_rfcsr_write(rt2x00dev, 42, 0xd2);
 	rt2800_rfcsr_write(rt2x00dev, 43, 0x9a);
@@ -6510,16 +6506,26 @@ static void rt2800_init_rfcsr_5390(struct rt2x00_dev *rt2x00dev)
 		rt2800_rfcsr_write(rt2x00dev, 53, 0x84);
 	rt2800_rfcsr_write(rt2x00dev, 54, 0x78);
 	rt2800_rfcsr_write(rt2x00dev, 55, 0x44);
-	rt2800_rfcsr_write(rt2x00dev, 56, 0x22);
+	if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F))
+		rt2800_rfcsr_write(rt2x00dev, 56, 0x42);
+	else
+		rt2800_rfcsr_write(rt2x00dev, 56, 0x22);
 	rt2800_rfcsr_write(rt2x00dev, 57, 0x80);
 	rt2800_rfcsr_write(rt2x00dev, 58, 0x7f);
 	rt2800_rfcsr_write(rt2x00dev, 59, 0x8f);
 
 	rt2800_rfcsr_write(rt2x00dev, 60, 0x45);
-	if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F))
-		rt2800_rfcsr_write(rt2x00dev, 61, 0xd1);
-	else
-		rt2800_rfcsr_write(rt2x00dev, 61, 0xdd);
+	if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F)) {
+		if (rt2x00_is_usb(rt2x00dev))
+			rt2800_rfcsr_write(rt2x00dev, 61, 0xd1);
+		else
+			rt2800_rfcsr_write(rt2x00dev, 61, 0xd5);
+	} else {
+		if (rt2x00_is_usb(rt2x00dev))
+			rt2800_rfcsr_write(rt2x00dev, 61, 0xdd);
+		else
+			rt2800_rfcsr_write(rt2x00dev, 61, 0xb5);
+	}
 	rt2800_rfcsr_write(rt2x00dev, 62, 0x00);
 	rt2800_rfcsr_write(rt2x00dev, 63, 0x00);
 
@@ -6602,7 +6608,6 @@ static void rt2800_init_rfcsr_5592(struct rt2x00_dev *rt2x00dev)
 
 	rt2800_rfcsr_write(rt2x00dev, 1, 0x3F);
 	rt2800_rfcsr_write(rt2x00dev, 3, 0x08);
-	rt2800_rfcsr_write(rt2x00dev, 3, 0x08);
 	rt2800_rfcsr_write(rt2x00dev, 5, 0x10);
 	rt2800_rfcsr_write(rt2x00dev, 6, 0xE4);
 	rt2800_rfcsr_write(rt2x00dev, 7, 0x00);
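
The RT5390 RF init above now keys several register values on both the chip revision and the bus type. A sketch of the value selection for RFCSR 61 alone, with the constants taken from the hunk; the helper itself is illustrative, not an rt2x00 API addition.

#include <linux/types.h>

/* Illustrative only: the RFCSR 61 value chosen above for RT5390,
 * depending on revision (>= REV_RT5390F) and bus type.
 */
static u8 rt5390_rfcsr61_value(bool rev_ge_5390f, bool is_usb)
{
	if (rev_ge_5390f)
		return is_usb ? 0xd1 : 0xd5;
	return is_usb ? 0xdd : 0xb5;
}
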
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.h b/drivers/net/wireless/rt2x00/rt2800lib.h
index a94ba447e63c..3019db637a4b 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.h
+++ b/drivers/net/wireless/rt2x00/rt2800lib.h
@@ -14,9 +14,7 @@
 	GNU General Public License for more details.
 
 	You should have received a copy of the GNU General Public License
-	along with this program; if not, write to the
-	Free Software Foundation, Inc.,
-	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+	along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 #ifndef RT2800LIB_H
diff --git a/drivers/net/wireless/rt2x00/rt2800mmio.c b/drivers/net/wireless/rt2x00/rt2800mmio.c
index a8cc736b5063..de4790b41be7 100644
--- a/drivers/net/wireless/rt2x00/rt2800mmio.c
+++ b/drivers/net/wireless/rt2x00/rt2800mmio.c
@@ -19,9 +19,7 @@
  *	GNU General Public License for more details.
  *
  *	You should have received a copy of the GNU General Public License
- *	along with this program; if not, write to the
- *	Free Software Foundation, Inc.,
- *	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *	along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 /*	Module: rt2800mmio
diff --git a/drivers/net/wireless/rt2x00/rt2800mmio.h b/drivers/net/wireless/rt2x00/rt2800mmio.h
index 6a10de3eee3e..b63312ce3f27 100644
--- a/drivers/net/wireless/rt2x00/rt2800mmio.h
+++ b/drivers/net/wireless/rt2x00/rt2800mmio.h
@@ -19,9 +19,7 @@
  *	GNU General Public License for more details.
  *
  *	You should have received a copy of the GNU General Public License
- *	along with this program; if not, write to the
- *	Free Software Foundation, Inc.,
- *	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *	along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 /*	Module: rt2800mmio
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c
index b504455b4fec..a5b32ca2cf0f 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.c
+++ b/drivers/net/wireless/rt2x00/rt2800pci.c
@@ -20,9 +20,7 @@
 	GNU General Public License for more details.
 
 	You should have received a copy of the GNU General Public License
-	along with this program; if not, write to the
-	Free Software Foundation, Inc.,
-	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+	along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 /*
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.h b/drivers/net/wireless/rt2x00/rt2800pci.h
index a81c9ee281c0..9dfef4607d6b 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.h
+++ b/drivers/net/wireless/rt2x00/rt2800pci.h
@@ -20,9 +20,7 @@
 	GNU General Public License for more details.
 
 	You should have received a copy of the GNU General Public License
-	along with this program; if not, write to the
-	Free Software Foundation, Inc.,
-	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+	along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 /*
diff --git a/drivers/net/wireless/rt2x00/rt2800soc.c b/drivers/net/wireless/rt2x00/rt2800soc.c
index 1359227ca411..f6d1bf5be006 100644
--- a/drivers/net/wireless/rt2x00/rt2800soc.c
+++ b/drivers/net/wireless/rt2x00/rt2800soc.c
@@ -19,9 +19,7 @@
  *	GNU General Public License for more details.
  *
  *	You should have received a copy of the GNU General Public License
- *	along with this program; if not, write to the
- *	Free Software Foundation, Inc.,
- *	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *	along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 /*	Module: rt2800soc
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index a81ceb61d746..caddc1b427a9 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -18,9 +18,7 @@
 	GNU General Public License for more details.
 
 	You should have received a copy of the GNU General Public License
-	along with this program; if not, write to the
-	Free Software Foundation, Inc.,
-	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+	along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 /*
@@ -31,7 +29,6 @@
 
 #include <linux/delay.h>
 #include <linux/etherdevice.h>
-#include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/usb.h>
@@ -992,6 +989,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
 	{ USB_DEVICE(0x07d1, 0x3c15) },
 	{ USB_DEVICE(0x07d1, 0x3c16) },
 	{ USB_DEVICE(0x07d1, 0x3c17) },
+	{ USB_DEVICE(0x2001, 0x3317) },
 	{ USB_DEVICE(0x2001, 0x3c1b) },
 	/* Draytek */
 	{ USB_DEVICE(0x07fa, 0x7712) },
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.h b/drivers/net/wireless/rt2x00/rt2800usb.h
index 671ea3592610..ea7cac095997 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.h
+++ b/drivers/net/wireless/rt2x00/rt2800usb.h
@@ -17,9 +17,7 @@
 	GNU General Public License for more details.
 
 	You should have received a copy of the GNU General Public License
-	along with this program; if not, write to the
-	Free Software Foundation, Inc.,
-	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+	along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 /*
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index e4ba2ce0f212..e3b885d8f7db 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -15,9 +15,7 @@
 	GNU General Public License for more details.
 
 	You should have received a copy of the GNU General Public License
-	along with this program; if not, write to the
-	Free Software Foundation, Inc.,
-	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+	along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 /*
diff --git a/drivers/net/wireless/rt2x00/rt2x00config.c b/drivers/net/wireless/rt2x00/rt2x00config.c
index 8cb43f8f3efc..1122dc44c9fd 100644
--- a/drivers/net/wireless/rt2x00/rt2x00config.c
+++ b/drivers/net/wireless/rt2x00/rt2x00config.c
@@ -13,9 +13,7 @@
 	GNU General Public License for more details.
 
 	You should have received a copy of the GNU General Public License
-	along with this program; if not, write to the
-	Free Software Foundation, Inc.,
-	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+	along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 /*
diff --git a/drivers/net/wireless/rt2x00/rt2x00crypto.c b/drivers/net/wireless/rt2x00/rt2x00crypto.c
index 3db0d99d9da7..a2fd05ba25ca 100644
--- a/drivers/net/wireless/rt2x00/rt2x00crypto.c
+++ b/drivers/net/wireless/rt2x00/rt2x00crypto.c
@@ -13,9 +13,7 @@
 	GNU General Public License for more details.
 
 	You should have received a copy of the GNU General Public License
-	along with this program; if not, write to the
-	Free Software Foundation, Inc.,
-	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+	along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 /*
diff --git a/drivers/net/wireless/rt2x00/rt2x00debug.c b/drivers/net/wireless/rt2x00/rt2x00debug.c
index 7f7baae5ae02..2e3d1645e68b 100644
--- a/drivers/net/wireless/rt2x00/rt2x00debug.c
+++ b/drivers/net/wireless/rt2x00/rt2x00debug.c
@@ -13,9 +13,7 @@
 	GNU General Public License for more details.
 
 	You should have received a copy of the GNU General Public License
-	along with this program; if not, write to the
-	Free Software Foundation, Inc.,
-	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+	along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 /*
diff --git a/drivers/net/wireless/rt2x00/rt2x00debug.h b/drivers/net/wireless/rt2x00/rt2x00debug.h
index e11d39bdfef7..e65712c235bd 100644
--- a/drivers/net/wireless/rt2x00/rt2x00debug.h
+++ b/drivers/net/wireless/rt2x00/rt2x00debug.h
@@ -13,9 +13,7 @@
 	GNU General Public License for more details.
 
 	You should have received a copy of the GNU General Public License
-	along with this program; if not, write to the
-	Free Software Foundation, Inc.,
-	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+	along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 /*
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index 9dd92a700442..2bde6729f5e6 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -14,9 +14,7 @@
 	GNU General Public License for more details.
 
 	You should have received a copy of the GNU General Public License
-	along with this program; if not, write to the
-	Free Software Foundation, Inc.,
-	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+	along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 /*
@@ -567,10 +565,10 @@ static void rt2x00lib_rxdone_check_ba(struct rt2x00_dev *rt2x00dev,
 
 #undef TID_CHECK
 
-		if (!ether_addr_equal(ba->ra, entry->ta))
+		if (!ether_addr_equal_64bits(ba->ra, entry->ta))
 			continue;
 
-		if (!ether_addr_equal(ba->ta, entry->ra))
+		if (!ether_addr_equal_64bits(ba->ta, entry->ra))
 			continue;
 
 		/* Mark BAR since we received the according BA */
diff --git a/drivers/net/wireless/rt2x00/rt2x00dump.h b/drivers/net/wireless/rt2x00/rt2x00dump.h
index 063ebcce97f8..4c0e01b5d515 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dump.h
+++ b/drivers/net/wireless/rt2x00/rt2x00dump.h
@@ -13,9 +13,7 @@
 	GNU General Public License for more details.
 
 	You should have received a copy of the GNU General Public License
-	along with this program; if not, write to the
-	Free Software Foundation, Inc.,
-	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+	along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 /*
diff --git a/drivers/net/wireless/rt2x00/rt2x00firmware.c b/drivers/net/wireless/rt2x00/rt2x00firmware.c
index 1b4254b4272d..fbae2799e3ee 100644
--- a/drivers/net/wireless/rt2x00/rt2x00firmware.c
+++ b/drivers/net/wireless/rt2x00/rt2x00firmware.c
@@ -14,9 +14,7 @@
 	GNU General Public License for more details.
 
 	You should have received a copy of the GNU General Public License
-	along with this program; if not, write to the
-	Free Software Foundation, Inc.,
-	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+	along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 /*
diff --git a/drivers/net/wireless/rt2x00/rt2x00leds.c b/drivers/net/wireless/rt2x00/rt2x00leds.c
index 997a6c89e66e..c681d04b506c 100644
--- a/drivers/net/wireless/rt2x00/rt2x00leds.c
+++ b/drivers/net/wireless/rt2x00/rt2x00leds.c
@@ -13,9 +13,7 @@
 	GNU General Public License for more details.
 
 	You should have received a copy of the GNU General Public License
-	along with this program; if not, write to the
-	Free Software Foundation, Inc.,
-	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+	along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 /*
diff --git a/drivers/net/wireless/rt2x00/rt2x00leds.h b/drivers/net/wireless/rt2x00/rt2x00leds.h
index 3b46f0c3332a..b2c5269570da 100644
--- a/drivers/net/wireless/rt2x00/rt2x00leds.h
+++ b/drivers/net/wireless/rt2x00/rt2x00leds.h
@@ -13,9 +13,7 @@
 	GNU General Public License for more details.
 
 	You should have received a copy of the GNU General Public License
-	along with this program; if not, write to the
-	Free Software Foundation, Inc.,
-	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+	along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 /*
diff --git a/drivers/net/wireless/rt2x00/rt2x00lib.h b/drivers/net/wireless/rt2x00/rt2x00lib.h
index 7f40ab8e1bd8..fb7c349ccc9c 100644
--- a/drivers/net/wireless/rt2x00/rt2x00lib.h
+++ b/drivers/net/wireless/rt2x00/rt2x00lib.h
@@ -14,9 +14,7 @@
 	GNU General Public License for more details.
 
 	You should have received a copy of the GNU General Public License
-	along with this program; if not, write to the
-	Free Software Foundation, Inc.,
-	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+	along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 /*
diff --git a/drivers/net/wireless/rt2x00/rt2x00link.c b/drivers/net/wireless/rt2x00/rt2x00link.c
index c2b3b6629188..9b941c0c1264 100644
--- a/drivers/net/wireless/rt2x00/rt2x00link.c
+++ b/drivers/net/wireless/rt2x00/rt2x00link.c
@@ -13,9 +13,7 @@
 	GNU General Public License for more details.
 
 	You should have received a copy of the GNU General Public License
-	along with this program; if not, write to the
-	Free Software Foundation, Inc.,
-	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+	along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 /*
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
index 2183e7978399..ddeb5a709aa3 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
@@ -13,9 +13,7 @@
 	GNU General Public License for more details.
 
 	You should have received a copy of the GNU General Public License
-	along with this program; if not, write to the
-	Free Software Foundation, Inc.,
-	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+	along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 /*
diff --git a/drivers/net/wireless/rt2x00/rt2x00mmio.c b/drivers/net/wireless/rt2x00/rt2x00mmio.c
index 64b06c6abe58..6f236ea180aa 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mmio.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mmio.c
@@ -13,9 +13,7 @@
 	GNU General Public License for more details.
 
 	You should have received a copy of the GNU General Public License
-	along with this program; if not, write to the
-	Free Software Foundation, Inc.,
-	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+	along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 /*
diff --git a/drivers/net/wireless/rt2x00/rt2x00mmio.h b/drivers/net/wireless/rt2x00/rt2x00mmio.h
index cda3dbcf7ead..701c3127efb9 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mmio.h
+++ b/drivers/net/wireless/rt2x00/rt2x00mmio.h
@@ -13,9 +13,7 @@
 	GNU General Public License for more details.
 
 	You should have received a copy of the GNU General Public License
-	along with this program; if not, write to the
-	Free Software Foundation, Inc.,
-	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+	along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 /*
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.c b/drivers/net/wireless/rt2x00/rt2x00pci.c
index 25da20e7e1f3..d93db4b0371b 100644
--- a/drivers/net/wireless/rt2x00/rt2x00pci.c
+++ b/drivers/net/wireless/rt2x00/rt2x00pci.c
@@ -13,9 +13,7 @@
 	GNU General Public License for more details.
 
 	You should have received a copy of the GNU General Public License
-	along with this program; if not, write to the
-	Free Software Foundation, Inc.,
-	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+	along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 /*
@@ -156,8 +154,6 @@ exit_release_regions:
 exit_disable_device:
 	pci_disable_device(pci_dev);
 
-	pci_set_drvdata(pci_dev, NULL);
-
 	return retval;
 }
 EXPORT_SYMBOL_GPL(rt2x00pci_probe);
@@ -177,7 +173,6 @@ void rt2x00pci_remove(struct pci_dev *pci_dev)
 	/*
 	 * Free the PCI device data.
 	 */
-	pci_set_drvdata(pci_dev, NULL);
 	pci_disable_device(pci_dev);
 	pci_release_regions(pci_dev);
 }
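
The pci_set_drvdata(pdev, NULL) calls dropped here (and earlier in the prism54 probe error path and remove path) are redundant because the driver core clears the drvdata pointer once ->remove() returns or probe fails. A minimal, illustrative probe/remove pair showing the simplified teardown; this is not rt2x00 code, and the 64-byte allocation is a stand-in for real per-device state.

#include <linux/pci.h>
#include <linux/slab.h>

static int demo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	void *priv;
	int err = pci_enable_device(pdev);

	if (err)
		return err;

	priv = kzalloc(64, GFP_KERNEL);	/* stand-in for real driver state */
	if (!priv) {
		pci_disable_device(pdev);
		return -ENOMEM;
	}
	pci_set_drvdata(pdev, priv);
	return 0;
}

static void demo_remove(struct pci_dev *pdev)
{
	kfree(pci_get_drvdata(pdev));
	pci_disable_device(pdev);
	/* no pci_set_drvdata(pdev, NULL): the core clears it after remove */
}
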
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.h b/drivers/net/wireless/rt2x00/rt2x00pci.h
index 60d90b20f8b9..bc0ca5f58f38 100644
--- a/drivers/net/wireless/rt2x00/rt2x00pci.h
+++ b/drivers/net/wireless/rt2x00/rt2x00pci.h
@@ -13,9 +13,7 @@
 	GNU General Public License for more details.
 
 	You should have received a copy of the GNU General Public License
-	along with this program; if not, write to the
-	Free Software Foundation, Inc.,
-	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+	along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 /*
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index a5d38e8ad9e4..5642ccceca7c 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -15,9 +15,7 @@
 	GNU General Public License for more details.
 
 	You should have received a copy of the GNU General Public License
-	along with this program; if not, write to the
-	Free Software Foundation, Inc.,
-	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+	along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 /*
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.h b/drivers/net/wireless/rt2x00/rt2x00queue.h
index ebe117224979..c48125be0e34 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.h
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.h
@@ -13,9 +13,7 @@
 	GNU General Public License for more details.
 
 	You should have received a copy of the GNU General Public License
-	along with this program; if not, write to the
-	Free Software Foundation, Inc.,
-	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+	along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 /*
diff --git a/drivers/net/wireless/rt2x00/rt2x00reg.h b/drivers/net/wireless/rt2x00/rt2x00reg.h
index 6f867eec49cc..3cc541d13d67 100644
--- a/drivers/net/wireless/rt2x00/rt2x00reg.h
+++ b/drivers/net/wireless/rt2x00/rt2x00reg.h
@@ -13,9 +13,7 @@
 	GNU General Public License for more details.
 
 	You should have received a copy of the GNU General Public License
-	along with this program; if not, write to the
-	Free Software Foundation, Inc.,
-	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+	along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 /*
diff --git a/drivers/net/wireless/rt2x00/rt2x00soc.c b/drivers/net/wireless/rt2x00/rt2x00soc.c
index 9271a5fce0a8..69a0cdadb07f 100644
--- a/drivers/net/wireless/rt2x00/rt2x00soc.c
+++ b/drivers/net/wireless/rt2x00/rt2x00soc.c
@@ -14,9 +14,7 @@
 	GNU General Public License for more details.
 
 	You should have received a copy of the GNU General Public License
-	along with this program; if not, write to the
-	Free Software Foundation, Inc.,
-	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+	along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 /*
diff --git a/drivers/net/wireless/rt2x00/rt2x00soc.h b/drivers/net/wireless/rt2x00/rt2x00soc.h
index 474cbfc1efc7..9948d355e9a4 100644
--- a/drivers/net/wireless/rt2x00/rt2x00soc.h
+++ b/drivers/net/wireless/rt2x00/rt2x00soc.h
@@ -13,9 +13,7 @@
 	GNU General Public License for more details.
 
 	You should have received a copy of the GNU General Public License
-	along with this program; if not, write to the
-	Free Software Foundation, Inc.,
-	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+	along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 /*
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c
index 4e121627925d..10572452cc21 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.c
@@ -14,9 +14,7 @@
 	GNU General Public License for more details.
 
 	You should have received a copy of the GNU General Public License
-	along with this program; if not, write to the
-	Free Software Foundation, Inc.,
-	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+	along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 /*
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.h b/drivers/net/wireless/rt2x00/rt2x00usb.h
index 323ca7b2b095..e7bcf62347d5 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.h
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.h
@@ -13,9 +13,7 @@
 	GNU General Public License for more details.
 
 	You should have received a copy of the GNU General Public License
-	along with this program; if not, write to the
-	Free Software Foundation, Inc.,
-	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+	along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 /*
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
index a5b69cb49012..24402984ee57 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/rt2x00/rt61pci.c
@@ -13,9 +13,7 @@
 	GNU General Public License for more details.
 
 	You should have received a copy of the GNU General Public License
-	along with this program; if not, write to the
-	Free Software Foundation, Inc.,
-	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+	along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 /*
@@ -27,7 +25,6 @@
 #include <linux/crc-itu-t.h>
 #include <linux/delay.h>
 #include <linux/etherdevice.h>
-#include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/slab.h>
diff --git a/drivers/net/wireless/rt2x00/rt61pci.h b/drivers/net/wireless/rt2x00/rt61pci.h
index 9bc6b6044e34..1442075a8382 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.h
+++ b/drivers/net/wireless/rt2x00/rt61pci.h
@@ -13,9 +13,7 @@
 	GNU General Public License for more details.
 
 	You should have received a copy of the GNU General Public License
-	along with this program; if not, write to the
-	Free Software Foundation, Inc.,
-	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+	along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 /*
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index 1baf9c896dcd..a140170b1eb3 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -13,9 +13,7 @@
 	GNU General Public License for more details.
 
 	You should have received a copy of the GNU General Public License
-	along with this program; if not, write to the
-	Free Software Foundation, Inc.,
-	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+	along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 /*
@@ -27,7 +25,6 @@
 #include <linux/crc-itu-t.h>
 #include <linux/delay.h>
 #include <linux/etherdevice.h>
-#include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/slab.h>
diff --git a/drivers/net/wireless/rt2x00/rt73usb.h b/drivers/net/wireless/rt2x00/rt73usb.h
index 7577e0ba3877..4a4f235466d1 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.h
+++ b/drivers/net/wireless/rt2x00/rt73usb.h
@@ -13,9 +13,7 @@
 	GNU General Public License for more details.
 
 	You should have received a copy of the GNU General Public License
-	along with this program; if not, write to the
-	Free Software Foundation, Inc.,
-	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+	along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 /*
diff --git a/drivers/net/wireless/rtl818x/rtl8180/dev.c b/drivers/net/wireless/rtl818x/rtl8180/dev.c
index a91506b12a62..8ec17aad0e52 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8180/dev.c
@@ -15,7 +15,6 @@
  * published by the Free Software Foundation.
  */
 
-#include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/pci.h>
 #include <linux/slab.h>
diff --git a/drivers/net/wireless/rtl818x/rtl8180/grf5101.c b/drivers/net/wireless/rtl818x/rtl8180/grf5101.c
index dc845693f321..b1bfee738937 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/grf5101.c
+++ b/drivers/net/wireless/rtl818x/rtl8180/grf5101.c
@@ -19,7 +19,6 @@
  * published by the Free Software Foundation.
  */
 
-#include <linux/init.h>
 #include <linux/pci.h>
 #include <linux/delay.h>
 #include <net/mac80211.h>
diff --git a/drivers/net/wireless/rtl818x/rtl8180/max2820.c b/drivers/net/wireless/rtl818x/rtl8180/max2820.c
index a63c443c3c6f..eebf23976524 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/max2820.c
+++ b/drivers/net/wireless/rtl818x/rtl8180/max2820.c
@@ -18,7 +18,6 @@
  * published by the Free Software Foundation.
  */
 
-#include <linux/init.h>
 #include <linux/pci.h>
 #include <linux/delay.h>
 #include <net/mac80211.h>
diff --git a/drivers/net/wireless/rtl818x/rtl8180/rtl8225.c b/drivers/net/wireless/rtl818x/rtl8180/rtl8225.c
index ee638d0749d6..d60a5f399022 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/rtl8225.c
+++ b/drivers/net/wireless/rtl818x/rtl8180/rtl8225.c
@@ -15,7 +15,6 @@
  * published by the Free Software Foundation.
  */
 
-#include <linux/init.h>
 #include <linux/pci.h>
 #include <linux/delay.h>
 #include <net/mac80211.h>
diff --git a/drivers/net/wireless/rtl818x/rtl8180/sa2400.c b/drivers/net/wireless/rtl818x/rtl8180/sa2400.c
index 7614d9ccc729..959b049827de 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/sa2400.c
+++ b/drivers/net/wireless/rtl818x/rtl8180/sa2400.c
@@ -19,7 +19,6 @@
  * published by the Free Software Foundation.
  */
 
-#include <linux/init.h>
 #include <linux/pci.h>
 #include <linux/delay.h>
 #include <net/mac80211.h>
diff --git a/drivers/net/wireless/rtl818x/rtl8187/dev.c b/drivers/net/wireless/rtl818x/rtl8187/dev.c
index 9a6edb0c014e..fd78df813a85 100644
--- a/drivers/net/wireless/rtl818x/rtl8187/dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8187/dev.c
@@ -20,7 +20,6 @@
  * published by the Free Software Foundation.
  */
 
-#include <linux/init.h>
 #include <linux/usb.h>
 #include <linux/slab.h>
 #include <linux/delay.h>
@@ -416,7 +415,7 @@ static int rtl8187_init_urbs(struct ieee80211_hw *dev)
 	struct rtl8187_rx_info *info;
 	int ret = 0;
 
-	while (skb_queue_len(&priv->rx_queue) < 16) {
+	while (skb_queue_len(&priv->rx_queue) < 32) {
 		skb = __dev_alloc_skb(RTL8187_MAX_RX, GFP_KERNEL);
 		if (!skb) {
 			ret = -ENOMEM;
diff --git a/drivers/net/wireless/rtl818x/rtl8187/rtl8225.c b/drivers/net/wireless/rtl818x/rtl8187/rtl8225.c
index a26193a04447..5ecf18ed67b8 100644
--- a/drivers/net/wireless/rtl818x/rtl8187/rtl8225.c
+++ b/drivers/net/wireless/rtl818x/rtl8187/rtl8225.c
@@ -16,7 +16,6 @@
  * published by the Free Software Foundation.
  */
 
-#include <linux/init.h>
 #include <linux/usb.h>
 #include <net/mac80211.h>
 
diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c
index ff784072fb42..93bb384eb001 100644
--- a/drivers/net/wireless/rtlwifi/base.c
+++ b/drivers/net/wireless/rtlwifi/base.c
@@ -353,7 +353,6 @@ static void _rtl_init_mac80211(struct ieee80211_hw *hw)
 
 	/* TODO: Correct this value for our hw */
 	/* TODO: define these hard code value */
-	hw->channel_change_time = 100;
 	hw->max_listen_interval = 10;
 	hw->max_rate_tries = 4;
 	/* hw->max_rates = 1; */
@@ -1293,7 +1292,7 @@ void rtl_beacon_statistic(struct ieee80211_hw *hw, struct sk_buff *skb)
 		return;
 
 	/* and only beacons from the associated BSSID, please */
-	if (!ether_addr_equal(hdr->addr3, rtlpriv->mac80211.bssid))
+	if (!ether_addr_equal_64bits(hdr->addr3, rtlpriv->mac80211.bssid))
 		return;
 
 	rtlpriv->link_info.bcn_rx_inperiod++;
@@ -1437,7 +1436,8 @@ void rtl_watchdog_wq_callback(void *data)
 			/* if we can't recv beacon for 6s, we should
 			 * reconnect this AP
 			 */
-			if (rtlpriv->link_info.roam_times >= 3) {
+			if ((rtlpriv->link_info.roam_times >= 3) &&
+			    !is_zero_ether_addr(rtlpriv->mac80211.bssid)) {
 				RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
 					 "AP off, try to reconnect now\n");
 				rtlpriv->link_info.roam_times = 0;
@@ -1780,7 +1780,7 @@ void rtl_recognize_peer(struct ieee80211_hw *hw, u8 *data, unsigned int len)
 		return;
 
 	/* and only beacons from the associated BSSID, please */
-	if (!ether_addr_equal(hdr->addr3, rtlpriv->mac80211.bssid))
+	if (!ether_addr_equal_64bits(hdr->addr3, rtlpriv->mac80211.bssid))
 		return;
 
 	if (rtl_find_221_ie(hw, data, len))
diff --git a/drivers/net/wireless/rtlwifi/cam.c b/drivers/net/wireless/rtlwifi/cam.c
index 0e510f73041a..0276153c72cc 100644
--- a/drivers/net/wireless/rtlwifi/cam.c
+++ b/drivers/net/wireless/rtlwifi/cam.c
@@ -295,7 +295,7 @@ u8 rtl_cam_get_free_entry(struct ieee80211_hw *hw, u8 *sta_addr)
 	/* Does STA already exist? */
 	for (i = 4; i < TOTAL_CAM_ENTRY; i++) {
 		addr = rtlpriv->sec.hwsec_cam_sta_addr[i];
-		if (memcmp(addr, sta_addr, ETH_ALEN) == 0)
+		if (ether_addr_equal_unaligned(addr, sta_addr))
 			return i;
 	}
 	/* Get a free CAM entry. */
@@ -335,7 +335,7 @@ void rtl_cam_del_entry(struct ieee80211_hw *hw, u8 *sta_addr)
 		addr = rtlpriv->sec.hwsec_cam_sta_addr[i];
 		bitmap = (rtlpriv->sec.hwsec_cam_bitmap) >> i;
 		if (((bitmap & BIT(0)) == BIT(0)) &&
-		    (memcmp(addr, sta_addr, ETH_ALEN) == 0)) {
+		    (ether_addr_equal_unaligned(addr, sta_addr))) {
 			/* Remove from HW Security CAM */
 			eth_zero_addr(rtlpriv->sec.hwsec_cam_sta_addr[i]);
 			rtlpriv->sec.hwsec_cam_bitmap &= ~(BIT(0) << i);
diff --git a/drivers/net/wireless/rtlwifi/core.c b/drivers/net/wireless/rtlwifi/core.c
index 210ce7cd94d8..2d337a0c3df0 100644
--- a/drivers/net/wireless/rtlwifi/core.c
+++ b/drivers/net/wireless/rtlwifi/core.c
@@ -46,10 +46,20 @@ void rtl_fw_cb(const struct firmware *firmware, void *context)
 			 "Firmware callback routine entered!\n");
 	complete(&rtlpriv->firmware_loading_complete);
 	if (!firmware) {
+		if (rtlpriv->cfg->alt_fw_name) {
+			err = request_firmware(&firmware,
+					       rtlpriv->cfg->alt_fw_name,
+					       rtlpriv->io.dev);
+			pr_info("Loading alternative firmware %s\n",
+				rtlpriv->cfg->alt_fw_name);
+			if (!err)
+				goto found_alt;
+		}
 		pr_err("Firmware %s not available\n", rtlpriv->cfg->fw_name);
 		rtlpriv->max_fw_size = 0;
 		return;
 	}
+found_alt:
 	if (firmware->size > rtlpriv->max_fw_size) {
 		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
 			 "Firmware is too big!\n");
@@ -184,6 +194,7 @@ static int rtl_op_add_interface(struct ieee80211_hw *hw,
 					rtlpriv->cfg->maps
 					[RTL_IBSS_INT_MASKS]);
 		}
+		mac->link_state = MAC80211_LINKED;
 		break;
 	case NL80211_IFTYPE_ADHOC:
 		RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
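
The first core.c hunk adds a fallback to an alternative firmware image when the primary request fails. A condensed sketch of the same pattern follows; the helper name and the dev_info message are illustrative, and rtlwifi itself does this inline in rtl_fw_cb() as shown above.

#include <linux/device.h>
#include <linux/firmware.h>

/* Try the primary image first, then the alternative name if one exists. */
static int load_fw_with_fallback(const struct firmware **fw,
				 const char *primary, const char *alt,
				 struct device *dev)
{
	int err = request_firmware(fw, primary, dev);

	if (err && alt) {
		dev_info(dev, "loading alternative firmware %s\n", alt);
		err = request_firmware(fw, alt, dev);
	}
	return err;
}
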
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
index 5a53195d016b..d7aa165fe677 100644
--- a/drivers/net/wireless/rtlwifi/pci.c
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@ -688,8 +688,6 @@ static void _rtl_receive_one(struct ieee80211_hw *hw, struct sk_buff *skb,
 		rtlpriv->stats.rxbytesunicast += skb->len;
 	}
 
-	rtl_is_special_data(hw, skb, false);
-
 	if (ieee80211_is_data(fc)) {
 		rtlpriv->cfg->ops->led_control(hw, LED_CTL_RX);
 
diff --git a/drivers/net/wireless/rtlwifi/ps.c b/drivers/net/wireless/rtlwifi/ps.c
index 0d81f766fd0f..deedae3c5449 100644
--- a/drivers/net/wireless/rtlwifi/ps.c
+++ b/drivers/net/wireless/rtlwifi/ps.c
@@ -478,7 +478,7 @@ void rtl_swlps_beacon(struct ieee80211_hw *hw, void *data, unsigned int len)
 		return;
 
 	/* and only beacons from the associated BSSID, please */
-	if (!ether_addr_equal(hdr->addr3, rtlpriv->mac80211.bssid))
+	if (!ether_addr_equal_64bits(hdr->addr3, rtlpriv->mac80211.bssid))
 		return;
 
 	rtlpriv->psc.last_beacon = jiffies;
@@ -923,7 +923,7 @@ void rtl_p2p_info(struct ieee80211_hw *hw, void *data, unsigned int len)
 		return;
 
 	/* and only beacons from the associated BSSID, please */
-	if (!ether_addr_equal(hdr->addr3, rtlpriv->mac80211.bssid))
+	if (!ether_addr_equal_64bits(hdr->addr3, rtlpriv->mac80211.bssid))
 		return;
 
 	/* check if this really is a beacon */
diff --git a/drivers/net/wireless/rtlwifi/regd.c b/drivers/net/wireless/rtlwifi/regd.c
index d7d0d4948b01..a4eb9b271438 100644
--- a/drivers/net/wireless/rtlwifi/regd.c
+++ b/drivers/net/wireless/rtlwifi/regd.c
@@ -59,30 +59,26 @@ static struct country_code_to_enum_rd allCountries[] = {
  */
 #define RTL819x_2GHZ_CH12_13	\
 	REG_RULE(2467-10, 2472+10, 40, 0, 20,\
-	NL80211_RRF_PASSIVE_SCAN)
+		 NL80211_RRF_NO_IR)
 
 #define RTL819x_2GHZ_CH14	\
 	REG_RULE(2484-10, 2484+10, 40, 0, 20, \
-	NL80211_RRF_PASSIVE_SCAN | \
-	NL80211_RRF_NO_OFDM)
+		 NL80211_RRF_NO_IR | NL80211_RRF_NO_OFDM)
 
 /* 5G chan 36 - chan 64*/
 #define RTL819x_5GHZ_5150_5350	\
 	REG_RULE(5150-10, 5350+10, 40, 0, 30, \
-	NL80211_RRF_PASSIVE_SCAN | \
-	NL80211_RRF_NO_IBSS)
+		 NL80211_RRF_NO_IR)
 
 /* 5G chan 100 - chan 165*/
 #define RTL819x_5GHZ_5470_5850	\
 	REG_RULE(5470-10, 5850+10, 40, 0, 30, \
-	NL80211_RRF_PASSIVE_SCAN | \
-	NL80211_RRF_NO_IBSS)
+		 NL80211_RRF_NO_IR)
 
 /* 5G chan 149 - chan 165*/
 #define RTL819x_5GHZ_5725_5850	\
 	REG_RULE(5725-10, 5850+10, 40, 0, 30, \
-	NL80211_RRF_PASSIVE_SCAN | \
-	NL80211_RRF_NO_IBSS)
+		 NL80211_RRF_NO_IR)
 
 #define RTL819x_5GHZ_ALL	\
 	(RTL819x_5GHZ_5150_5350, RTL819x_5GHZ_5470_5850)
@@ -172,7 +168,8 @@ static void _rtl_reg_apply_beaconing_flags(struct wiphy *wiphy,
 			    (ch->flags & IEEE80211_CHAN_RADAR))
 				continue;
 			if (initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE) {
-				reg_rule = freq_reg_info(wiphy, ch->center_freq);
+				reg_rule = freq_reg_info(wiphy,
+							 MHZ_TO_KHZ(ch->center_freq));
 				if (IS_ERR(reg_rule))
 					continue;
 
@@ -185,16 +182,11 @@ static void _rtl_reg_apply_beaconing_flags(struct wiphy *wiphy,
 				 *regulatory_hint().
 				 */
 
-				if (!(reg_rule->flags & NL80211_RRF_NO_IBSS))
-					ch->flags &= ~IEEE80211_CHAN_NO_IBSS;
-				if (!(reg_rule->
-				     flags & NL80211_RRF_PASSIVE_SCAN))
-					ch->flags &=
-					    ~IEEE80211_CHAN_PASSIVE_SCAN;
+				if (!(reg_rule->flags & NL80211_RRF_NO_IR))
+					ch->flags &= ~IEEE80211_CHAN_NO_IR;
 			} else {
 				if (ch->beacon_found)
-					ch->flags &= ~(IEEE80211_CHAN_NO_IBSS |
-						  IEEE80211_CHAN_PASSIVE_SCAN);
+					ch->flags &= ~IEEE80211_CHAN_NO_IR;
 			}
 		}
 	}
@@ -219,11 +211,11 @@ static void _rtl_reg_apply_active_scan_flags(struct wiphy *wiphy,
 	 */
 	if (initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE) {
 		ch = &sband->channels[11];	/* CH 12 */
-		if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN)
-			ch->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN;
+		if (ch->flags & IEEE80211_CHAN_NO_IR)
+			ch->flags &= ~IEEE80211_CHAN_NO_IR;
 		ch = &sband->channels[12];	/* CH 13 */
-		if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN)
-			ch->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN;
+		if (ch->flags & IEEE80211_CHAN_NO_IR)
+			ch->flags &= ~IEEE80211_CHAN_NO_IR;
 		return;
 	}
 
@@ -235,19 +227,19 @@ static void _rtl_reg_apply_active_scan_flags(struct wiphy *wiphy,
 	 */
 
 	ch = &sband->channels[11];	/* CH 12 */
-	reg_rule = freq_reg_info(wiphy, ch->center_freq);
+	reg_rule = freq_reg_info(wiphy, MHZ_TO_KHZ(ch->center_freq));
 	if (!IS_ERR(reg_rule)) {
-		if (!(reg_rule->flags & NL80211_RRF_PASSIVE_SCAN))
-			if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN)
-				ch->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN;
+		if (!(reg_rule->flags & NL80211_RRF_NO_IR))
+			if (ch->flags & IEEE80211_CHAN_NO_IR)
+				ch->flags &= ~IEEE80211_CHAN_NO_IR;
 	}
 
 	ch = &sband->channels[12];	/* CH 13 */
-	reg_rule = freq_reg_info(wiphy, ch->center_freq);
+	reg_rule = freq_reg_info(wiphy, MHZ_TO_KHZ(ch->center_freq));
 	if (!IS_ERR(reg_rule)) {
-		if (!(reg_rule->flags & NL80211_RRF_PASSIVE_SCAN))
-			if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN)
-				ch->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN;
+		if (!(reg_rule->flags & NL80211_RRF_NO_IR))
+			if (ch->flags & IEEE80211_CHAN_NO_IR)
+				ch->flags &= ~IEEE80211_CHAN_NO_IR;
 	}
 }
 
@@ -284,8 +276,7 @@ static void _rtl_reg_apply_radar_flags(struct wiphy *wiphy)
 		 */
 		if (!(ch->flags & IEEE80211_CHAN_DISABLED))
 			ch->flags |= IEEE80211_CHAN_RADAR |
-			    IEEE80211_CHAN_NO_IBSS |
-			    IEEE80211_CHAN_PASSIVE_SCAN;
+				     IEEE80211_CHAN_NO_IR;
 	}
 }
 
@@ -354,9 +345,9 @@ static int _rtl_regd_init_wiphy(struct rtl_regulatory *reg,
 
 	wiphy->reg_notifier = reg_notifier;
 
-	wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY;
-	wiphy->flags &= ~WIPHY_FLAG_STRICT_REGULATORY;
-	wiphy->flags &= ~WIPHY_FLAG_DISABLE_BEACON_HINTS;
+	wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG;
+	wiphy->regulatory_flags &= ~REGULATORY_STRICT_REG;
+	wiphy->regulatory_flags &= ~REGULATORY_DISABLE_BEACON_HINTS;
 
 	regd = _rtl_regdomain_select(reg);
 	wiphy_apply_custom_regulatory(wiphy, regd);
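For illustration (not part of the patch itself): the regd.c hunks above follow the cfg80211 change that folded NL80211_RRF_PASSIVE_SCAN and NL80211_RRF_NO_IBSS into the single NL80211_RRF_NO_IR flag, and IEEE80211_CHAN_PASSIVE_SCAN/IEEE80211_CHAN_NO_IBSS into IEEE80211_CHAN_NO_IR. A minimal before/after sketch of the rule pattern, reusing the 5 GHz rule from this file (the _OLD/_NEW suffixes are illustrative only):

	/* before: two flags together meaning "do not initiate radiation" */
	#define RTL819x_5GHZ_5150_5350_OLD \
		REG_RULE(5150-10, 5350+10, 40, 0, 30, \
			 NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS)

	/* after: the single NL80211_RRF_NO_IR flag replaces both */
	#define RTL819x_5GHZ_5150_5350_NEW \
		REG_RULE(5150-10, 5350+10, 40, 0, 30, \
			 NL80211_RRF_NO_IR)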
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/dm.c b/drivers/net/wireless/rtlwifi/rtl8188ee/dm.c
index 21a5cf060677..a6184b6e1d57 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/dm.c
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/dm.c
@@ -1078,7 +1078,7 @@ static void rtl88e_dm_txpower_tracking_callback_thermalmeter(struct ieee80211_hw
 				rtldm->swing_flag_ofdm = true;
 			}
 
-			if (rtldm->swing_idx_cck != rtldm->swing_idx_cck) {
+			if (rtldm->swing_idx_cck_cur != rtldm->swing_idx_cck) {
 				rtldm->swing_idx_cck_cur = rtldm->swing_idx_cck;
 				rtldm->swing_flag_cck = true;
 			}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
index e9caa5d4cff0..eb78fd8607f7 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
@@ -158,6 +158,42 @@ static const u8 cckswing_table_ch14[CCK_TABLE_SIZE][8] = {
 	{0x09, 0x08, 0x07, 0x04, 0x00, 0x00, 0x00, 0x00}
 };
 
+static u32 power_index_reg[6] = {0xc90, 0xc91, 0xc92, 0xc98, 0xc99, 0xc9a};
+
+void dm_restorepowerindex(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u8	index;
+
+	for (index = 0; index < 6; index++)
+		rtl_write_byte(rtlpriv, power_index_reg[index],
+			       rtlpriv->dm.powerindex_backup[index]);
+}
+EXPORT_SYMBOL_GPL(dm_restorepowerindex);
+
+void dm_writepowerindex(struct ieee80211_hw *hw, u8 value)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u8 index;
+
+	for (index = 0; index < 6; index++)
+		rtl_write_byte(rtlpriv, power_index_reg[index], value);
+}
+EXPORT_SYMBOL_GPL(dm_writepowerindex);
+
+void dm_savepowerindex(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u8 index;
+	u8 tmp;
+
+	for (index = 0; index < 6; index++) {
+		tmp = rtl_read_byte(rtlpriv, power_index_reg[index]);
+		rtlpriv->dm.powerindex_backup[index] = tmp;
+	}
+}
+EXPORT_SYMBOL_GPL(dm_savepowerindex);
+
 static void rtl92c_dm_diginit(struct ieee80211_hw *hw)
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -180,7 +216,12 @@ static void rtl92c_dm_diginit(struct ieee80211_hw *hw)
 	dm_digtable->back_range_max = DM_DIG_BACKOFF_MAX;
 	dm_digtable->back_range_min = DM_DIG_BACKOFF_MIN;
 	dm_digtable->pre_cck_pd_state = CCK_PD_STAGE_MAX;
-	dm_digtable->cur_cck_pd_state = CCK_PD_STAGE_MAX;
+	dm_digtable->cur_cck_pd_state = CCK_PD_STAGE_LowRssi;
+
+	dm_digtable->forbidden_igi = DM_DIG_MIN;
+	dm_digtable->large_fa_hit = 0;
+	dm_digtable->recover_cnt = 0;
+	dm_digtable->dig_dynamic_min = 0x25;
 }
 
 static u8 rtl92c_dm_initial_gain_min_pwdb(struct ieee80211_hw *hw)
@@ -206,7 +247,9 @@ static u8 rtl92c_dm_initial_gain_min_pwdb(struct ieee80211_hw *hw)
 		rssi_val_min = rtlpriv->dm.entry_min_undec_sm_pwdb;
 	}
 
-	return (u8) rssi_val_min;
+	if (rssi_val_min > 100)
+		rssi_val_min = 100;
+	return (u8)rssi_val_min;
 }
 
 static void rtl92c_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw)
@@ -224,9 +267,17 @@ static void rtl92c_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw)
 
 	ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER3, MASKDWORD);
 	falsealm_cnt->cnt_mcs_fail = (ret_value & 0xffff);
+
+	ret_value = rtl_get_bbreg(hw, ROFDM0_FRAMESYNC, MASKDWORD);
+	falsealm_cnt->cnt_fast_fsync_fail = (ret_value & 0xffff);
+	falsealm_cnt->cnt_sb_search_fail = ((ret_value & 0xffff0000) >> 16);
+
 	falsealm_cnt->cnt_ofdm_fail = falsealm_cnt->cnt_parity_fail +
-	    falsealm_cnt->cnt_rate_illegal +
-	    falsealm_cnt->cnt_crc8_fail + falsealm_cnt->cnt_mcs_fail;
+				      falsealm_cnt->cnt_rate_illegal +
+				      falsealm_cnt->cnt_crc8_fail +
+				      falsealm_cnt->cnt_mcs_fail +
+				      falsealm_cnt->cnt_fast_fsync_fail +
+				      falsealm_cnt->cnt_sb_search_fail;
 
 	rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, BIT(14), 1);
 	ret_value = rtl_get_bbreg(hw, RCCK0_FACOUNTERLOWER, MASKBYTE0);
@@ -271,12 +322,14 @@ static void rtl92c_dm_ctrl_initgain_by_fa(struct ieee80211_hw *hw)
 		value_igi++;
 	else if (rtlpriv->falsealm_cnt.cnt_all >= DM_DIG_FA_TH2)
 		value_igi += 2;
+
 	if (value_igi > DM_DIG_FA_UPPER)
 		value_igi = DM_DIG_FA_UPPER;
 	else if (value_igi < DM_DIG_FA_LOWER)
 		value_igi = DM_DIG_FA_LOWER;
+
 	if (rtlpriv->falsealm_cnt.cnt_all > 10000)
-		value_igi = 0x32;
+		value_igi = DM_DIG_FA_UPPER;
 
 	dm_digtable->cur_igvalue = value_igi;
 	rtl92c_dm_write_dig(hw);
@@ -286,32 +339,80 @@ static void rtl92c_dm_ctrl_initgain_by_rssi(struct ieee80211_hw *hw)
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 	struct dig_t *digtable = &rtlpriv->dm_digtable;
+	u32 isbt;
+
+	/* modify DIG lower bound, deal with abnormally large false alarm */
+	if (rtlpriv->falsealm_cnt.cnt_all > 10000) {
+		digtable->large_fa_hit++;
+		if (digtable->forbidden_igi < digtable->cur_igvalue) {
+			digtable->forbidden_igi = digtable->cur_igvalue;
+			digtable->large_fa_hit = 1;
+		}
 
-	if (rtlpriv->falsealm_cnt.cnt_all > digtable->fa_highthresh) {
-		if ((digtable->back_val - 2) < digtable->back_range_min)
-			digtable->back_val = digtable->back_range_min;
-		else
-			digtable->back_val -= 2;
-	} else if (rtlpriv->falsealm_cnt.cnt_all < digtable->fa_lowthresh) {
-		if ((digtable->back_val + 2) > digtable->back_range_max)
-			digtable->back_val = digtable->back_range_max;
-		else
-			digtable->back_val += 2;
+		if (digtable->large_fa_hit >= 3) {
+			if ((digtable->forbidden_igi + 1) >
+			    digtable->rx_gain_max)
+				digtable->rx_gain_min = digtable->rx_gain_max;
+			else
+				digtable->rx_gain_min = (digtable->forbidden_igi + 1);
+			digtable->recover_cnt = 3600; /* 3600=2hr */
+		}
+	} else {
+		/* Recovery mechanism for IGI lower bound */
+		if (digtable->recover_cnt != 0) {
+			digtable->recover_cnt--;
+		} else {
+			if (digtable->large_fa_hit == 0) {
+				if ((digtable->forbidden_igi-1) < DM_DIG_MIN) {
+					digtable->forbidden_igi = DM_DIG_MIN;
+					digtable->rx_gain_min = DM_DIG_MIN;
+				} else {
+					digtable->forbidden_igi--;
+					digtable->rx_gain_min = digtable->forbidden_igi + 1;
+				}
+			} else if (digtable->large_fa_hit == 3) {
+				digtable->large_fa_hit = 0;
+			}
+		}
+	}
+	if (rtlpriv->falsealm_cnt.cnt_all < 250) {
+		isbt = rtl_read_byte(rtlpriv, 0x4fd) & 0x01;
+
+		if (!isbt) {
+			if (rtlpriv->falsealm_cnt.cnt_all >
+			    digtable->fa_lowthresh) {
+				if ((digtable->back_val - 2) <
+				   digtable->back_range_min)
+					digtable->back_val = digtable->back_range_min;
+				else
+					digtable->back_val -= 2;
+			} else if (rtlpriv->falsealm_cnt.cnt_all <
+				   digtable->fa_lowthresh) {
+				if ((digtable->back_val + 2) >
+				    digtable->back_range_max)
+					digtable->back_val = digtable->back_range_max;
+				else
+					digtable->back_val += 2;
+			}
+		} else {
+			digtable->back_val = DM_DIG_BACKOFF_DEFAULT;
+		}
+	} else {
+		/* Adjust initial gain by false alarm */
+		if (rtlpriv->falsealm_cnt.cnt_all > 1000)
+			digtable->cur_igvalue = digtable->pre_igvalue + 2;
+		else if (rtlpriv->falsealm_cnt.cnt_all > 750)
+			digtable->cur_igvalue = digtable->pre_igvalue + 1;
+		else if (rtlpriv->falsealm_cnt.cnt_all < 500)
+			digtable->cur_igvalue = digtable->pre_igvalue - 1;
 	}
 
-	if ((digtable->rssi_val_min + 10 - digtable->back_val) >
-	    digtable->rx_gain_max)
+	/* Check initial gain by upper/lower bound */
+	if (digtable->cur_igvalue > digtable->rx_gain_max)
 		digtable->cur_igvalue = digtable->rx_gain_max;
-	else if ((digtable->rssi_val_min + 10 -
-		  digtable->back_val) < digtable->rx_gain_min)
-		digtable->cur_igvalue = digtable->rx_gain_min;
-	else
-		digtable->cur_igvalue = digtable->rssi_val_min + 10 -
-		    digtable->back_val;
 
-	RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
-		 "rssi_val_min = %x back_val %x\n",
-		 digtable->rssi_val_min, digtable->back_val);
+	if (digtable->cur_igvalue < digtable->rx_gain_min)
+		digtable->cur_igvalue = digtable->rx_gain_min;
 
 	rtl92c_dm_write_dig(hw);
 }
@@ -329,7 +430,7 @@ static void rtl92c_dm_initial_gain_multi_sta(struct ieee80211_hw *hw)
 		multi_sta = true;
 
 	if (!multi_sta ||
-	    dm_digtable->cursta_cstate != DIG_STA_DISCONNECT) {
+	    dm_digtable->cursta_cstate == DIG_STA_DISCONNECT) {
 		initialized = false;
 		dm_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
 		return;
@@ -375,7 +476,6 @@ static void rtl92c_dm_initial_gain_sta(struct ieee80211_hw *hw)
 	RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
 		 "presta_cstate = %x, cursta_cstate = %x\n",
 		 dm_digtable->presta_cstate, dm_digtable->cursta_cstate);
-
 	if (dm_digtable->presta_cstate == dm_digtable->cursta_cstate ||
 	    dm_digtable->cursta_cstate == DIG_STA_BEFORE_CONNECT ||
 	    dm_digtable->cursta_cstate == DIG_STA_CONNECT) {
@@ -383,6 +483,8 @@ static void rtl92c_dm_initial_gain_sta(struct ieee80211_hw *hw)
 		if (dm_digtable->cursta_cstate != DIG_STA_DISCONNECT) {
 			dm_digtable->rssi_val_min =
 			    rtl92c_dm_initial_gain_min_pwdb(hw);
+			if (dm_digtable->rssi_val_min > 100)
+				dm_digtable->rssi_val_min = 100;
 			rtl92c_dm_ctrl_initgain_by_rssi(hw);
 		}
 	} else {
@@ -398,11 +500,12 @@ static void rtl92c_dm_initial_gain_sta(struct ieee80211_hw *hw)
 static void rtl92c_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw)
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
-	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
 	struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
 
 	if (dm_digtable->cursta_cstate == DIG_STA_CONNECT) {
 		dm_digtable->rssi_val_min = rtl92c_dm_initial_gain_min_pwdb(hw);
+		if (dm_digtable->rssi_val_min > 100)
+			dm_digtable->rssi_val_min = 100;
 
 		if (dm_digtable->pre_cck_pd_state == CCK_PD_STAGE_LowRssi) {
 			if (dm_digtable->rssi_val_min <= 25)
@@ -424,48 +527,14 @@ static void rtl92c_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw)
 	}
 
 	if (dm_digtable->pre_cck_pd_state != dm_digtable->cur_cck_pd_state) {
-		if (dm_digtable->cur_cck_pd_state == CCK_PD_STAGE_LowRssi) {
-			if (rtlpriv->falsealm_cnt.cnt_cck_fail > 800)
-				dm_digtable->cur_cck_fa_state =
-				    CCK_FA_STAGE_High;
-			else
-				dm_digtable->cur_cck_fa_state = CCK_FA_STAGE_Low;
-
-			if (dm_digtable->pre_cck_fa_state !=
-			    dm_digtable->cur_cck_fa_state) {
-				if (dm_digtable->cur_cck_fa_state ==
-				    CCK_FA_STAGE_Low)
-					rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2,
-						      0x83);
-				else
-					rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2,
-						      0xcd);
-
-				dm_digtable->pre_cck_fa_state =
-				    dm_digtable->cur_cck_fa_state;
-			}
-
-			rtl_set_bbreg(hw, RCCK0_SYSTEM, MASKBYTE1, 0x40);
-
-			if (IS_92C_SERIAL(rtlhal->version))
-				rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT,
-					      MASKBYTE2, 0xd7);
-		} else {
+		if ((dm_digtable->cur_cck_pd_state == CCK_PD_STAGE_LowRssi) ||
+		    (dm_digtable->cur_cck_pd_state == CCK_PD_STAGE_MAX))
+			rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2, 0x83);
+		else
 			rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2, 0xcd);
-			rtl_set_bbreg(hw, RCCK0_SYSTEM, MASKBYTE1, 0x47);
 
-			if (IS_92C_SERIAL(rtlhal->version))
-				rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT,
-					      MASKBYTE2, 0xd3);
-		}
 		dm_digtable->pre_cck_pd_state = dm_digtable->cur_cck_pd_state;
 	}
-
-	RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE, "CCKPDStage=%x\n",
-		 dm_digtable->cur_cck_pd_state);
-
-	RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE, "is92C=%x\n",
-		 IS_92C_SERIAL(rtlhal->version));
 }
 
 static void rtl92c_dm_ctrl_initgain_by_twoport(struct ieee80211_hw *hw)
@@ -482,6 +551,8 @@ static void rtl92c_dm_ctrl_initgain_by_twoport(struct ieee80211_hw *hw)
 	else
 		dm_digtable->cursta_cstate = DIG_STA_DISCONNECT;
 
+	dm_digtable->curmultista_cstate = DIG_MULTISTA_DISCONNECT;
+
 	rtl92c_dm_initial_gain_sta(hw);
 	rtl92c_dm_initial_gain_multi_sta(hw);
 	rtl92c_dm_cck_packet_detection_thresh(hw);
@@ -493,23 +564,26 @@ static void rtl92c_dm_ctrl_initgain_by_twoport(struct ieee80211_hw *hw)
 static void rtl92c_dm_dig(struct ieee80211_hw *hw)
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
-	struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
 
 	if (rtlpriv->dm.dm_initialgain_enable == false)
 		return;
-	if (dm_digtable->dig_enable_flag == false)
+	if (!(rtlpriv->dm.dm_flag & DYNAMIC_FUNC_DIG))
 		return;
 
 	rtl92c_dm_ctrl_initgain_by_twoport(hw);
-
 }
 
 static void rtl92c_dm_init_dynamic_txpower(struct ieee80211_hw *hw)
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 
-	rtlpriv->dm.dynamic_txpower_enable = false;
-
+	if (rtlpriv->rtlhal.interface == INTF_USB &&
+	    rtlpriv->rtlhal.board_type & 0x1) {
+		dm_savepowerindex(hw);
+		rtlpriv->dm.dynamic_txpower_enable = true;
+	} else {
+		rtlpriv->dm.dynamic_txpower_enable = false;
+	}
 	rtlpriv->dm.last_dtp_lvl = TXHIGHPWRLEVEL_NORMAL;
 	rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
 }
@@ -524,9 +598,14 @@ void rtl92c_dm_write_dig(struct ieee80211_hw *hw)
 		 dm_digtable->cur_igvalue, dm_digtable->pre_igvalue,
 		 dm_digtable->back_val);
 
-	dm_digtable->cur_igvalue += 2;
-	if (dm_digtable->cur_igvalue > 0x3f)
-		dm_digtable->cur_igvalue = 0x3f;
+	if (rtlpriv->rtlhal.interface == INTF_USB &&
+	    !dm_digtable->dig_enable_flag) {
+		dm_digtable->pre_igvalue = 0x17;
+		return;
+	}
+	dm_digtable->cur_igvalue -= 1;
+	if (dm_digtable->cur_igvalue < DM_DIG_MIN)
+		dm_digtable->cur_igvalue = DM_DIG_MIN;
 
 	if (dm_digtable->pre_igvalue != dm_digtable->cur_igvalue) {
 		rtl_set_bbreg(hw, ROFDM0_XAAGCCORE1, 0x7f,
@@ -536,11 +615,47 @@ void rtl92c_dm_write_dig(struct ieee80211_hw *hw)
 
 		dm_digtable->pre_igvalue = dm_digtable->cur_igvalue;
 	}
+	RT_TRACE(rtlpriv, COMP_DIG, DBG_WARNING,
+		 "dig values 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
+		 dm_digtable->cur_igvalue, dm_digtable->pre_igvalue,
+		 dm_digtable->rssi_val_min, dm_digtable->back_val,
+		 dm_digtable->rx_gain_max, dm_digtable->rx_gain_min,
+		 dm_digtable->large_fa_hit, dm_digtable->forbidden_igi);
 }
 EXPORT_SYMBOL(rtl92c_dm_write_dig);
 
 static void rtl92c_dm_pwdb_monitor(struct ieee80211_hw *hw)
 {
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+	long tmpentry_max_pwdb = 0, tmpentry_min_pwdb = 0xff;
+
+	if (mac->link_state != MAC80211_LINKED)
+		return;
+
+	if (mac->opmode == NL80211_IFTYPE_ADHOC ||
+	    mac->opmode == NL80211_IFTYPE_AP) {
+		/* TODO: Handle ADHOC and AP Mode */
+	}
+
+	if (tmpentry_max_pwdb != 0)
+		rtlpriv->dm.entry_max_undec_sm_pwdb = tmpentry_max_pwdb;
+	else
+		rtlpriv->dm.entry_max_undec_sm_pwdb = 0;
+
+	if (tmpentry_min_pwdb != 0xff)
+		rtlpriv->dm.entry_min_undec_sm_pwdb = tmpentry_min_pwdb;
+	else
+		rtlpriv->dm.entry_min_undec_sm_pwdb = 0;
+
+/* TODO:
+ *	if (mac->opmode == NL80211_IFTYPE_STATION) {
+ *		if (rtlpriv->rtlhal.fw_ready) {
+ *			u32 param = (u32)(rtlpriv->dm.undec_sm_pwdb << 16);
+ *			rtl8192c_set_rssi_cmd(hw, param);
+ *		}
+ *	}
+ */
 }
 
 void rtl92c_dm_init_edca_turbo(struct ieee80211_hw *hw)
@@ -750,6 +865,7 @@ static void rtl92c_dm_txpower_tracking_callback_thermalmeter(struct ieee80211_hw
 				rtlpriv->dm.ofdm_index[i] = ofdm_index_old[i];
 			rtlpriv->dm.cck_index = cck_index_old;
 		}
+		/* Handle USB High PA boards */
 
 		delta = (thermalvalue > rtlpriv->dm.thermalvalue) ?
 		    (thermalvalue - rtlpriv->dm.thermalvalue) :
@@ -1140,22 +1256,22 @@ void rtl92c_dm_rf_saving(struct ieee80211_hw *hw, u8 bforce_in_normal)
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 	struct ps_t *dm_pstable = &rtlpriv->dm_pstable;
-	static u8 initialize;
-	static u32 reg_874, reg_c70, reg_85c, reg_a74;
 
-	if (initialize == 0) {
-		reg_874 = (rtl_get_bbreg(hw, RFPGA0_XCD_RFINTERFACESW,
-					 MASKDWORD) & 0x1CC000) >> 14;
+	if (!rtlpriv->reg_init) {
+		rtlpriv->reg_874 = (rtl_get_bbreg(hw,
+						  RFPGA0_XCD_RFINTERFACESW,
+						  MASKDWORD) & 0x1CC000) >> 14;
 
-		reg_c70 = (rtl_get_bbreg(hw, ROFDM0_AGCPARAMETER1,
-					 MASKDWORD) & BIT(3)) >> 3;
+		rtlpriv->reg_c70 = (rtl_get_bbreg(hw, ROFDM0_AGCPARAMETER1,
+				    MASKDWORD) & BIT(3)) >> 3;
 
-		reg_85c = (rtl_get_bbreg(hw, RFPGA0_XCD_SWITCHCONTROL,
-					 MASKDWORD) & 0xFF000000) >> 24;
+		rtlpriv->reg_85c = (rtl_get_bbreg(hw, RFPGA0_XCD_SWITCHCONTROL,
+				    MASKDWORD) & 0xFF000000) >> 24;
 
-		reg_a74 = (rtl_get_bbreg(hw, 0xa74, MASKDWORD) & 0xF000) >> 12;
+		rtlpriv->reg_a74 = (rtl_get_bbreg(hw, 0xa74, MASKDWORD) &
+				    0xF000) >> 12;
 
-		initialize = 1;
+		rtlpriv->reg_init = true;
 	}
 
 	if (!bforce_in_normal) {
@@ -1192,12 +1308,12 @@ void rtl92c_dm_rf_saving(struct ieee80211_hw *hw, u8 bforce_in_normal)
 			rtl_set_bbreg(hw, 0x818, BIT(28), 0x1);
 		} else {
 			rtl_set_bbreg(hw, RFPGA0_XCD_RFINTERFACESW,
-				      0x1CC000, reg_874);
+				      0x1CC000, rtlpriv->reg_874);
 			rtl_set_bbreg(hw, ROFDM0_AGCPARAMETER1, BIT(3),
-				      reg_c70);
+				      rtlpriv->reg_c70);
 			rtl_set_bbreg(hw, RFPGA0_XCD_SWITCHCONTROL, 0xFF000000,
-				      reg_85c);
-			rtl_set_bbreg(hw, 0xa74, 0xF000, reg_a74);
+				      rtlpriv->reg_85c);
+			rtl_set_bbreg(hw, 0xa74, 0xF000, rtlpriv->reg_a74);
 			rtl_set_bbreg(hw, 0x818, BIT(28), 0x0);
 		}
 
@@ -1213,6 +1329,7 @@ static void rtl92c_dm_dynamic_bb_powersaving(struct ieee80211_hw *hw)
 	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
 	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
 
+	/* Determine the minimum RSSI */
 	if (((mac->link_state == MAC80211_NOLINK)) &&
 	    (rtlpriv->dm.entry_min_undec_sm_pwdb == 0)) {
 		dm_pstable->rssi_val_min = 0;
@@ -1241,6 +1358,7 @@ static void rtl92c_dm_dynamic_bb_powersaving(struct ieee80211_hw *hw)
 			 dm_pstable->rssi_val_min);
 	}
 
+	/* Power Saving for 92C */
 	if (IS_92C_SERIAL(rtlhal->version))
 		;/* rtl92c_dm_1r_cca(hw); */
 	else
@@ -1252,12 +1370,23 @@ void rtl92c_dm_init(struct ieee80211_hw *hw)
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 
 	rtlpriv->dm.dm_type = DM_TYPE_BYDRIVER;
+	rtlpriv->dm.dm_flag = DYNAMIC_FUNC_DISABLE | DYNAMIC_FUNC_DIG;
+	rtlpriv->dm.undec_sm_pwdb = -1;
+	rtlpriv->dm.undec_sm_cck = -1;
+	rtlpriv->dm.dm_initialgain_enable = true;
 	rtl92c_dm_diginit(hw);
+
+	rtlpriv->dm.dm_flag |= HAL_DM_HIPWR_DISABLE;
 	rtl92c_dm_init_dynamic_txpower(hw);
+
 	rtl92c_dm_init_edca_turbo(hw);
 	rtl92c_dm_init_rate_adaptive_mask(hw);
+	rtlpriv->dm.dm_flag |= DYNAMIC_FUNC_SS;
 	rtl92c_dm_initialize_txpower_tracking(hw);
 	rtl92c_dm_init_dynamic_bb_powersaving(hw);
+
+	rtlpriv->dm.ofdm_pkt_cnt = 0;
+	rtlpriv->dm.dm_rssi_sel = RSSI_DEFAULT;
 }
 EXPORT_SYMBOL(rtl92c_dm_init);
 
@@ -1308,7 +1437,7 @@ void rtl92c_dm_dynamic_txpower(struct ieee80211_hw *hw)
 	}
 
 	if (undec_sm_pwdb >= TX_POWER_NEAR_FIELD_THRESH_LVL2) {
-		rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL1;
+		rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL2;
 		RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
 			 "TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x0)\n");
 	} else if ((undec_sm_pwdb < (TX_POWER_NEAR_FIELD_THRESH_LVL2 - 3)) &&
@@ -1328,8 +1457,16 @@ void rtl92c_dm_dynamic_txpower(struct ieee80211_hw *hw)
 			 "PHY_SetTxPowerLevel8192S() Channel = %d\n",
 			 rtlphy->current_channel);
 		rtl92c_phy_set_txpower_level(hw, rtlphy->current_channel);
+		if (rtlpriv->dm.dynamic_txhighpower_lvl ==
+		    TXHIGHPWRLEVEL_NORMAL)
+			dm_restorepowerindex(hw);
+		else if (rtlpriv->dm.dynamic_txhighpower_lvl ==
+			 TXHIGHPWRLEVEL_LEVEL1)
+			dm_writepowerindex(hw, 0x14);
+		else if (rtlpriv->dm.dynamic_txhighpower_lvl ==
+			 TXHIGHPWRLEVEL_LEVEL2)
+			dm_writepowerindex(hw, 0x10);
 	}
-
 	rtlpriv->dm.last_dtp_lvl = rtlpriv->dm.dynamic_txhighpower_lvl;
 }
 
@@ -1400,12 +1537,6 @@ u8 rtl92c_bt_rssi_state_change(struct ieee80211_hw *hw)
 	else
 		curr_bt_rssi_state &= (~BT_RSSI_STATE_SPECIAL_LOW);
 
-	/* Set Tx Power according to BT status. */
-	if (undec_sm_pwdb >= 30)
-		curr_bt_rssi_state |=  BT_RSSI_STATE_TXPOWER_LOW;
-	else if (undec_sm_pwdb < 25)
-		curr_bt_rssi_state &= (~BT_RSSI_STATE_TXPOWER_LOW);
-
 	/* Check BT state related to BT_Idle in B/G mode. */
 	if (undec_sm_pwdb < 15)
 		curr_bt_rssi_state |=  BT_RSSI_STATE_BG_EDCA_LOW;
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.h b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.h
index 518e208c0180..4f232a063636 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.h
@@ -91,6 +91,17 @@
 #define TX_POWER_NEAR_FIELD_THRESH_LVL2		74
 #define TX_POWER_NEAR_FIELD_THRESH_LVL1		67
 
+#define DYNAMIC_FUNC_DISABLE			0x0
+#define DYNAMIC_FUNC_DIG			BIT(0)
+#define DYNAMIC_FUNC_HP				BIT(1)
+#define DYNAMIC_FUNC_SS				BIT(2) /*Tx Power Tracking*/
+#define DYNAMIC_FUNC_BT				BIT(3)
+#define DYNAMIC_FUNC_ANT_DIV			BIT(4)
+
+#define	RSSI_CCK				0
+#define	RSSI_OFDM				1
+#define	RSSI_DEFAULT				2
+
 struct swat_t {
 	u8 failure_cnt;
 	u8 try_flag;
@@ -167,5 +178,8 @@ void rtl92c_phy_lc_calibrate(struct ieee80211_hw *hw);
 void rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw, bool recovery);
 void rtl92c_dm_dynamic_txpower(struct ieee80211_hw *hw);
 void rtl92c_dm_bt_coexist(struct ieee80211_hw *hw);
+void dm_savepowerindex(struct ieee80211_hw *hw);
+void dm_writepowerindex(struct ieee80211_hw *hw, u8 value);
+void dm_restorepowerindex(struct ieee80211_hw *hw);
 
 #endif
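
For illustration (not part of the patch itself): the three helpers declared above bracket the dynamic tx-power changes made earlier in this diff — save the six 0xc90..0xc9a power-index registers once, overwrite them while a high-power level is active, and restore them when the level returns to TXHIGHPWRLEVEL_NORMAL, as rtl92c_dm_dynamic_txpower() now does. A minimal usage sketch, assuming hw is the driver's struct ieee80211_hw pointer:

	dm_savepowerindex(hw);		/* back up registers 0xc90..0xc9a */
	dm_writepowerindex(hw, 0x14);	/* value this patch uses for TXHIGHPWRLEVEL_LEVEL1 */
	/* ... run while the high-power level is in force ... */
	dm_restorepowerindex(hw);	/* write the saved values back */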
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
index 0c0e78263a66..9e32ac8a4425 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
@@ -1147,6 +1147,12 @@ static void _rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw,
 		0x522, 0x550, 0x551, 0x040
 	};
 
+	u32 iqk_bb_reg_92C[9] = {
+		0xc04, 0xc08, 0x874, 0xb68,
+		0xb6c, 0x870, 0x860, 0x864,
+		0x800
+	};
+
 	const u32 retrycount = 2;
 
 	if (t == 0) {
@@ -1157,6 +1163,8 @@ static void _rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw,
 						rtlphy->adda_backup, 16);
 		_rtl92c_phy_save_mac_registers(hw, iqk_mac_reg,
 					       rtlphy->iqk_mac_backup);
+		_rtl92c_phy_save_adda_registers(hw, iqk_bb_reg_92C,
+						rtlphy->iqk_bb_backup, 9);
 	}
 	_rtl92c_phy_path_adda_on(hw, adda_reg, true, is2t);
 	if (t == 0) {
@@ -1167,14 +1175,18 @@ static void _rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw,
 
 	if (!rtlphy->rfpi_enable)
 		_rtl92c_phy_pi_mode_switch(hw, true);
-	if (t == 0) {
-		rtlphy->reg_c04 = rtl_get_bbreg(hw, 0xc04, MASKDWORD);
-		rtlphy->reg_c08 = rtl_get_bbreg(hw, 0xc08, MASKDWORD);
-		rtlphy->reg_874 = rtl_get_bbreg(hw, 0x874, MASKDWORD);
-	}
+
+	rtl_set_bbreg(hw, 0x800, BIT(24), 0x0);
+
 	rtl_set_bbreg(hw, 0xc04, MASKDWORD, 0x03a05600);
 	rtl_set_bbreg(hw, 0xc08, MASKDWORD, 0x000800e4);
 	rtl_set_bbreg(hw, 0x874, MASKDWORD, 0x22204000);
+
+	rtl_set_bbreg(hw, 0x870, BIT(10), 0x1);
+	rtl_set_bbreg(hw, 0x870, BIT(26), 0x1);
+	rtl_set_bbreg(hw, 0x860, BIT(10), 0x0);
+	rtl_set_bbreg(hw, 0x864, BIT(10), 0x0);
+
 	if (is2t) {
 		rtl_set_bbreg(hw, 0x840, MASKDWORD, 0x00010000);
 		rtl_set_bbreg(hw, 0x844, MASKDWORD, 0x00010000);
@@ -1239,13 +1251,9 @@ static void _rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw,
 					0x3FF0000) >> 16;
 		}
 	}
-	rtl_set_bbreg(hw, 0xc04, MASKDWORD, rtlphy->reg_c04);
-	rtl_set_bbreg(hw, 0x874, MASKDWORD, rtlphy->reg_874);
-	rtl_set_bbreg(hw, 0xc08, MASKDWORD, rtlphy->reg_c08);
+
 	rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0);
-	rtl_set_bbreg(hw, 0x840, MASKDWORD, 0x00032ed3);
-	if (is2t)
-		rtl_set_bbreg(hw, 0x844, MASKDWORD, 0x00032ed3);
+
 	if (t != 0) {
 		if (!rtlphy->rfpi_enable)
 			_rtl92c_phy_pi_mode_switch(hw, false);
@@ -1253,6 +1261,15 @@ static void _rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw,
 						  rtlphy->adda_backup, 16);
 		_rtl92c_phy_reload_mac_registers(hw, iqk_mac_reg,
 						 rtlphy->iqk_mac_backup);
+		_rtl92c_phy_reload_adda_registers(hw, iqk_bb_reg_92C,
+						  rtlphy->iqk_bb_backup, 9);
+
+		rtl_set_bbreg(hw, 0x840, MASKDWORD, 0x00032ed3);
+		if (is2t)
+			rtl_set_bbreg(hw, 0x844, MASKDWORD, 0x00032ed3);
+
+		rtl_set_bbreg(hw, 0xe30, MASKDWORD, 0x01008c00);
+		rtl_set_bbreg(hw, 0xe34, MASKDWORD, 0x01008c00);
 	}
 }
 
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/dm.c b/drivers/net/wireless/rtlwifi/rtl8192cu/dm.c
index 16a0b9e59acf..c16209a336ea 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/dm.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/dm.c
@@ -101,6 +101,15 @@ void rtl92cu_dm_dynamic_txpower(struct ieee80211_hw *hw)
 			 "PHY_SetTxPowerLevel8192S() Channel = %d\n",
 			 rtlphy->current_channel);
 		rtl92c_phy_set_txpower_level(hw, rtlphy->current_channel);
+		if (rtlpriv->dm.dynamic_txhighpower_lvl ==
+		    TXHIGHPWRLEVEL_NORMAL)
+			dm_restorepowerindex(hw);
+		else if (rtlpriv->dm.dynamic_txhighpower_lvl ==
+			 TXHIGHPWRLEVEL_LEVEL1)
+			dm_writepowerindex(hw, 0x14);
+		else if (rtlpriv->dm.dynamic_txhighpower_lvl ==
+			 TXHIGHPWRLEVEL_LEVEL2)
+			dm_writepowerindex(hw, 0x10);
 	}
 
 	rtlpriv->dm.last_dtp_lvl = rtlpriv->dm.dynamic_txhighpower_lvl;
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/dm.h b/drivers/net/wireless/rtlwifi/rtl8192cu/dm.h
index d947e7d350bb..fafa6bac2a3f 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/dm.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/dm.h
@@ -30,3 +30,6 @@
 #include "../rtl8192ce/dm.h"
 
 void rtl92cu_dm_dynamic_txpower(struct ieee80211_hw *hw);
+void dm_savepowerindex(struct ieee80211_hw *hw);
+void dm_writepowerindex(struct ieee80211_hw *hw, u8 value);
+void dm_restorepowerindex(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
index 189ba124a8c6..468bf73cc883 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
@@ -1022,7 +1022,7 @@ int rtl92cu_hw_init(struct ieee80211_hw *hw)
 	if (ppsc->rfpwr_state == ERFON) {
 		rtl92c_phy_set_rfpath_switch(hw, 1);
 		if (iqk_initialized) {
-			rtl92c_phy_iq_calibrate(hw, false);
+			rtl92c_phy_iq_calibrate(hw, true);
 		} else {
 			rtl92c_phy_iq_calibrate(hw, false);
 			iqk_initialized = true;
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c b/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c
index 34e56308301e..0c09240eadcc 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c
@@ -120,6 +120,7 @@ bool rtl92cu_phy_bb_config(struct ieee80211_hw *hw)
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
 	u16 regval;
+	u32 regval32;
 	u8 b_reg_hwparafile = 1;
 
 	_rtl92c_phy_init_bb_rf_register_definition(hw);
@@ -135,8 +136,11 @@ bool rtl92cu_phy_bb_config(struct ieee80211_hw *hw)
 	} else if (IS_HARDWARE_TYPE_8192CU(rtlhal)) {
 		rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, FEN_USBA | FEN_USBD |
 			       FEN_BB_GLB_RSTn | FEN_BBRSTB);
-		rtl_write_byte(rtlpriv, REG_LDOHCI12_CTRL, 0x0f);
 	}
+	regval32 = rtl_read_dword(rtlpriv, 0x87c);
+	rtl_write_dword(rtlpriv, 0x87c, regval32 & (~BIT(31)));
+	if (IS_HARDWARE_TYPE_8192CU(rtlhal))
+		rtl_write_byte(rtlpriv, REG_LDOHCI12_CTRL, 0x0f);
 	rtl_write_byte(rtlpriv, REG_AFE_XTAL_CTRL + 1, 0x80);
 	if (b_reg_hwparafile == 1)
 		rtstatus = _rtl92c_phy_bb8192c_config_parafile(hw);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c b/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c
index 2119313a737b..b878d56d2f4d 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c
@@ -85,17 +85,15 @@ void rtl92cu_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
 	if (mac->act_scanning) {
 		tx_agc[RF90_PATH_A] = 0x3f3f3f3f;
 		tx_agc[RF90_PATH_B] = 0x3f3f3f3f;
-		if (turbo_scanoff) {
-			for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) {
-				tx_agc[idx1] = ppowerlevel[idx1] |
-				    (ppowerlevel[idx1] << 8) |
-				    (ppowerlevel[idx1] << 16) |
-				    (ppowerlevel[idx1] << 24);
-				if (rtlhal->interface == INTF_USB) {
-					if (tx_agc[idx1] > 0x20 &&
-					    rtlefuse->external_pa)
-						tx_agc[idx1] = 0x20;
-				}
+		for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) {
+			tx_agc[idx1] = ppowerlevel[idx1] |
+			    (ppowerlevel[idx1] << 8) |
+			    (ppowerlevel[idx1] << 16) |
+			    (ppowerlevel[idx1] << 24);
+			if (rtlhal->interface == INTF_USB) {
+				if (tx_agc[idx1] > 0x20 &&
+				    rtlefuse->external_pa)
+					tx_agc[idx1] = 0x20;
 			}
 		}
 	} else {
@@ -107,7 +105,7 @@ void rtl92cu_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
 			   TXHIGHPWRLEVEL_LEVEL2) {
 			tx_agc[RF90_PATH_A] = 0x00000000;
 			tx_agc[RF90_PATH_B] = 0x00000000;
-		} else{
+		} else {
 			for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) {
 				tx_agc[idx1] = ppowerlevel[idx1] |
 				    (ppowerlevel[idx1] << 8) |
@@ -373,7 +371,12 @@ static void _rtl92c_write_ofdm_power_reg(struct ieee80211_hw *hw,
 			    regoffset == RTXAGC_B_MCS07_MCS04)
 				regoffset = 0xc98;
 			for (i = 0; i < 3; i++) {
-				writeVal = (writeVal > 6) ? (writeVal - 6) : 0;
+				if (i != 2)
+					writeVal = (writeVal > 8) ?
+						   (writeVal - 8) : 0;
+				else
+					writeVal = (writeVal > 6) ?
+						   (writeVal - 6) : 0;
 				rtl_write_byte(rtlpriv, (u32)(regoffset + i),
 					      (u8)writeVal);
 			}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
index 9936de716ad5..c61311084d7e 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
@@ -50,6 +50,9 @@ MODULE_AUTHOR("Larry Finger	<Larry.Finger@lwfinger.net>");
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Realtek 8192C/8188C 802.11n USB wireless");
 MODULE_FIRMWARE("rtlwifi/rtl8192cufw.bin");
+MODULE_FIRMWARE("rtlwifi/rtl8192cufw_A.bin");
+MODULE_FIRMWARE("rtlwifi/rtl8192cufw_B.bin");
+MODULE_FIRMWARE("rtlwifi/rtl8192cufw_TMSC.bin");
 
 static int rtl92cu_init_sw_vars(struct ieee80211_hw *hw)
 {
@@ -69,14 +72,21 @@ static int rtl92cu_init_sw_vars(struct ieee80211_hw *hw)
 			 "Can't alloc buffer for fw\n");
 		return 1;
 	}
-
+	if (IS_VENDOR_UMC_A_CUT(rtlpriv->rtlhal.version) &&
+	    !IS_92C_SERIAL(rtlpriv->rtlhal.version)) {
+		rtlpriv->cfg->fw_name = "rtlwifi/rtl8192cufw_A.bin";
+	} else if (IS_81xxC_VENDOR_UMC_B_CUT(rtlpriv->rtlhal.version)) {
+		rtlpriv->cfg->fw_name = "rtlwifi/rtl8192cufw_B.bin";
+	} else {
+		rtlpriv->cfg->fw_name = "rtlwifi/rtl8192cufw_TMSC.bin";
+	}
+	/* provide name of alternative file */
+	rtlpriv->cfg->alt_fw_name = "rtlwifi/rtl8192cufw.bin";
 	pr_info("Loading firmware %s\n", rtlpriv->cfg->fw_name);
 	rtlpriv->max_fw_size = 0x4000;
 	err = request_firmware_nowait(THIS_MODULE, 1,
 				      rtlpriv->cfg->fw_name, rtlpriv->io.dev,
 				      GFP_KERNEL, hw, rtl_fw_cb);
-
-
 	return err;
 }
 
@@ -307,6 +317,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
 	{RTL_USB_DEVICE(0x0bda, 0x5088, rtl92cu_hal_cfg)}, /*Thinkware-CC&C*/
 	{RTL_USB_DEVICE(0x0df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
 	{RTL_USB_DEVICE(0x0df6, 0x005c, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
+	{RTL_USB_DEVICE(0x0df6, 0x0077, rtl92cu_hal_cfg)}, /*Sitecom-WLA2100V2*/
 	{RTL_USB_DEVICE(0x0eb0, 0x9071, rtl92cu_hal_cfg)}, /*NO Brand - Etop*/
 	{RTL_USB_DEVICE(0x4856, 0x0091, rtl92cu_hal_cfg)}, /*NetweeN - Feixun*/
 	/* HP - Lite-On ,8188CUS Slim Combo */
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/table.c b/drivers/net/wireless/rtlwifi/rtl8192cu/table.c
index 966be519edb8..7903c154de00 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/table.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/table.c
@@ -36,7 +36,7 @@ u32 RTL8192CUPHY_REG_2TARRAY[RTL8192CUPHY_REG_2TARRAY_LENGTH] = {
 	0x804, 0x00000003,
 	0x808, 0x0000fc00,
 	0x80c, 0x0000000a,
-	0x810, 0x10005388,
+	0x810, 0x10000330,
 	0x814, 0x020c3d10,
 	0x818, 0x02200385,
 	0x81c, 0x00000000,
@@ -110,22 +110,22 @@ u32 RTL8192CUPHY_REG_2TARRAY[RTL8192CUPHY_REG_2TARRAY_LENGTH] = {
 	0xc44, 0x000100b7,
 	0xc48, 0xec020107,
 	0xc4c, 0x007f037f,
-	0xc50, 0x6954341e,
+	0xc50, 0x69543420,
 	0xc54, 0x43bc0094,
-	0xc58, 0x6954341e,
+	0xc58, 0x69543420,
 	0xc5c, 0x433c0094,
 	0xc60, 0x00000000,
 	0xc64, 0x5116848b,
 	0xc68, 0x47c00bff,
 	0xc6c, 0x00000036,
 	0xc70, 0x2c7f000d,
-	0xc74, 0x0186115b,
+	0xc74, 0x2186115b,
 	0xc78, 0x0000001f,
 	0xc7c, 0x00b99612,
 	0xc80, 0x40000100,
 	0xc84, 0x20f60000,
 	0xc88, 0x40000100,
-	0xc8c, 0x20200000,
+	0xc8c, 0xa0e40000,
 	0xc90, 0x00121820,
 	0xc94, 0x00000000,
 	0xc98, 0x00121820,
@@ -226,7 +226,7 @@ u32 RTL8192CUPHY_REG_1TARRAY[RTL8192CUPHY_REG_1TARRAY_LENGTH] = {
 	0x804, 0x00000001,
 	0x808, 0x0000fc00,
 	0x80c, 0x0000000a,
-	0x810, 0x10005388,
+	0x810, 0x10000330,
 	0x814, 0x020c3d10,
 	0x818, 0x02200385,
 	0x81c, 0x00000000,
@@ -300,9 +300,9 @@ u32 RTL8192CUPHY_REG_1TARRAY[RTL8192CUPHY_REG_1TARRAY_LENGTH] = {
 	0xc44, 0x000100b7,
 	0xc48, 0xec020107,
 	0xc4c, 0x007f037f,
-	0xc50, 0x6954341e,
+	0xc50, 0x69543420,
 	0xc54, 0x43bc0094,
-	0xc58, 0x6954341e,
+	0xc58, 0x69543420,
 	0xc5c, 0x433c0094,
 	0xc60, 0x00000000,
 	0xc64, 0x5116848b,
@@ -340,7 +340,7 @@ u32 RTL8192CUPHY_REG_1TARRAY[RTL8192CUPHY_REG_1TARRAY_LENGTH] = {
 	0xce4, 0x00000000,
 	0xce8, 0x37644302,
 	0xcec, 0x2f97d40c,
-	0xd00, 0x00080740,
+	0xd00, 0x00000740,
 	0xd04, 0x00020401,
 	0xd08, 0x0000907f,
 	0xd0c, 0x20010201,
@@ -633,17 +633,17 @@ u32 RTL8192CURADIOA_2TARRAY[RTL8192CURADIOA_2TARRAYLENGTH] = {
 	0x012, 0x00071000,
 	0x012, 0x000b0000,
 	0x012, 0x000fc000,
-	0x013, 0x000287af,
+	0x013, 0x000287b3,
 	0x013, 0x000244b7,
 	0x013, 0x000204ab,
 	0x013, 0x0001c49f,
 	0x013, 0x00018493,
-	0x013, 0x00014297,
-	0x013, 0x00010295,
-	0x013, 0x0000c298,
-	0x013, 0x0000819c,
-	0x013, 0x000040a8,
-	0x013, 0x0000001c,
+	0x013, 0x0001429b,
+	0x013, 0x00010299,
+	0x013, 0x0000c29c,
+	0x013, 0x000081a0,
+	0x013, 0x000040ac,
+	0x013, 0x00000020,
 	0x014, 0x0001944c,
 	0x014, 0x00059444,
 	0x014, 0x0009944c,
@@ -932,10 +932,10 @@ u32 RTL8192CUMAC_2T_ARRAY[RTL8192CUMAC_2T_ARRAYLENGTH] = {
 	0x608, 0x0000000e,
 	0x609, 0x0000002a,
 	0x652, 0x00000020,
-	0x63c, 0x0000000a,
-	0x63d, 0x0000000e,
-	0x63e, 0x0000000a,
-	0x63f, 0x0000000e,
+	0x63c, 0x00000008,
+	0x63d, 0x00000008,
+	0x63e, 0x0000000c,
+	0x63f, 0x0000000c,
 	0x66e, 0x00000005,
 	0x700, 0x00000021,
 	0x701, 0x00000043,
diff --git a/drivers/net/wireless/rtlwifi/stats.c b/drivers/net/wireless/rtlwifi/stats.c
index 8ed31744a054..4f083fc1d360 100644
--- a/drivers/net/wireless/rtlwifi/stats.c
+++ b/drivers/net/wireless/rtlwifi/stats.c
@@ -176,6 +176,7 @@ static void rtl_process_pwdb(struct ieee80211_hw *hw, struct rtl_stats *pstatus)
 	struct rtl_sta_info *drv_priv = NULL;
 	struct ieee80211_sta *sta = NULL;
 	long undec_sm_pwdb;
+	long undec_sm_cck;
 
 	rcu_read_lock();
 	if (rtlpriv->mac80211.opmode != NL80211_IFTYPE_STATION)
@@ -185,12 +186,16 @@ static void rtl_process_pwdb(struct ieee80211_hw *hw, struct rtl_stats *pstatus)
 	if (sta) {
 		drv_priv = (struct rtl_sta_info *) sta->drv_priv;
 		undec_sm_pwdb = drv_priv->rssi_stat.undec_sm_pwdb;
+		undec_sm_cck = drv_priv->rssi_stat.undec_sm_cck;
 	} else {
 		undec_sm_pwdb = rtlpriv->dm.undec_sm_pwdb;
+		undec_sm_cck = rtlpriv->dm.undec_sm_cck;
 	}
 
 	if (undec_sm_pwdb < 0)
 		undec_sm_pwdb = pstatus->rx_pwdb_all;
+	if (undec_sm_cck < 0)
+		undec_sm_cck = pstatus->rx_pwdb_all;
 	if (pstatus->rx_pwdb_all > (u32) undec_sm_pwdb) {
 		undec_sm_pwdb = (((undec_sm_pwdb) *
 		      (RX_SMOOTH_FACTOR - 1)) +
@@ -200,6 +205,15 @@ static void rtl_process_pwdb(struct ieee80211_hw *hw, struct rtl_stats *pstatus)
 		undec_sm_pwdb = (((undec_sm_pwdb) * (RX_SMOOTH_FACTOR - 1)) +
 		     (pstatus->rx_pwdb_all)) / (RX_SMOOTH_FACTOR);
 	}
+	if (pstatus->rx_pwdb_all > (u32) undec_sm_cck) {
+		undec_sm_cck = (((undec_sm_cck) *
+		      (RX_SMOOTH_FACTOR - 1)) +
+		     (pstatus->rx_pwdb_all)) / (RX_SMOOTH_FACTOR);
+		undec_sm_cck = undec_sm_cck + 1;
+	} else {
+		undec_sm_cck = (((undec_sm_cck) * (RX_SMOOTH_FACTOR - 1)) +
+		     (pstatus->rx_pwdb_all)) / (RX_SMOOTH_FACTOR);
+	}
 
 	if (sta) {
 		drv_priv->rssi_stat.undec_sm_pwdb = undec_sm_pwdb;
diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
index 6e2b5c5c83c8..4933f02ce1d5 100644
--- a/drivers/net/wireless/rtlwifi/usb.c
+++ b/drivers/net/wireless/rtlwifi/usb.c
@@ -475,14 +475,14 @@ static void _rtl_usb_rx_process_agg(struct ieee80211_hw *hw,
 			rtlpriv->stats.rxbytesunicast +=  skb->len;
 		}
 
-		rtl_is_special_data(hw, skb, false);
-
 		if (ieee80211_is_data(fc)) {
 			rtlpriv->cfg->ops->led_control(hw, LED_CTL_RX);
 
 			if (unicast)
 				rtlpriv->link_info.num_rx_inperiod++;
 		}
+		/* static bcn for roaming */
+		rtl_beacon_statistic(hw, skb);
 	}
 }
 
@@ -517,8 +517,6 @@ static void _rtl_usb_rx_process_noagg(struct ieee80211_hw *hw,
 			rtlpriv->stats.rxbytesunicast +=  skb->len;
 		}
 
-		rtl_is_special_data(hw, skb, false);
-
 		if (ieee80211_is_data(fc)) {
 			rtlpriv->cfg->ops->led_control(hw, LED_CTL_RX);
 
@@ -553,7 +551,7 @@ static void _rtl_rx_pre_process(struct ieee80211_hw *hw, struct sk_buff *skb)
 	}
 }
 
-#define __RX_SKB_MAX_QUEUED	32
+#define __RX_SKB_MAX_QUEUED	64
 
 static void _rtl_rx_work(unsigned long param)
 {
diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h
index 0c65386fa30d..8c647391bedf 100644
--- a/drivers/net/wireless/rtlwifi/wifi.h
+++ b/drivers/net/wireless/rtlwifi/wifi.h
@@ -1033,6 +1033,7 @@ struct rtl_ht_agg {
 
 struct rssi_sta {
 	long undec_sm_pwdb;
+	long undec_sm_cck;
 };
 
 struct rtl_tid_data {
@@ -1323,8 +1324,10 @@ struct fast_ant_training {
 struct rtl_dm {
 	/*PHY status for Dynamic Management */
 	long entry_min_undec_sm_pwdb;
+	long undec_sm_cck;
 	long undec_sm_pwdb;	/*out dm */
 	long entry_max_undec_sm_pwdb;
+	s32 ofdm_pkt_cnt;
 	bool dm_initialgain_enable;
 	bool dynamic_txpower_enable;
 	bool current_turbo_edca;
@@ -1339,6 +1342,7 @@ struct rtl_dm {
 	bool inform_fw_driverctrldm;
 	bool current_mrc_switch;
 	u8 txpowercount;
+	u8 powerindex_backup[6];
 
 	u8 thermalvalue_rxgain;
 	u8 thermalvalue_iqk;
@@ -1350,7 +1354,9 @@ struct rtl_dm {
 	bool done_txpower;
 	u8 dynamic_txhighpower_lvl;	/*Tx high power level */
 	u8 dm_flag;		/*Indicate each dynamic mechanism's status. */
+	u8 dm_flag_tmp;
 	u8 dm_type;
+	u8 dm_rssi_sel;
 	u8 txpower_track_control;
 	bool interrupt_migration;
 	bool disable_tx_int;
@@ -1804,6 +1810,7 @@ struct rtl_hal_cfg {
 	bool write_readback;
 	char *name;
 	char *fw_name;
+	char *alt_fw_name;
 	struct rtl_hal_ops *ops;
 	struct rtl_mod_params *mod_params;
 	struct rtl_hal_usbint_cfg *usb_interface_cfg;
@@ -1948,6 +1955,7 @@ struct dig_t {
 	u8 pre_ccastate;
 	u8 cur_ccasate;
 	u8 large_fa_hit;
+	u8 dig_dynamic_min;
 	u8 forbidden_igi;
 	u8 dig_state;
 	u8 dig_highpwrstate;
@@ -2028,22 +2036,15 @@ struct rtl_priv {
 	struct dig_t dm_digtable;
 	struct ps_t dm_pstable;
 
-	/* section shared by individual drivers */
-	union {
-		struct {	/* data buffer pointer for USB reads */
-			__le32 *usb_data;
-			int usb_data_index;
-			bool initialized;
-		};
-		struct {	/* section for 8723ae */
-			bool reg_init;	/* true if regs saved */
-			u32 reg_874;
-			u32 reg_c70;
-			u32 reg_85c;
-			u32 reg_a74;
-			bool bt_operation_on;
-		};
-	};
+	u32 reg_874;
+	u32 reg_c70;
+	u32 reg_85c;
+	u32 reg_a74;
+	bool reg_init;	/* true if regs saved */
+	bool bt_operation_on;
+	__le32 *usb_data;
+	int usb_data_index;
+	bool initialized;
 	bool enter_ps;	/* true when entering PS */
 	u8 rate_mask[5];
 
diff --git a/drivers/net/wireless/ti/wl1251/acx.c b/drivers/net/wireless/ti/wl1251/acx.c
index db6430c1a084..5a4ec56c83d0 100644
--- a/drivers/net/wireless/ti/wl1251/acx.c
+++ b/drivers/net/wireless/ti/wl1251/acx.c
@@ -18,10 +18,8 @@ int wl1251_acx_frame_rates(struct wl1251 *wl, u8 ctrl_rate, u8 ctrl_mod,
 	wl1251_debug(DEBUG_ACX, "acx frame rates");
 
 	rates = kzalloc(sizeof(*rates), GFP_KERNEL);
-	if (!rates) {
-		ret = -ENOMEM;
-		goto out;
-	}
+	if (!rates)
+		return -ENOMEM;
 
 	rates->tx_ctrl_frame_rate = ctrl_rate;
 	rates->tx_ctrl_frame_mod = ctrl_mod;
@@ -49,10 +47,8 @@ int wl1251_acx_station_id(struct wl1251 *wl)
 	wl1251_debug(DEBUG_ACX, "acx dot11_station_id");
 
 	mac = kzalloc(sizeof(*mac), GFP_KERNEL);
-	if (!mac) {
-		ret = -ENOMEM;
-		goto out;
-	}
+	if (!mac)
+		return -ENOMEM;
 
 	for (i = 0; i < ETH_ALEN; i++)
 		mac->mac[i] = wl->mac_addr[ETH_ALEN - 1 - i];
@@ -74,10 +70,8 @@ int wl1251_acx_default_key(struct wl1251 *wl, u8 key_id)
 	wl1251_debug(DEBUG_ACX, "acx dot11_default_key (%d)", key_id);
 
 	default_key = kzalloc(sizeof(*default_key), GFP_KERNEL);
-	if (!default_key) {
-		ret = -ENOMEM;
-		goto out;
-	}
+	if (!default_key)
+		return -ENOMEM;
 
 	default_key->id = key_id;
 
@@ -104,10 +98,8 @@ int wl1251_acx_wake_up_conditions(struct wl1251 *wl, u8 wake_up_event,
 	wl1251_debug(DEBUG_ACX, "acx wake up conditions");
 
 	wake_up = kzalloc(sizeof(*wake_up), GFP_KERNEL);
-	if (!wake_up) {
-		ret = -ENOMEM;
-		goto out;
-	}
+	if (!wake_up)
+		return -ENOMEM;
 
 	wake_up->wake_up_event = wake_up_event;
 	wake_up->listen_interval = listen_interval;
@@ -132,16 +124,13 @@ int wl1251_acx_sleep_auth(struct wl1251 *wl, u8 sleep_auth)
 	wl1251_debug(DEBUG_ACX, "acx sleep auth");
 
 	auth = kzalloc(sizeof(*auth), GFP_KERNEL);
-	if (!auth) {
-		ret = -ENOMEM;
-		goto out;
-	}
+	if (!auth)
+		return -ENOMEM;
 
 	auth->sleep_auth = sleep_auth;
 
 	ret = wl1251_cmd_configure(wl, ACX_SLEEP_AUTH, auth, sizeof(*auth));
 
-out:
 	kfree(auth);
 	return ret;
 }
@@ -154,10 +143,8 @@ int wl1251_acx_fw_version(struct wl1251 *wl, char *buf, size_t len)
 	wl1251_debug(DEBUG_ACX, "acx fw rev");
 
 	rev = kzalloc(sizeof(*rev), GFP_KERNEL);
-	if (!rev) {
-		ret = -ENOMEM;
-		goto out;
-	}
+	if (!rev)
+		return -ENOMEM;
 
 	ret = wl1251_cmd_interrogate(wl, ACX_FW_REV, rev, sizeof(*rev));
 	if (ret < 0) {
@@ -191,10 +178,8 @@ int wl1251_acx_tx_power(struct wl1251 *wl, int power)
 		return -EINVAL;
 
 	acx = kzalloc(sizeof(*acx), GFP_KERNEL);
-	if (!acx) {
-		ret = -ENOMEM;
-		goto out;
-	}
+	if (!acx)
+		return -ENOMEM;
 
 	acx->current_tx_power = power * 10;
 
@@ -209,7 +194,7 @@ out:
 	return ret;
 }
 
-int wl1251_acx_feature_cfg(struct wl1251 *wl)
+int wl1251_acx_feature_cfg(struct wl1251 *wl, u32 data_flow_options)
 {
 	struct acx_feature_config *feature;
 	int ret;
@@ -217,13 +202,11 @@ int wl1251_acx_feature_cfg(struct wl1251 *wl)
 	wl1251_debug(DEBUG_ACX, "acx feature cfg");
 
 	feature = kzalloc(sizeof(*feature), GFP_KERNEL);
-	if (!feature) {
-		ret = -ENOMEM;
-		goto out;
-	}
+	if (!feature)
+		return -ENOMEM;
 
-	/* DF_ENCRYPTION_DISABLE and DF_SNIFF_MODE_ENABLE are disabled */
-	feature->data_flow_options = 0;
+	/* DF_ENCRYPTION_DISABLE and DF_SNIFF_MODE_ENABLE can be set */
+	feature->data_flow_options = data_flow_options;
 	feature->options = 0;
 
 	ret = wl1251_cmd_configure(wl, ACX_FEATURE_CFG,
@@ -261,10 +244,8 @@ int wl1251_acx_data_path_params(struct wl1251 *wl,
 	wl1251_debug(DEBUG_ACX, "acx data path params");
 
 	params = kzalloc(sizeof(*params), GFP_KERNEL);
-	if (!params) {
-		ret = -ENOMEM;
-		goto out;
-	}
+	if (!params)
+		return -ENOMEM;
 
 	params->rx_packet_ring_chunk_size = DP_RX_PACKET_RING_CHUNK_SIZE;
 	params->tx_packet_ring_chunk_size = DP_TX_PACKET_RING_CHUNK_SIZE;
@@ -309,10 +290,8 @@ int wl1251_acx_rx_msdu_life_time(struct wl1251 *wl, u32 life_time)
 	wl1251_debug(DEBUG_ACX, "acx rx msdu life time");
 
 	acx = kzalloc(sizeof(*acx), GFP_KERNEL);
-	if (!acx) {
-		ret = -ENOMEM;
-		goto out;
-	}
+	if (!acx)
+		return -ENOMEM;
 
 	acx->lifetime = life_time;
 	ret = wl1251_cmd_configure(wl, DOT11_RX_MSDU_LIFE_TIME,
@@ -335,10 +314,8 @@ int wl1251_acx_rx_config(struct wl1251 *wl, u32 config, u32 filter)
 	wl1251_debug(DEBUG_ACX, "acx rx config");
 
 	rx_config = kzalloc(sizeof(*rx_config), GFP_KERNEL);
-	if (!rx_config) {
-		ret = -ENOMEM;
-		goto out;
-	}
+	if (!rx_config)
+		return -ENOMEM;
 
 	rx_config->config_options = config;
 	rx_config->filter_options = filter;
@@ -363,10 +340,8 @@ int wl1251_acx_pd_threshold(struct wl1251 *wl)
 	wl1251_debug(DEBUG_ACX, "acx data pd threshold");
 
 	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
-	if (!pd) {
-		ret = -ENOMEM;
-		goto out;
-	}
+	if (!pd)
+		return -ENOMEM;
 
 	/* FIXME: threshold value not set */
 
@@ -389,10 +364,8 @@ int wl1251_acx_slot(struct wl1251 *wl, enum acx_slot_type slot_time)
 	wl1251_debug(DEBUG_ACX, "acx slot");
 
 	slot = kzalloc(sizeof(*slot), GFP_KERNEL);
-	if (!slot) {
-		ret = -ENOMEM;
-		goto out;
-	}
+	if (!slot)
+		return -ENOMEM;
 
 	slot->wone_index = STATION_WONE_INDEX;
 	slot->slot_time = slot_time;
@@ -408,7 +381,8 @@ out:
 	return ret;
 }
 
-int wl1251_acx_group_address_tbl(struct wl1251 *wl)
+int wl1251_acx_group_address_tbl(struct wl1251 *wl, bool enable,
+				 void *mc_list, u32 mc_list_len)
 {
 	struct acx_dot11_grp_addr_tbl *acx;
 	int ret;
@@ -416,15 +390,13 @@ int wl1251_acx_group_address_tbl(struct wl1251 *wl)
 	wl1251_debug(DEBUG_ACX, "acx group address tbl");
 
 	acx = kzalloc(sizeof(*acx), GFP_KERNEL);
-	if (!acx) {
-		ret = -ENOMEM;
-		goto out;
-	}
+	if (!acx)
+		return -ENOMEM;
 
 	/* MAC filtering */
-	acx->enabled = 0;
-	acx->num_groups = 0;
-	memset(acx->mac_table, 0, ADDRESS_GROUP_MAX_LEN);
+	acx->enabled = enable;
+	acx->num_groups = mc_list_len;
+	memcpy(acx->mac_table, mc_list, mc_list_len * ETH_ALEN);
 
 	ret = wl1251_cmd_configure(wl, DOT11_GROUP_ADDRESS_TBL,
 				   acx, sizeof(*acx));
@@ -444,10 +416,8 @@ int wl1251_acx_service_period_timeout(struct wl1251 *wl)
 	int ret;
 
 	rx_timeout = kzalloc(sizeof(*rx_timeout), GFP_KERNEL);
-	if (!rx_timeout) {
-		ret = -ENOMEM;
-		goto out;
-	}
+	if (!rx_timeout)
+		return -ENOMEM;
 
 	wl1251_debug(DEBUG_ACX, "acx service period timeout");
 
@@ -475,10 +445,8 @@ int wl1251_acx_rts_threshold(struct wl1251 *wl, u16 rts_threshold)
 	wl1251_debug(DEBUG_ACX, "acx rts threshold");
 
 	rts = kzalloc(sizeof(*rts), GFP_KERNEL);
-	if (!rts) {
-		ret = -ENOMEM;
-		goto out;
-	}
+	if (!rts)
+		return -ENOMEM;
 
 	rts->threshold = rts_threshold;
 
@@ -501,10 +469,8 @@ int wl1251_acx_beacon_filter_opt(struct wl1251 *wl, bool enable_filter)
 	wl1251_debug(DEBUG_ACX, "acx beacon filter opt");
 
 	beacon_filter = kzalloc(sizeof(*beacon_filter), GFP_KERNEL);
-	if (!beacon_filter) {
-		ret = -ENOMEM;
-		goto out;
-	}
+	if (!beacon_filter)
+		return -ENOMEM;
 
 	beacon_filter->enable = enable_filter;
 	beacon_filter->max_num_beacons = 0;
@@ -530,10 +496,8 @@ int wl1251_acx_beacon_filter_table(struct wl1251 *wl)
 	wl1251_debug(DEBUG_ACX, "acx beacon filter table");
 
 	ie_table = kzalloc(sizeof(*ie_table), GFP_KERNEL);
-	if (!ie_table) {
-		ret = -ENOMEM;
-		goto out;
-	}
+	if (!ie_table)
+		return -ENOMEM;
 
 	/* configure default beacon pass-through rules */
 	ie_table->num_ie = 1;
@@ -560,10 +524,8 @@ int wl1251_acx_conn_monit_params(struct wl1251 *wl)
 	wl1251_debug(DEBUG_ACX, "acx connection monitor parameters");
 
 	acx = kzalloc(sizeof(*acx), GFP_KERNEL);
-	if (!acx) {
-		ret = -ENOMEM;
-		goto out;
-	}
+	if (!acx)
+		return -ENOMEM;
 
 	acx->synch_fail_thold = SYNCH_FAIL_DEFAULT_THRESHOLD;
 	acx->bss_lose_timeout = NO_BEACON_DEFAULT_TIMEOUT;
@@ -589,10 +551,8 @@ int wl1251_acx_sg_enable(struct wl1251 *wl)
 	wl1251_debug(DEBUG_ACX, "acx sg enable");
 
 	pta = kzalloc(sizeof(*pta), GFP_KERNEL);
-	if (!pta) {
-		ret = -ENOMEM;
-		goto out;
-	}
+	if (!pta)
+		return -ENOMEM;
 
 	pta->enable = SG_ENABLE;
 
@@ -615,10 +575,8 @@ int wl1251_acx_sg_cfg(struct wl1251 *wl)
 	wl1251_debug(DEBUG_ACX, "acx sg cfg");
 
 	param = kzalloc(sizeof(*param), GFP_KERNEL);
-	if (!param) {
-		ret = -ENOMEM;
-		goto out;
-	}
+	if (!param)
+		return -ENOMEM;
 
 	/* BT-WLAN coext parameters */
 	param->min_rate = RATE_INDEX_24MBPS;
@@ -669,10 +627,8 @@ int wl1251_acx_cca_threshold(struct wl1251 *wl)
 	wl1251_debug(DEBUG_ACX, "acx cca threshold");
 
 	detection = kzalloc(sizeof(*detection), GFP_KERNEL);
-	if (!detection) {
-		ret = -ENOMEM;
-		goto out;
-	}
+	if (!detection)
+		return -ENOMEM;
 
 	detection->rx_cca_threshold = CCA_THRSH_DISABLE_ENERGY_D;
 	detection->tx_energy_detection = 0;
@@ -682,7 +638,6 @@ int wl1251_acx_cca_threshold(struct wl1251 *wl)
 	if (ret < 0)
 		wl1251_warning("failed to set cca threshold: %d", ret);
 
-out:
 	kfree(detection);
 	return ret;
 }
@@ -695,10 +650,8 @@ int wl1251_acx_bcn_dtim_options(struct wl1251 *wl)
 	wl1251_debug(DEBUG_ACX, "acx bcn dtim options");
 
 	bb = kzalloc(sizeof(*bb), GFP_KERNEL);
-	if (!bb) {
-		ret = -ENOMEM;
-		goto out;
-	}
+	if (!bb)
+		return -ENOMEM;
 
 	bb->beacon_rx_timeout = BCN_RX_TIMEOUT_DEF_VALUE;
 	bb->broadcast_timeout = BROADCAST_RX_TIMEOUT_DEF_VALUE;
@@ -724,10 +677,8 @@ int wl1251_acx_aid(struct wl1251 *wl, u16 aid)
 	wl1251_debug(DEBUG_ACX, "acx aid");
 
 	acx_aid = kzalloc(sizeof(*acx_aid), GFP_KERNEL);
-	if (!acx_aid) {
-		ret = -ENOMEM;
-		goto out;
-	}
+	if (!acx_aid)
+		return -ENOMEM;
 
 	acx_aid->aid = aid;
 
@@ -750,10 +701,8 @@ int wl1251_acx_event_mbox_mask(struct wl1251 *wl, u32 event_mask)
 	wl1251_debug(DEBUG_ACX, "acx event mbox mask");
 
 	mask = kzalloc(sizeof(*mask), GFP_KERNEL);
-	if (!mask) {
-		ret = -ENOMEM;
-		goto out;
-	}
+	if (!mask)
+		return -ENOMEM;
 
 	/* high event mask is unused */
 	mask->high_event_mask = 0xffffffff;
@@ -805,10 +754,8 @@ int wl1251_acx_set_preamble(struct wl1251 *wl, enum acx_preamble_type preamble)
 	wl1251_debug(DEBUG_ACX, "acx_set_preamble");
 
 	acx = kzalloc(sizeof(*acx), GFP_KERNEL);
-	if (!acx) {
-		ret = -ENOMEM;
-		goto out;
-	}
+	if (!acx)
+		return -ENOMEM;
 
 	acx->preamble = preamble;
 
@@ -832,10 +779,8 @@ int wl1251_acx_cts_protect(struct wl1251 *wl,
 	wl1251_debug(DEBUG_ACX, "acx_set_ctsprotect");
 
 	acx = kzalloc(sizeof(*acx), GFP_KERNEL);
-	if (!acx) {
-		ret = -ENOMEM;
-		goto out;
-	}
+	if (!acx)
+		return -ENOMEM;
 
 	acx->ctsprotect = ctsprotect;
 
@@ -856,10 +801,8 @@ int wl1251_acx_tsf_info(struct wl1251 *wl, u64 *mactime)
 	int ret;
 
 	tsf_info = kzalloc(sizeof(*tsf_info), GFP_KERNEL);
-	if (!tsf_info) {
-		ret = -ENOMEM;
-		goto out;
-	}
+	if (!tsf_info)
+		return -ENOMEM;
 
 	ret = wl1251_cmd_interrogate(wl, ACX_TSF_INFO,
 				     tsf_info, sizeof(*tsf_info));
@@ -900,19 +843,22 @@ int wl1251_acx_rate_policies(struct wl1251 *wl)
 	wl1251_debug(DEBUG_ACX, "acx rate policies");
 
 	acx = kzalloc(sizeof(*acx), GFP_KERNEL);
-
-	if (!acx) {
-		ret = -ENOMEM;
-		goto out;
-	}
+	if (!acx)
+		return -ENOMEM;
 
 	/* configure one default (one-size-fits-all) rate class */
-	acx->rate_class_cnt = 1;
+	acx->rate_class_cnt = 2;
 	acx->rate_class[0].enabled_rates = ACX_RATE_MASK_UNSPECIFIED;
 	acx->rate_class[0].short_retry_limit = ACX_RATE_RETRY_LIMIT;
 	acx->rate_class[0].long_retry_limit = ACX_RATE_RETRY_LIMIT;
 	acx->rate_class[0].aflags = 0;
 
+	/* no-retry rate class */
+	acx->rate_class[1].enabled_rates = ACX_RATE_MASK_UNSPECIFIED;
+	acx->rate_class[1].short_retry_limit = 0;
+	acx->rate_class[1].long_retry_limit = 0;
+	acx->rate_class[1].aflags = 0;
+
 	ret = wl1251_cmd_configure(wl, ACX_RATE_POLICY, acx, sizeof(*acx));
 	if (ret < 0) {
 		wl1251_warning("Setting of rate policies failed: %d", ret);
@@ -932,10 +878,8 @@ int wl1251_acx_mem_cfg(struct wl1251 *wl)
 	wl1251_debug(DEBUG_ACX, "acx mem cfg");
 
 	mem_conf = kzalloc(sizeof(*mem_conf), GFP_KERNEL);
-	if (!mem_conf) {
-		ret = -ENOMEM;
-		goto out;
-	}
+	if (!mem_conf)
+		return -ENOMEM;
 
 	/* memory config */
 	mem_conf->mem_config.num_stations = cpu_to_le16(DEFAULT_NUM_STATIONS);
@@ -979,10 +923,8 @@ int wl1251_acx_wr_tbtt_and_dtim(struct wl1251 *wl, u16 tbtt, u8 dtim)
 	wl1251_debug(DEBUG_ACX, "acx tbtt and dtim");
 
 	acx = kzalloc(sizeof(*acx), GFP_KERNEL);
-	if (!acx) {
-		ret = -ENOMEM;
-		goto out;
-	}
+	if (!acx)
+		return -ENOMEM;
 
 	acx->tbtt = tbtt;
 	acx->dtim = dtim;
@@ -1008,10 +950,8 @@ int wl1251_acx_bet_enable(struct wl1251 *wl, enum wl1251_acx_bet_mode mode,
 	wl1251_debug(DEBUG_ACX, "acx bet enable");
 
 	acx = kzalloc(sizeof(*acx), GFP_KERNEL);
-	if (!acx) {
-		ret = -ENOMEM;
-		goto out;
-	}
+	if (!acx)
+		return -ENOMEM;
 
 	acx->enable = mode;
 	acx->max_consecutive = max_consecutive;
@@ -1027,6 +967,32 @@ out:
 	return ret;
 }
 
+int wl1251_acx_arp_ip_filter(struct wl1251 *wl, bool enable, __be32 address)
+{
+	struct wl1251_acx_arp_filter *acx;
+	int ret;
+
+	wl1251_debug(DEBUG_ACX, "acx arp ip filter, enable: %d", enable);
+
+	acx = kzalloc(sizeof(*acx), GFP_KERNEL);
+	if (!acx)
+		return -ENOMEM;
+
+	acx->version = ACX_IPV4_VERSION;
+	acx->enable = enable;
+
+	if (enable)
+		memcpy(acx->address, &address, ACX_IPV4_ADDR_SIZE);
+
+	ret = wl1251_cmd_configure(wl, ACX_ARP_IP_FILTER,
+				   acx, sizeof(*acx));
+	if (ret < 0)
+		wl1251_warning("failed to set arp ip filter: %d", ret);
+
+	kfree(acx);
+	return ret;
+}
+
 int wl1251_acx_ac_cfg(struct wl1251 *wl, u8 ac, u8 cw_min, u16 cw_max,
 		      u8 aifs, u16 txop)
 {
@@ -1037,11 +1003,8 @@ int wl1251_acx_ac_cfg(struct wl1251 *wl, u8 ac, u8 cw_min, u16 cw_max,
 		     "aifs %d txop %d", ac, cw_min, cw_max, aifs, txop);
 
 	acx = kzalloc(sizeof(*acx), GFP_KERNEL);
-
-	if (!acx) {
-		ret = -ENOMEM;
-		goto out;
-	}
+	if (!acx)
+		return -ENOMEM;
 
 	acx->ac = ac;
 	acx->cw_min = cw_min;
@@ -1073,11 +1036,8 @@ int wl1251_acx_tid_cfg(struct wl1251 *wl, u8 queue,
 		     ps_scheme, ack_policy);
 
 	acx = kzalloc(sizeof(*acx), GFP_KERNEL);
-
-	if (!acx) {
-		ret = -ENOMEM;
-		goto out;
-	}
+	if (!acx)
+		return -ENOMEM;
 
 	acx->queue = queue;
 	acx->type = type;
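
For illustration (not part of the patch itself): the new wl1251_acx_arp_ip_filter() takes an enable flag and the station's IPv4 address in network byte order. A minimal sketch, assuming wl is the driver's struct wl1251 context and the address value is only an example:

	__be32 addr = cpu_to_be32(0xc0a80102);	/* 192.168.1.2, example value */
	int ret;

	ret = wl1251_acx_arp_ip_filter(wl, true, addr);
	if (ret < 0)
		wl1251_warning("arp ip filter setup failed: %d", ret);

	/* disable again, e.g. when the address goes away */
	ret = wl1251_acx_arp_ip_filter(wl, false, 0);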
diff --git a/drivers/net/wireless/ti/wl1251/acx.h b/drivers/net/wireless/ti/wl1251/acx.h
index c2ba100f9b1a..2bdec38699f4 100644
--- a/drivers/net/wireless/ti/wl1251/acx.h
+++ b/drivers/net/wireless/ti/wl1251/acx.h
@@ -350,8 +350,8 @@ struct acx_slot {
 } __packed;
 
 
-#define ADDRESS_GROUP_MAX	(8)
-#define ADDRESS_GROUP_MAX_LEN	(ETH_ALEN * ADDRESS_GROUP_MAX)
+#define ACX_MC_ADDRESS_GROUP_MAX	(8)
+#define ACX_MC_ADDRESS_GROUP_MAX_LEN	(ETH_ALEN * ACX_MC_ADDRESS_GROUP_MAX)
 
 struct acx_dot11_grp_addr_tbl {
 	struct acx_header header;
@@ -359,7 +359,7 @@ struct acx_dot11_grp_addr_tbl {
 	u8 enabled;
 	u8 num_groups;
 	u8 pad[2];
-	u8 mac_table[ADDRESS_GROUP_MAX_LEN];
+	u8 mac_table[ACX_MC_ADDRESS_GROUP_MAX_LEN];
 } __packed;
 
 
@@ -1232,6 +1232,20 @@ struct wl1251_acx_bet_enable {
 	u8 padding[2];
 } __packed;
 
+#define ACX_IPV4_VERSION 4
+#define ACX_IPV6_VERSION 6
+#define ACX_IPV4_ADDR_SIZE 4
+struct wl1251_acx_arp_filter {
+	struct acx_header header;
+	u8 version;	/* The IP version: 4 - IPv4, 6 - IPv6.*/
+	u8 enable;	/* 1 - ARP filtering is enabled, 0 - disabled */
+	u8 padding[2];
+	u8 address[16];	/* The IP address used to filter ARP packets.
+			   ARP packets that do not match this address are
+			   dropped. When the IP Version is 4, the last 12
+			   bytes of the the address are ignored. */
+} __attribute__((packed));
+
 struct wl1251_acx_ac_cfg {
 	struct acx_header header;
 
@@ -1440,7 +1454,7 @@ int wl1251_acx_wake_up_conditions(struct wl1251 *wl, u8 wake_up_event,
 int wl1251_acx_sleep_auth(struct wl1251 *wl, u8 sleep_auth);
 int wl1251_acx_fw_version(struct wl1251 *wl, char *buf, size_t len);
 int wl1251_acx_tx_power(struct wl1251 *wl, int power);
-int wl1251_acx_feature_cfg(struct wl1251 *wl);
+int wl1251_acx_feature_cfg(struct wl1251 *wl, u32 data_flow_options);
 int wl1251_acx_mem_map(struct wl1251 *wl,
 		       struct acx_header *mem_map, size_t len);
 int wl1251_acx_data_path_params(struct wl1251 *wl,
@@ -1449,7 +1463,8 @@ int wl1251_acx_rx_msdu_life_time(struct wl1251 *wl, u32 life_time);
 int wl1251_acx_rx_config(struct wl1251 *wl, u32 config, u32 filter);
 int wl1251_acx_pd_threshold(struct wl1251 *wl);
 int wl1251_acx_slot(struct wl1251 *wl, enum acx_slot_type slot_time);
-int wl1251_acx_group_address_tbl(struct wl1251 *wl);
+int wl1251_acx_group_address_tbl(struct wl1251 *wl, bool enable,
+				 void *mc_list, u32 mc_list_len);
 int wl1251_acx_service_period_timeout(struct wl1251 *wl);
 int wl1251_acx_rts_threshold(struct wl1251 *wl, u16 rts_threshold);
 int wl1251_acx_beacon_filter_opt(struct wl1251 *wl, bool enable_filter);
@@ -1473,6 +1488,7 @@ int wl1251_acx_mem_cfg(struct wl1251 *wl);
 int wl1251_acx_wr_tbtt_and_dtim(struct wl1251 *wl, u16 tbtt, u8 dtim);
 int wl1251_acx_bet_enable(struct wl1251 *wl, enum wl1251_acx_bet_mode mode,
 			  u8 max_consecutive);
+int wl1251_acx_arp_ip_filter(struct wl1251 *wl, bool enable, __be32 address);
 int wl1251_acx_ac_cfg(struct wl1251 *wl, u8 ac, u8 cw_min, u16 cw_max,
 		      u8 aifs, u16 txop);
 int wl1251_acx_tid_cfg(struct wl1251 *wl, u8 queue,
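Because wl1251_acx_arp_filter is __packed, its size is fully determined by the declaration above: the acx_header plus 1 + 1 + 2 + 16 bytes. A hedged compile-time sanity check one could place in any init function (BUILD_BUG_ON() must sit inside a function body); the check itself is illustrative and not part of this patch:

	BUILD_BUG_ON(sizeof(struct wl1251_acx_arp_filter) !=
		     sizeof(struct acx_header) + 1 + 1 + 2 + 16);

Only the first ACX_IPV4_ADDR_SIZE bytes of the address field carry data for IPv4; the remaining bytes stay zero because the caller allocates the struct with kzalloc().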
diff --git a/drivers/net/wireless/ti/wl1251/boot.c b/drivers/net/wireless/ti/wl1251/boot.c
index a2e5241382da..2000cd536077 100644
--- a/drivers/net/wireless/ti/wl1251/boot.c
+++ b/drivers/net/wireless/ti/wl1251/boot.c
@@ -299,7 +299,8 @@ int wl1251_boot_run_firmware(struct wl1251 *wl)
 		ROAMING_TRIGGER_LOW_RSSI_EVENT_ID |
 		ROAMING_TRIGGER_REGAINED_RSSI_EVENT_ID |
 		REGAINED_BSS_EVENT_ID | BT_PTA_SENSE_EVENT_ID |
-		BT_PTA_PREDICTION_EVENT_ID | JOIN_EVENT_COMPLETE_ID;
+		BT_PTA_PREDICTION_EVENT_ID | JOIN_EVENT_COMPLETE_ID |
+		PS_REPORT_EVENT_ID;
 
 	ret = wl1251_event_unmask(wl);
 	if (ret < 0) {
diff --git a/drivers/net/wireless/ti/wl1251/cmd.c b/drivers/net/wireless/ti/wl1251/cmd.c
index 6822b845efc1..223649bcaa5a 100644
--- a/drivers/net/wireless/ti/wl1251/cmd.c
+++ b/drivers/net/wireless/ti/wl1251/cmd.c
@@ -3,6 +3,7 @@
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/crc7.h>
+#include <linux/etherdevice.h>
 
 #include "wl1251.h"
 #include "reg.h"
@@ -203,11 +204,11 @@ out:
 	return ret;
 }
 
-int wl1251_cmd_data_path(struct wl1251 *wl, u8 channel, bool enable)
+int wl1251_cmd_data_path_rx(struct wl1251 *wl, u8 channel, bool enable)
 {
 	struct cmd_enabledisable_path *cmd;
 	int ret;
-	u16 cmd_rx, cmd_tx;
+	u16 cmd_rx;
 
 	wl1251_debug(DEBUG_CMD, "cmd data path");
 
@@ -219,13 +220,10 @@ int wl1251_cmd_data_path(struct wl1251 *wl, u8 channel, bool enable)
 
 	cmd->channel = channel;
 
-	if (enable) {
+	if (enable)
 		cmd_rx = CMD_ENABLE_RX;
-		cmd_tx = CMD_ENABLE_TX;
-	} else {
+	else
 		cmd_rx = CMD_DISABLE_RX;
-		cmd_tx = CMD_DISABLE_TX;
-	}
 
 	ret = wl1251_cmd_send(wl, cmd_rx, cmd, sizeof(*cmd));
 	if (ret < 0) {
@@ -237,17 +235,38 @@ int wl1251_cmd_data_path(struct wl1251 *wl, u8 channel, bool enable)
 	wl1251_debug(DEBUG_BOOT, "rx %s cmd channel %d",
 		     enable ? "start" : "stop", channel);
 
+out:
+	kfree(cmd);
+	return ret;
+}
+
+int wl1251_cmd_data_path_tx(struct wl1251 *wl, u8 channel, bool enable)
+{
+	struct cmd_enabledisable_path *cmd;
+	int ret;
+	u16 cmd_tx;
+
+	wl1251_debug(DEBUG_CMD, "cmd data path");
+
+	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+	if (!cmd)
+		return -ENOMEM;
+
+	cmd->channel = channel;
+
+	if (enable)
+		cmd_tx = CMD_ENABLE_TX;
+	else
+		cmd_tx = CMD_DISABLE_TX;
+
 	ret = wl1251_cmd_send(wl, cmd_tx, cmd, sizeof(*cmd));
-	if (ret < 0) {
+	if (ret < 0)
 		wl1251_error("tx %s cmd for channel %d failed",
 			     enable ? "start" : "stop", channel);
-		goto out;
-	}
-
-	wl1251_debug(DEBUG_BOOT, "tx %s cmd channel %d",
-		     enable ? "start" : "stop", channel);
+	else
+		wl1251_debug(DEBUG_BOOT, "tx %s cmd channel %d",
+			     enable ? "start" : "stop", channel);
 
-out:
 	kfree(cmd);
 	return ret;
 }
@@ -410,7 +429,9 @@ int wl1251_cmd_scan(struct wl1251 *wl, u8 *ssid, size_t ssid_len,
 	struct wl1251_cmd_scan *cmd;
 	int i, ret = 0;
 
-	wl1251_debug(DEBUG_CMD, "cmd scan");
+	wl1251_debug(DEBUG_CMD, "cmd scan channels %d", n_channels);
+
+	WARN_ON(n_channels > SCAN_MAX_NUM_OF_CHANNELS);
 
 	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
 	if (!cmd)
@@ -421,6 +442,13 @@ int wl1251_cmd_scan(struct wl1251 *wl, u8 *ssid, size_t ssid_len,
 						    CFG_RX_MGMT_EN |
 						    CFG_RX_BCN_EN);
 	cmd->params.scan_options = 0;
+	/*
+	 * Use a high priority scan when not associated to prevent a fw issue
+	 * causing never-ending scans (sometimes 20+ minutes).
+	 * Note: This bug may be caused by the fw's DTIM handling.
+	 */
+	if (is_zero_ether_addr(wl->bssid))
+		cmd->params.scan_options |= WL1251_SCAN_OPT_PRIORITY_HIGH;
 	cmd->params.num_channels = n_channels;
 	cmd->params.num_probe_requests = n_probes;
 	cmd->params.tx_rate = cpu_to_le16(1 << 1); /* 2 Mbps */
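Splitting the old wl1251_cmd_data_path() into rx and tx variants lets callers bring up one direction at a time; init.c enables both in sequence, while the monitor-mode path in main.c enables rx alone. A hedged sketch of the two-step bring-up, where the rollback on tx failure is illustrative and not something this patch adds:

static int wl1251_example_enable_data_path(struct wl1251 *wl)
{
	int ret;

	ret = wl1251_cmd_data_path_rx(wl, wl->channel, 1);
	if (ret < 0)
		return ret;

	ret = wl1251_cmd_data_path_tx(wl, wl->channel, 1);
	if (ret < 0)
		/* illustrative rollback, not part of this patch */
		wl1251_cmd_data_path_rx(wl, wl->channel, 0);

	return ret;
}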
diff --git a/drivers/net/wireless/ti/wl1251/cmd.h b/drivers/net/wireless/ti/wl1251/cmd.h
index ee4f2b391822..d824ff978311 100644
--- a/drivers/net/wireless/ti/wl1251/cmd.h
+++ b/drivers/net/wireless/ti/wl1251/cmd.h
@@ -35,7 +35,8 @@ int wl1251_cmd_interrogate(struct wl1251 *wl, u16 id, void *buf, size_t len);
 int wl1251_cmd_configure(struct wl1251 *wl, u16 id, void *buf, size_t len);
 int wl1251_cmd_vbm(struct wl1251 *wl, u8 identity,
 		   void *bitmap, u16 bitmap_len, u8 bitmap_control);
-int wl1251_cmd_data_path(struct wl1251 *wl, u8 channel, bool enable);
+int wl1251_cmd_data_path_rx(struct wl1251 *wl, u8 channel, bool enable);
+int wl1251_cmd_data_path_tx(struct wl1251 *wl, u8 channel, bool enable);
 int wl1251_cmd_join(struct wl1251 *wl, u8 bss_type, u8 channel,
 		    u16 beacon_interval, u8 dtim_interval);
 int wl1251_cmd_ps_mode(struct wl1251 *wl, u8 ps_mode);
@@ -167,6 +168,11 @@ struct cmd_read_write_memory {
 #define CMDMBOX_HEADER_LEN 4
 #define CMDMBOX_INFO_ELEM_HEADER_LEN 4
 
+#define WL1251_SCAN_OPT_PASSIVE		1
+#define WL1251_SCAN_OPT_5GHZ_BAND	2
+#define WL1251_SCAN_OPT_TRIGGERD_SCAN	4
+#define WL1251_SCAN_OPT_PRIORITY_HIGH	8
+
 #define WL1251_SCAN_MIN_DURATION 30000
 #define WL1251_SCAN_MAX_DURATION 60000
 
diff --git a/drivers/net/wireless/ti/wl1251/event.c b/drivers/net/wireless/ti/wl1251/event.c
index 74ae8e1c2e33..db0105313745 100644
--- a/drivers/net/wireless/ti/wl1251/event.c
+++ b/drivers/net/wireless/ti/wl1251/event.c
@@ -46,6 +46,43 @@ static int wl1251_event_scan_complete(struct wl1251 *wl,
 	return ret;
 }
 
+#define WL1251_PSM_ENTRY_RETRIES  3
+static int wl1251_event_ps_report(struct wl1251 *wl,
+				  struct event_mailbox *mbox)
+{
+	int ret = 0;
+
+	wl1251_debug(DEBUG_EVENT, "ps status: %x", mbox->ps_status);
+
+	switch (mbox->ps_status) {
+	case EVENT_ENTER_POWER_SAVE_FAIL:
+		wl1251_debug(DEBUG_PSM, "PSM entry failed");
+
+		if (wl->station_mode != STATION_POWER_SAVE_MODE) {
+			/* remain in active mode */
+			wl->psm_entry_retry = 0;
+			break;
+		}
+
+		if (wl->psm_entry_retry < WL1251_PSM_ENTRY_RETRIES) {
+			wl->psm_entry_retry++;
+			ret = wl1251_ps_set_mode(wl, STATION_POWER_SAVE_MODE);
+		} else {
+			wl1251_error("Power save entry failed, giving up");
+			wl->psm_entry_retry = 0;
+		}
+		break;
+	case EVENT_ENTER_POWER_SAVE_SUCCESS:
+	case EVENT_EXIT_POWER_SAVE_FAIL:
+	case EVENT_EXIT_POWER_SAVE_SUCCESS:
+	default:
+		wl->psm_entry_retry = 0;
+		break;
+	}
+
+	return ret;
+}
+
 static void wl1251_event_mbox_dump(struct event_mailbox *mbox)
 {
 	wl1251_debug(DEBUG_EVENT, "MBOX DUMP:");
@@ -80,7 +117,14 @@ static int wl1251_event_process(struct wl1251 *wl, struct event_mailbox *mbox)
 		}
 	}
 
-	if (vector & SYNCHRONIZATION_TIMEOUT_EVENT_ID) {
+	if (vector & PS_REPORT_EVENT_ID) {
+		wl1251_debug(DEBUG_EVENT, "PS_REPORT_EVENT");
+		ret = wl1251_event_ps_report(wl, mbox);
+		if (ret < 0)
+			return ret;
+	}
+
+	if (wl->vif && vector & SYNCHRONIZATION_TIMEOUT_EVENT_ID) {
 		wl1251_debug(DEBUG_EVENT, "SYNCHRONIZATION_TIMEOUT_EVENT");
 
 		/* indicate to the stack, that beacons have been lost */
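The PS report handler retries a failed power-save entry only while power save is still the requested station mode, and gives up after WL1251_PSM_ENTRY_RETRIES attempts. A small stand-alone model of that decision in plain C, so the bound can be exercised outside the driver (should_retry_psm_entry() is illustrative, not a driver symbol):

#include <stdbool.h>
#include <stdio.h>

#define PSM_ENTRY_RETRIES 3	/* mirrors WL1251_PSM_ENTRY_RETRIES */

/* A failed PSM entry is retried only while power save is still the
 * requested mode and the retry budget is not exhausted.
 */
static bool should_retry_psm_entry(bool ps_requested, int retries_so_far)
{
	return ps_requested && retries_so_far < PSM_ENTRY_RETRIES;
}

int main(void)
{
	printf("%d\n", should_retry_psm_entry(true, 2));	/* 1: retry */
	printf("%d\n", should_retry_psm_entry(true, 3));	/* 0: give up */
	printf("%d\n", should_retry_psm_entry(false, 0));	/* 0: stay active */
	return 0;
}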
diff --git a/drivers/net/wireless/ti/wl1251/event.h b/drivers/net/wireless/ti/wl1251/event.h
index 30eb5d150bf7..88570a5cd042 100644
--- a/drivers/net/wireless/ti/wl1251/event.h
+++ b/drivers/net/wireless/ti/wl1251/event.h
@@ -112,6 +112,13 @@ struct event_mailbox {
 	u8 padding[19];
 } __packed;
 
+enum {
+	EVENT_ENTER_POWER_SAVE_FAIL = 0,
+	EVENT_ENTER_POWER_SAVE_SUCCESS,
+	EVENT_EXIT_POWER_SAVE_FAIL,
+	EVENT_EXIT_POWER_SAVE_SUCCESS,
+};
+
 int wl1251_event_unmask(struct wl1251 *wl);
 void wl1251_event_mbox_config(struct wl1251 *wl);
 int wl1251_event_handle(struct wl1251 *wl, u8 mbox);
diff --git a/drivers/net/wireless/ti/wl1251/init.c b/drivers/net/wireless/ti/wl1251/init.c
index 89b43d35473c..1d799bffaa9f 100644
--- a/drivers/net/wireless/ti/wl1251/init.c
+++ b/drivers/net/wireless/ti/wl1251/init.c
@@ -33,7 +33,7 @@ int wl1251_hw_init_hwenc_config(struct wl1251 *wl)
 {
 	int ret;
 
-	ret = wl1251_acx_feature_cfg(wl);
+	ret = wl1251_acx_feature_cfg(wl, 0);
 	if (ret < 0) {
 		wl1251_warning("couldn't set feature config");
 		return ret;
@@ -127,7 +127,7 @@ int wl1251_hw_init_phy_config(struct wl1251 *wl)
 	if (ret < 0)
 		return ret;
 
-	ret = wl1251_acx_group_address_tbl(wl);
+	ret = wl1251_acx_group_address_tbl(wl, true, NULL, 0);
 	if (ret < 0)
 		return ret;
 
@@ -394,8 +394,13 @@ int wl1251_hw_init(struct wl1251 *wl)
 	if (ret < 0)
 		goto out_free_data_path;
 
-	/* Enable data path */
-	ret = wl1251_cmd_data_path(wl, wl->channel, 1);
+	/* Enable rx data path */
+	ret = wl1251_cmd_data_path_rx(wl, wl->channel, 1);
+	if (ret < 0)
+		goto out_free_data_path;
+
+	/* Enable tx data path */
+	ret = wl1251_cmd_data_path_tx(wl, wl->channel, 1);
 	if (ret < 0)
 		goto out_free_data_path;
 
diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c
index 3291ffa95273..757e25784a8a 100644
--- a/drivers/net/wireless/ti/wl1251/main.c
+++ b/drivers/net/wireless/ti/wl1251/main.c
@@ -28,6 +28,7 @@
 #include <linux/etherdevice.h>
 #include <linux/vmalloc.h>
 #include <linux/slab.h>
+#include <linux/netdevice.h>
 
 #include "wl1251.h"
 #include "wl12xx_80211.h"
@@ -479,10 +480,13 @@ static void wl1251_op_stop(struct ieee80211_hw *hw)
 	wl->next_tx_complete = 0;
 	wl->elp = false;
 	wl->station_mode = STATION_ACTIVE_MODE;
+	wl->psm_entry_retry = 0;
 	wl->tx_queue_stopped = false;
 	wl->power_level = WL1251_DEFAULT_POWER_LEVEL;
 	wl->rssi_thold = 0;
 	wl->channel = WL1251_DEFAULT_CHANNEL;
+	wl->monitor_present = false;
+	wl->joined = false;
 
 	wl1251_debugfs_reset(wl);
 
@@ -521,7 +525,7 @@ static int wl1251_op_add_interface(struct ieee80211_hw *hw,
 		goto out;
 	}
 
-	if (memcmp(wl->mac_addr, vif->addr, ETH_ALEN)) {
+	if (!ether_addr_equal_unaligned(wl->mac_addr, vif->addr)) {
 		memcpy(wl->mac_addr, vif->addr, ETH_ALEN);
 		SET_IEEE80211_PERM_ADDR(wl->hw, wl->mac_addr);
 		ret = wl1251_acx_station_id(wl);
@@ -542,6 +546,7 @@ static void wl1251_op_remove_interface(struct ieee80211_hw *hw,
 	mutex_lock(&wl->mutex);
 	wl1251_debug(DEBUG_MAC80211, "mac80211 remove interface");
 	wl->vif = NULL;
+	memset(wl->bssid, 0, ETH_ALEN);
 	mutex_unlock(&wl->mutex);
 }
 
@@ -566,6 +571,11 @@ static int wl1251_build_qos_null_data(struct wl1251 *wl)
 				       sizeof(template));
 }
 
+static bool wl1251_can_do_pm(struct ieee80211_conf *conf, struct wl1251 *wl)
+{
+	return (conf->flags & IEEE80211_CONF_PS) && !wl->monitor_present;
+}
+
 static int wl1251_op_config(struct ieee80211_hw *hw, u32 changed)
 {
 	struct wl1251 *wl = hw->priv;
@@ -575,8 +585,10 @@ static int wl1251_op_config(struct ieee80211_hw *hw, u32 changed)
 	channel = ieee80211_frequency_to_channel(
 			conf->chandef.chan->center_freq);
 
-	wl1251_debug(DEBUG_MAC80211, "mac80211 config ch %d psm %s power %d",
+	wl1251_debug(DEBUG_MAC80211,
+		     "mac80211 config ch %d monitor %s psm %s power %d",
 		     channel,
+		     conf->flags & IEEE80211_CONF_MONITOR ? "on" : "off",
 		     conf->flags & IEEE80211_CONF_PS ? "on" : "off",
 		     conf->power_level);
 
@@ -586,16 +598,44 @@ static int wl1251_op_config(struct ieee80211_hw *hw, u32 changed)
 	if (ret < 0)
 		goto out;
 
+	if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
+		u32 mode;
+
+		if (conf->flags & IEEE80211_CONF_MONITOR) {
+			wl->monitor_present = true;
+			mode = DF_SNIFF_MODE_ENABLE | DF_ENCRYPTION_DISABLE;
+		} else {
+			wl->monitor_present = false;
+			mode = 0;
+		}
+
+		ret = wl1251_acx_feature_cfg(wl, mode);
+		if (ret < 0)
+			goto out_sleep;
+	}
+
 	if (channel != wl->channel) {
 		wl->channel = channel;
 
-		ret = wl1251_join(wl, wl->bss_type, wl->channel,
-				  wl->beacon_int, wl->dtim_period);
+		/*
+		 * Use ENABLE_RX command for channel switching when no
+		 * interface is present (monitor mode only).
+		 * This leaves the tx path disabled in firmware, whereas
+		 * the usual JOIN command seems to transmit some frames
+		 * at firmware level.
+		 */
+		if (wl->vif == NULL) {
+			wl->joined = false;
+			ret = wl1251_cmd_data_path_rx(wl, wl->channel, 1);
+		} else {
+			ret = wl1251_join(wl, wl->bss_type, wl->channel,
+					  wl->beacon_int, wl->dtim_period);
+		}
 		if (ret < 0)
 			goto out_sleep;
 	}
 
-	if (conf->flags & IEEE80211_CONF_PS && !wl->psm_requested) {
+	if (wl1251_can_do_pm(conf, wl) && !wl->psm_requested) {
 		wl1251_debug(DEBUG_PSM, "psm enabled");
 
 		wl->psm_requested = true;
@@ -611,8 +651,7 @@ static int wl1251_op_config(struct ieee80211_hw *hw, u32 changed)
 		ret = wl1251_ps_set_mode(wl, STATION_POWER_SAVE_MODE);
 		if (ret < 0)
 			goto out_sleep;
-	} else if (!(conf->flags & IEEE80211_CONF_PS) &&
-		   wl->psm_requested) {
+	} else if (!wl1251_can_do_pm(conf, wl) && wl->psm_requested) {
 		wl1251_debug(DEBUG_PSM, "psm disabled");
 
 		wl->psm_requested = false;
@@ -648,6 +687,16 @@ static int wl1251_op_config(struct ieee80211_hw *hw, u32 changed)
 		wl->power_level = conf->power_level;
 	}
 
+	/*
+	 * Tell the stack that the connection is lost because hw encryption
+	 * isn't supported in monitor mode. This requires temporarily
+	 * enabling the hw connection monitor flag.
+	 */
+	if ((changed & IEEE80211_CONF_CHANGE_MONITOR) && wl->vif) {
+		wl->hw->flags |= IEEE80211_HW_CONNECTION_MONITOR;
+		ieee80211_connection_loss(wl->vif);
+	}
+
 out_sleep:
 	wl1251_ps_elp_sleep(wl);
 
@@ -657,6 +706,44 @@ out:
 	return ret;
 }
 
+struct wl1251_filter_params {
+	bool enabled;
+	int mc_list_length;
+	u8 mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN];
+};
+
+static u64 wl1251_op_prepare_multicast(struct ieee80211_hw *hw,
+				       struct netdev_hw_addr_list *mc_list)
+{
+	struct wl1251_filter_params *fp;
+	struct netdev_hw_addr *ha;
+	struct wl1251 *wl = hw->priv;
+
+	if (unlikely(wl->state == WL1251_STATE_OFF))
+		return 0;
+
+	fp = kzalloc(sizeof(*fp), GFP_ATOMIC);
+	if (!fp) {
+		wl1251_error("Out of memory setting filters.");
+		return 0;
+	}
+
+	/* update multicast filtering parameters */
+	fp->mc_list_length = 0;
+	if (netdev_hw_addr_list_count(mc_list) > ACX_MC_ADDRESS_GROUP_MAX) {
+		fp->enabled = false;
+	} else {
+		fp->enabled = true;
+		netdev_hw_addr_list_for_each(ha, mc_list) {
+			memcpy(fp->mc_list[fp->mc_list_length],
+					ha->addr, ETH_ALEN);
+			fp->mc_list_length++;
+		}
+	}
+
+	return (u64)(unsigned long)fp;
+}
+
 #define WL1251_SUPPORTED_FILTERS (FIF_PROMISC_IN_BSS | \
 				  FIF_ALLMULTI | \
 				  FIF_FCSFAIL | \
@@ -667,8 +754,9 @@ out:
 
 static void wl1251_op_configure_filter(struct ieee80211_hw *hw,
 				       unsigned int changed,
-				       unsigned int *total,u64 multicast)
+				       unsigned int *total, u64 multicast)
 {
+	struct wl1251_filter_params *fp = (void *)(unsigned long)multicast;
 	struct wl1251 *wl = hw->priv;
 	int ret;
 
@@ -677,9 +765,11 @@ static void wl1251_op_configure_filter(struct ieee80211_hw *hw,
 	*total &= WL1251_SUPPORTED_FILTERS;
 	changed &= WL1251_SUPPORTED_FILTERS;
 
-	if (changed == 0)
+	if (changed == 0) {
 		/* no filters which we support changed */
+		kfree(fp);
 		return;
+	}
 
 	mutex_lock(&wl->mutex);
 
@@ -716,6 +806,15 @@ static void wl1251_op_configure_filter(struct ieee80211_hw *hw,
 	if (ret < 0)
 		goto out;
 
+	if (*total & FIF_ALLMULTI || *total & FIF_PROMISC_IN_BSS)
+		ret = wl1251_acx_group_address_tbl(wl, false, NULL, 0);
+	else if (fp)
+		ret = wl1251_acx_group_address_tbl(wl, fp->enabled,
+						   fp->mc_list,
+						   fp->mc_list_length);
+	if (ret < 0)
+		goto out;
+
 	/* send filters to firmware */
 	wl1251_acx_rx_config(wl, wl->rx_config, wl->rx_filter);
 
@@ -723,6 +822,7 @@ static void wl1251_op_configure_filter(struct ieee80211_hw *hw,
 
 out:
 	mutex_unlock(&wl->mutex);
+	kfree(fp);
 }
 
 /* HW encryption */
@@ -802,12 +902,12 @@ static int wl1251_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
 
 	mutex_lock(&wl->mutex);
 
-	ret = wl1251_ps_elp_wakeup(wl);
-	if (ret < 0)
-		goto out_unlock;
-
 	switch (cmd) {
 	case SET_KEY:
+		if (wl->monitor_present) {
+			ret = -EOPNOTSUPP;
+			goto out_unlock;
+		}
 		wl_cmd->key_action = KEY_ADD_OR_REPLACE;
 		break;
 	case DISABLE_KEY:
@@ -818,6 +918,10 @@ static int wl1251_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
 		break;
 	}
 
+	ret = wl1251_ps_elp_wakeup(wl);
+	if (ret < 0)
+		goto out_unlock;
+
 	ret = wl1251_set_key_type(wl, wl_cmd, cmd, key, addr);
 	if (ret < 0) {
 		wl1251_error("Set KEY type failed");
@@ -930,6 +1034,7 @@ static int wl1251_op_hw_scan(struct ieee80211_hw *hw,
 	ret = wl1251_cmd_scan(wl, ssid, ssid_len, req->channels,
 			      req->n_channels, WL1251_SCAN_NUM_PROBES);
 	if (ret < 0) {
+		wl1251_debug(DEBUG_SCAN, "scan failed %d", ret);
 		wl->scanning = false;
 		goto out_idle;
 	}
@@ -977,6 +1082,7 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
 {
 	struct wl1251 *wl = hw->priv;
 	struct sk_buff *beacon, *skb;
+	bool enable;
 	int ret;
 
 	wl1251_debug(DEBUG_MAC80211, "mac80211 bss info changed");
@@ -1023,6 +1129,9 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
 	}
 
 	if (changed & BSS_CHANGED_ASSOC) {
+		/* Disable the temporarily enabled hw connection monitor flag */
+		wl->hw->flags &= ~IEEE80211_HW_CONNECTION_MONITOR;
+
 		if (bss_conf->assoc) {
 			wl->beacon_int = bss_conf->beacon_int;
 
@@ -1075,6 +1184,17 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
 		}
 	}
 
+	if (changed & BSS_CHANGED_ARP_FILTER) {
+		__be32 addr = bss_conf->arp_addr_list[0];
+		WARN_ON(wl->bss_type != BSS_TYPE_STA_BSS);
+
+		enable = bss_conf->arp_addr_cnt == 1 && bss_conf->assoc;
+		ret = wl1251_acx_arp_ip_filter(wl, enable, addr);
+
+		if (ret < 0)
+			goto out_sleep;
+	}
+
 	if (changed & BSS_CHANGED_BEACON) {
 		beacon = ieee80211_beacon_get(hw, vif);
 		if (!beacon)
@@ -1245,6 +1365,7 @@ static const struct ieee80211_ops wl1251_ops = {
 	.add_interface = wl1251_op_add_interface,
 	.remove_interface = wl1251_op_remove_interface,
 	.config = wl1251_op_config,
+	.prepare_multicast = wl1251_op_prepare_multicast,
 	.configure_filter = wl1251_op_configure_filter,
 	.tx = wl1251_op_tx,
 	.set_key = wl1251_op_set_key,
@@ -1347,7 +1468,6 @@ int wl1251_init_ieee80211(struct wl1251 *wl)
 
 	/* unit us */
 	/* FIXME: find a proper value */
-	wl->hw->channel_change_time = 10000;
 
 	wl->hw->flags = IEEE80211_HW_SIGNAL_DBM |
 		IEEE80211_HW_SUPPORTS_PS |
@@ -1401,7 +1521,10 @@ struct ieee80211_hw *wl1251_alloc_hw(void)
 
 	INIT_DELAYED_WORK(&wl->elp_work, wl1251_elp_work);
 	wl->channel = WL1251_DEFAULT_CHANNEL;
+	wl->monitor_present = false;
+	wl->joined = false;
 	wl->scanning = false;
+	wl->bss_type = MAX_BSS_TYPE;
 	wl->default_key = 0;
 	wl->listen_int = 1;
 	wl->rx_counter = 0;
@@ -1413,6 +1536,7 @@ struct ieee80211_hw *wl1251_alloc_hw(void)
 	wl->elp = false;
 	wl->station_mode = STATION_ACTIVE_MODE;
 	wl->psm_requested = false;
+	wl->psm_entry_retry = 0;
 	wl->tx_queue_stopped = false;
 	wl->power_level = WL1251_DEFAULT_POWER_LEVEL;
 	wl->rssi_thold = 0;
@@ -1478,3 +1602,4 @@ MODULE_DESCRIPTION("TI wl1251 Wireles LAN Driver Core");
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Kalle Valo <kvalo@adurom.com>");
 MODULE_FIRMWARE(WL1251_FW_NAME);
+MODULE_FIRMWARE(WL1251_NVS_NAME);
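prepare_multicast() and configure_filter() cooperate through an opaque u64 cookie: the snapshot of the hardware multicast list is kmalloc'd, its pointer is returned as a u64, and configure_filter() casts it back and must free it on every exit path. A hedged helper showing just the cookie consumption; the FIF_ALLMULTI/FIF_PROMISC_IN_BSS override in configure_filter() above is deliberately left out, and the helper name is illustrative:

static int wl1251_example_apply_mc(struct wl1251 *wl,
				   struct wl1251_filter_params *fp)
{
	int ret = 0;

	if (fp)
		ret = wl1251_acx_group_address_tbl(wl, fp->enabled,
						   fp->mc_list,
						   fp->mc_list_length);

	kfree(fp);	/* configure_filter() owns and frees the cookie */
	return ret;
}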
diff --git a/drivers/net/wireless/ti/wl1251/rx.c b/drivers/net/wireless/ti/wl1251/rx.c
index 23289d49dd31..123c4bb50e0a 100644
--- a/drivers/net/wireless/ti/wl1251/rx.c
+++ b/drivers/net/wireless/ti/wl1251/rx.c
@@ -83,7 +83,7 @@ static void wl1251_rx_status(struct wl1251 *wl,
 
 	status->flag |= RX_FLAG_MACTIME_START;
 
-	if (desc->flags & RX_DESC_ENCRYPTION_MASK) {
+	if (!wl->monitor_present && (desc->flags & RX_DESC_ENCRYPTION_MASK)) {
 		status->flag |= RX_FLAG_IV_STRIPPED | RX_FLAG_MMIC_STRIPPED;
 
 		if (likely(!(desc->flags & RX_DESC_DECRYPT_FAIL)))
diff --git a/drivers/net/wireless/ti/wl1251/tx.c b/drivers/net/wireless/ti/wl1251/tx.c
index 28121c590a2b..81de83c6fcf6 100644
--- a/drivers/net/wireless/ti/wl1251/tx.c
+++ b/drivers/net/wireless/ti/wl1251/tx.c
@@ -28,6 +28,7 @@
 #include "tx.h"
 #include "ps.h"
 #include "io.h"
+#include "event.h"
 
 static bool wl1251_tx_double_buffer_busy(struct wl1251 *wl, u32 data_out_count)
 {
@@ -89,8 +90,12 @@ static void wl1251_tx_control(struct tx_double_buffer_desc *tx_hdr,
 	/* 802.11 packets */
 	tx_hdr->control.packet_type = 0;
 
-	if (control->flags & IEEE80211_TX_CTL_NO_ACK)
+	/* Also disable retry and ACK policy for injected packets */
+	if ((control->flags & IEEE80211_TX_CTL_NO_ACK) ||
+	    (control->flags & IEEE80211_TX_CTL_INJECTED)) {
+		tx_hdr->control.rate_policy = 1;
 		tx_hdr->control.ack_policy = 1;
+	}
 
 	tx_hdr->control.tx_complete = 1;
 
@@ -277,6 +282,26 @@ static void wl1251_tx_trigger(struct wl1251 *wl)
 		TX_STATUS_DATA_OUT_COUNT_MASK;
 }
 
+static void enable_tx_for_packet_injection(struct wl1251 *wl)
+{
+	int ret;
+
+	ret = wl1251_cmd_join(wl, BSS_TYPE_STA_BSS, wl->channel,
+			      wl->beacon_int, wl->dtim_period);
+	if (ret < 0) {
+		wl1251_warning("join failed");
+		return;
+	}
+
+	ret = wl1251_event_wait(wl, JOIN_EVENT_COMPLETE_ID, 100);
+	if (ret < 0) {
+		wl1251_warning("join timeout");
+		return;
+	}
+
+	wl->joined = true;
+}
+
 /* caller must hold wl->mutex */
 static int wl1251_tx_frame(struct wl1251 *wl, struct sk_buff *skb)
 {
@@ -287,6 +312,9 @@ static int wl1251_tx_frame(struct wl1251 *wl, struct sk_buff *skb)
 	info = IEEE80211_SKB_CB(skb);
 
 	if (info->control.hw_key) {
+		if (unlikely(wl->monitor_present))
+			return -EINVAL;
+
 		idx = info->control.hw_key->hw_key_idx;
 		if (unlikely(wl->default_key != idx)) {
 			ret = wl1251_acx_default_key(wl, idx);
@@ -295,6 +323,10 @@ static int wl1251_tx_frame(struct wl1251 *wl, struct sk_buff *skb)
 		}
 	}
 
+	/* Enable tx path in monitor mode for packet injection */
+	if ((wl->vif == NULL) && !wl->joined)
+		enable_tx_for_packet_injection(wl);
+
 	ret = wl1251_tx_path_status(wl);
 	if (ret < 0)
 		return ret;
@@ -394,6 +426,7 @@ static void wl1251_tx_packet_cb(struct wl1251 *wl,
 	info = IEEE80211_SKB_CB(skb);
 
 	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) &&
+	    !(info->flags & IEEE80211_TX_CTL_INJECTED) &&
 	    (result->status == TX_SUCCESS))
 		info->flags |= IEEE80211_TX_STAT_ACK;
 
diff --git a/drivers/net/wireless/ti/wl1251/wl1251.h b/drivers/net/wireless/ti/wl1251/wl1251.h
index 2c3bd1bff3f6..235617a7716d 100644
--- a/drivers/net/wireless/ti/wl1251/wl1251.h
+++ b/drivers/net/wireless/ti/wl1251/wl1251.h
@@ -93,6 +93,7 @@ enum {
 	} while (0)
 
 #define WL1251_DEFAULT_RX_CONFIG (CFG_UNI_FILTER_EN |	\
+				  CFG_MC_FILTER_EN |	\
 				  CFG_BSSID_FILTER_EN)
 
 #define WL1251_DEFAULT_RX_FILTER (CFG_RX_PRSP_EN |  \
@@ -303,6 +304,8 @@ struct wl1251 {
 	u8 bss_type;
 	u8 listen_int;
 	int channel;
+	bool monitor_present;
+	bool joined;
 
 	void *target_mem_map;
 	struct acx_data_path_params_resp *data_path;
@@ -368,6 +371,9 @@ struct wl1251 {
 	/* PSM mode requested */
 	bool psm_requested;
 
+	/* retry counter for PSM entries */
+	u8 psm_entry_retry;
+
 	u16 beacon_int;
 	u8 dtim_period;
 
diff --git a/drivers/net/wireless/ti/wl12xx/scan.c b/drivers/net/wireless/ti/wl12xx/scan.c
index 4a0bbb13806b..7541bd1a4a4b 100644
--- a/drivers/net/wireless/ti/wl12xx/scan.c
+++ b/drivers/net/wireless/ti/wl12xx/scan.c
@@ -47,7 +47,7 @@ static int wl1271_get_scan_channels(struct wl1271 *wl,
 		     * In active scans, we only scan channels not
 		     * marked as passive.
 		     */
-		    (passive || !(flags & IEEE80211_CHAN_PASSIVE_SCAN))) {
+		    (passive || !(flags & IEEE80211_CHAN_NO_IR))) {
 			wl1271_debug(DEBUG_SCAN, "band %d, center_freq %d ",
 				     req->channels[i]->band,
 				     req->channels[i]->center_freq);
diff --git a/drivers/net/wireless/ti/wlcore/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c
index 34d9dfff2ad3..9b2ecf52449f 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.c
+++ b/drivers/net/wireless/ti/wlcore/cmd.c
@@ -1688,7 +1688,7 @@ int wlcore_cmd_regdomain_config_locked(struct wl1271 *wl)
 
 			if (channel->flags & (IEEE80211_CHAN_DISABLED |
 					      IEEE80211_CHAN_RADAR |
-					      IEEE80211_CHAN_PASSIVE_SCAN))
+					      IEEE80211_CHAN_NO_IR))
 				continue;
 
 			ch_bit_idx = wlcore_get_reg_conf_ch_idx(b, ch);
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index 0368b9cbfb89..b46b3116cc55 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -91,8 +91,7 @@ static void wl1271_reg_notify(struct wiphy *wiphy,
 			continue;
 
 		if (ch->flags & IEEE80211_CHAN_RADAR)
-			ch->flags |= IEEE80211_CHAN_NO_IBSS |
-				     IEEE80211_CHAN_PASSIVE_SCAN;
+			ch->flags |= IEEE80211_CHAN_NO_IR;
 
 	}
 
@@ -4458,6 +4457,16 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
 	if (ret < 0)
 		goto out;
 
+	if ((changed & BSS_CHANGED_TXPOWER) &&
+	    bss_conf->txpower != wlvif->power_level) {
+
+		ret = wl1271_acx_tx_power(wl, wlvif, bss_conf->txpower);
+		if (ret < 0)
+			goto out;
+
+		wlvif->power_level = bss_conf->txpower;
+	}
+
 	if (is_ap)
 		wl1271_bss_info_changed_ap(wl, vif, bss_conf, changed);
 	else
@@ -5711,7 +5720,6 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
 
 	/* unit us */
 	/* FIXME: find a proper value */
-	wl->hw->channel_change_time = 10000;
 	wl->hw->max_listen_interval = wl->conf.conn.max_listen_interval;
 
 	wl->hw->flags = IEEE80211_HW_SIGNAL_DBM |
diff --git a/drivers/net/wireless/ti/wlcore/scan.c b/drivers/net/wireless/ti/wlcore/scan.c
index 7ed86203304b..1e3d51cd673a 100644
--- a/drivers/net/wireless/ti/wlcore/scan.c
+++ b/drivers/net/wireless/ti/wlcore/scan.c
@@ -188,16 +188,14 @@ wlcore_scan_get_channels(struct wl1271 *wl,
 		flags = req_channels[i]->flags;
 
 		if (force_passive)
-			flags |= IEEE80211_CHAN_PASSIVE_SCAN;
+			flags |= IEEE80211_CHAN_NO_IR;
 
 		if ((req_channels[i]->band == band) &&
 		    !(flags & IEEE80211_CHAN_DISABLED) &&
 		    (!!(flags & IEEE80211_CHAN_RADAR) == radar) &&
 		    /* if radar is set, we ignore the passive flag */
 		    (radar ||
-		     !!(flags & IEEE80211_CHAN_PASSIVE_SCAN) == passive)) {
-
-
+		     !!(flags & IEEE80211_CHAN_NO_IR) == passive)) {
 			if (flags & IEEE80211_CHAN_RADAR) {
 				channels[j].flags |= SCAN_CHANNEL_FLAGS_DFS;
 
@@ -220,7 +218,7 @@ wlcore_scan_get_channels(struct wl1271 *wl,
 			    (band == IEEE80211_BAND_2GHZ) &&
 			    (channels[j].channel >= 12) &&
 			    (channels[j].channel <= 14) &&
-			    (flags & IEEE80211_CHAN_PASSIVE_SCAN) &&
+			    (flags & IEEE80211_CHAN_NO_IR) &&
 			    !force_passive) {
 				/* pactive channels treated as DFS */
 				channels[j].flags = SCAN_CHANNEL_FLAGS_DFS;
@@ -243,8 +241,8 @@ wlcore_scan_get_channels(struct wl1271 *wl,
 				     max_dwell_time_active,
 				     flags & IEEE80211_CHAN_RADAR ?
 					", DFS" : "",
-				     flags & IEEE80211_CHAN_PASSIVE_SCAN ?
-					", PASSIVE" : "");
+				     flags & IEEE80211_CHAN_NO_IR ?
+					", NO-IR" : "");
 			j++;
 		}
 	}
diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c
index 38d2089f338a..d24d4a958c67 100644
--- a/drivers/net/wireless/wl3501_cs.c
+++ b/drivers/net/wireless/wl3501_cs.c
@@ -29,7 +29,6 @@
 
 #include <linux/delay.h>
 #include <linux/types.h>
-#include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/in.h>
 #include <linux/kernel.h>
@@ -44,6 +43,7 @@
 #include <linux/string.h>
 #include <linux/wireless.h>
 #include <linux/ieee80211.h>
+#include <linux/etherdevice.h>
 
 #include <net/iw_handler.h>
 
@@ -673,8 +673,7 @@ static void wl3501_mgmt_scan_confirm(struct wl3501_card *this, u16 addr)
 				matchflag = 1;
 			if (matchflag) {
 				for (i = 0; i < this->bss_cnt; i++) {
-					if (!memcmp(this->bss_set[i].bssid,
-						    sig.bssid, ETH_ALEN)) {
+					if (ether_addr_equal_unaligned(this->bss_set[i].bssid, sig.bssid)) {
 						matchflag = 0;
 						break;
 					}
diff --git a/drivers/net/wireless/zd1211rw/zd_chip.c b/drivers/net/wireless/zd1211rw/zd_chip.c
index 71ab320fae82..73a49b868035 100644
--- a/drivers/net/wireless/zd1211rw/zd_chip.c
+++ b/drivers/net/wireless/zd1211rw/zd_chip.c
@@ -14,8 +14,7 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 /* This file implements all the hardware specific functions for the ZD1211
diff --git a/drivers/net/wireless/zd1211rw/zd_chip.h b/drivers/net/wireless/zd1211rw/zd_chip.h
index 7ab922209b25..b03786c9f3aa 100644
--- a/drivers/net/wireless/zd1211rw/zd_chip.h
+++ b/drivers/net/wireless/zd1211rw/zd_chip.h
@@ -14,8 +14,7 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 #ifndef _ZD_CHIP_H
diff --git a/drivers/net/wireless/zd1211rw/zd_def.h b/drivers/net/wireless/zd1211rw/zd_def.h
index 9a1b013f81be..41bd755bc135 100644
--- a/drivers/net/wireless/zd1211rw/zd_def.h
+++ b/drivers/net/wireless/zd1211rw/zd_def.h
@@ -14,8 +14,7 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 #ifndef _ZD_DEF_H
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c
index c6208a7988e4..e7af261e9198 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zd1211rw/zd_mac.c
@@ -16,8 +16,7 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 #include <linux/netdevice.h>
@@ -533,9 +532,8 @@ void zd_mac_tx_failed(struct urb *urb)
 		tx_hdr = (struct ieee80211_hdr *)skb->data;
 
 		/* we skip all frames not matching the reported destination */
-		if (unlikely(memcmp(tx_hdr->addr1, tx_status->mac, ETH_ALEN))) {
+		if (unlikely(!ether_addr_equal(tx_hdr->addr1, tx_status->mac)))
 			continue;
-		}
 
 		/* we skip all frames not matching the reported final rate */
 
@@ -998,7 +996,7 @@ static int filter_ack(struct ieee80211_hw *hw, struct ieee80211_hdr *rx_hdr,
 		    continue;
 
 		tx_hdr = (struct ieee80211_hdr *)skb->data;
-		if (likely(!memcmp(tx_hdr->addr2, rx_hdr->addr1, ETH_ALEN)))
+		if (likely(ether_addr_equal(tx_hdr->addr2, rx_hdr->addr1)))
 		{
 			found = 1;
 			break;
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.h b/drivers/net/wireless/zd1211rw/zd_mac.h
index c01eca859f95..5a484235308f 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.h
+++ b/drivers/net/wireless/zd1211rw/zd_mac.h
@@ -14,8 +14,7 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 #ifndef _ZD_MAC_H
diff --git a/drivers/net/wireless/zd1211rw/zd_rf.c b/drivers/net/wireless/zd1211rw/zd_rf.c
index c875ee05e22e..dc179c414518 100644
--- a/drivers/net/wireless/zd1211rw/zd_rf.c
+++ b/drivers/net/wireless/zd1211rw/zd_rf.c
@@ -14,8 +14,7 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 #include <linux/errno.h>
diff --git a/drivers/net/wireless/zd1211rw/zd_rf.h b/drivers/net/wireless/zd1211rw/zd_rf.h
index 725b7c99b23d..8f14e25e1041 100644
--- a/drivers/net/wireless/zd1211rw/zd_rf.h
+++ b/drivers/net/wireless/zd1211rw/zd_rf.h
@@ -14,8 +14,7 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 #ifndef _ZD_RF_H
diff --git a/drivers/net/wireless/zd1211rw/zd_rf_al2230.c b/drivers/net/wireless/zd1211rw/zd_rf_al2230.c
index 12babcb633c3..99aed7d78952 100644
--- a/drivers/net/wireless/zd1211rw/zd_rf_al2230.c
+++ b/drivers/net/wireless/zd1211rw/zd_rf_al2230.c
@@ -14,8 +14,7 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 #include <linux/kernel.h>
diff --git a/drivers/net/wireless/zd1211rw/zd_rf_al7230b.c b/drivers/net/wireless/zd1211rw/zd_rf_al7230b.c
index 385c670d1293..5fea485be574 100644
--- a/drivers/net/wireless/zd1211rw/zd_rf_al7230b.c
+++ b/drivers/net/wireless/zd1211rw/zd_rf_al7230b.c
@@ -14,8 +14,7 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 #include <linux/kernel.h>
diff --git a/drivers/net/wireless/zd1211rw/zd_rf_rf2959.c b/drivers/net/wireless/zd1211rw/zd_rf_rf2959.c
index 784d9ccb8fef..a93f657a41c7 100644
--- a/drivers/net/wireless/zd1211rw/zd_rf_rf2959.c
+++ b/drivers/net/wireless/zd1211rw/zd_rf_rf2959.c
@@ -14,8 +14,7 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 #include <linux/kernel.h>
diff --git a/drivers/net/wireless/zd1211rw/zd_rf_uw2453.c b/drivers/net/wireless/zd1211rw/zd_rf_uw2453.c
index c4d324e19c24..61b924027356 100644
--- a/drivers/net/wireless/zd1211rw/zd_rf_uw2453.c
+++ b/drivers/net/wireless/zd1211rw/zd_rf_uw2453.c
@@ -14,8 +14,7 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 #include <linux/kernel.h>
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
index 84d94f572a46..a912dc051111 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
@@ -15,8 +15,7 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 #include <linux/kernel.h>
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.h b/drivers/net/wireless/zd1211rw/zd_usb.h
index 45e3bb28a01c..a9075f225178 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.h
+++ b/drivers/net/wireless/zd1211rw/zd_usb.h
@@ -14,8 +14,7 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 #ifndef _ZD_USB_H
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index c47794b9d42f..4c76bcb9a879 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -143,12 +143,11 @@ struct xenvif {
 	char rx_irq_name[IFNAMSIZ+4]; /* DEVNAME-rx */
 	struct xen_netif_rx_back_ring rx;
 	struct sk_buff_head rx_queue;
-
-	/* Allow xenvif_start_xmit() to peek ahead in the rx request
-	 * ring.  This is a prediction of what rx_req_cons will be
-	 * once all queued skbs are put on the ring.
+	bool rx_queue_stopped;
+	/* Set when the RX interrupt is triggered by the frontend.
+	 * The worker thread may need to wake the queue.
 	 */
-	RING_IDX rx_req_cons_peek;
+	bool rx_event;
 
 	/* This array is allocated seperately as it is large */
 	struct gnttab_copy *grant_copy_op;
@@ -205,8 +204,6 @@ void xenvif_xenbus_fini(void);
 
 int xenvif_schedulable(struct xenvif *vif);
 
-int xenvif_rx_ring_full(struct xenvif *vif);
-
 int xenvif_must_stop_queue(struct xenvif *vif);
 
 /* (Un)Map communication rings. */
@@ -218,21 +215,20 @@ int xenvif_map_frontend_rings(struct xenvif *vif,
 /* Check for SKBs from frontend and schedule backend processing */
 void xenvif_check_rx_xenvif(struct xenvif *vif);
 
-/* Queue an SKB for transmission to the frontend */
-void xenvif_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb);
-/* Notify xenvif that ring now has space to send an skb to the frontend */
-void xenvif_notify_tx_completion(struct xenvif *vif);
-
 /* Prevent the device from generating any further traffic. */
 void xenvif_carrier_off(struct xenvif *vif);
 
-/* Returns number of ring slots required to send an skb to the frontend */
-unsigned int xenvif_count_skb_slots(struct xenvif *vif, struct sk_buff *skb);
-
 int xenvif_tx_action(struct xenvif *vif, int budget);
-void xenvif_rx_action(struct xenvif *vif);
 
 int xenvif_kthread(void *data);
+void xenvif_kick_thread(struct xenvif *vif);
+
+/* Determine whether the needed number of ring slots is available,
+ * and set req_event if not.
+ */
+bool xenvif_rx_ring_slots_available(struct xenvif *vif, int needed);
+
+void xenvif_stop_queue(struct xenvif *vif);
 
 extern bool separate_tx_rx_irq;
 
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index fff8cddfed81..b9de31ea7fc4 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -47,11 +47,6 @@ int xenvif_schedulable(struct xenvif *vif)
 	return netif_running(vif->dev) && netif_carrier_ok(vif->dev);
 }
 
-static int xenvif_rx_schedulable(struct xenvif *vif)
-{
-	return xenvif_schedulable(vif) && !xenvif_rx_ring_full(vif);
-}
-
 static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
 {
 	struct xenvif *vif = dev_id;
@@ -105,8 +100,8 @@ static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
 {
 	struct xenvif *vif = dev_id;
 
-	if (xenvif_rx_schedulable(vif))
-		netif_wake_queue(vif->dev);
+	vif->rx_event = true;
+	xenvif_kick_thread(vif);
 
 	return IRQ_HANDLED;
 }
@@ -122,24 +117,35 @@ static irqreturn_t xenvif_interrupt(int irq, void *dev_id)
 static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct xenvif *vif = netdev_priv(dev);
+	int min_slots_needed;
 
 	BUG_ON(skb->dev != dev);
 
 	/* Drop the packet if vif is not ready */
-	if (vif->task == NULL)
+	if (vif->task == NULL || !xenvif_schedulable(vif))
 		goto drop;
 
-	/* Drop the packet if the target domain has no receive buffers. */
-	if (!xenvif_rx_schedulable(vif))
-		goto drop;
+	/* At a minimum we'll need one slot for the header and one for
+	 * each frag.
+	 */
+	min_slots_needed = 1 + skb_shinfo(skb)->nr_frags;
 
-	/* Reserve ring slots for the worst-case number of fragments. */
-	vif->rx_req_cons_peek += xenvif_count_skb_slots(vif, skb);
+	/* If the skb is GSO then we'll also need an extra slot for the
+	 * metadata.
+	 */
+	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 ||
+	    skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
+		min_slots_needed++;
 
-	if (vif->can_queue && xenvif_must_stop_queue(vif))
-		netif_stop_queue(dev);
+	/* If the skb can't possibly fit in the remaining slots
+	 * then turn off the queue to give the ring a chance to
+	 * drain.
+	 */
+	if (!xenvif_rx_ring_slots_available(vif, min_slots_needed))
+		xenvif_stop_queue(vif);
 
-	xenvif_queue_tx_skb(vif, skb);
+	skb_queue_tail(&vif->rx_queue, skb);
+	xenvif_kick_thread(vif);
 
 	return NETDEV_TX_OK;
 
@@ -149,12 +155,6 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	return NETDEV_TX_OK;
 }
 
-void xenvif_notify_tx_completion(struct xenvif *vif)
-{
-	if (netif_queue_stopped(vif->dev) && xenvif_rx_schedulable(vif))
-		netif_wake_queue(vif->dev);
-}
-
 static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
 {
 	struct xenvif *vif = netdev_priv(dev);
@@ -388,6 +388,8 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
 	if (err < 0)
 		goto err;
 
+	init_waitqueue_head(&vif->wq);
+
 	if (tx_evtchn == rx_evtchn) {
 		/* feature-split-event-channels == 0 */
 		err = bind_interdomain_evtchn_to_irqhandler(
@@ -420,7 +422,6 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
 		disable_irq(vif->rx_irq);
 	}
 
-	init_waitqueue_head(&vif->wq);
 	task = kthread_create(xenvif_kthread,
 			      (void *)vif, "%s", vif->dev->name);
 	if (IS_ERR(task)) {
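xenvif_start_xmit() now only needs a cheap lower bound on the ring slots an skb can consume: one for the linear header, one per frag, plus one if GSO metadata is required. A runnable plain-C model of that estimate (nr_frags and is_gso stand in for the skb_shinfo() fields):

#include <stdbool.h>
#include <stdio.h>

/* Lower-bound slot estimate as in xenvif_start_xmit(): one slot for the
 * header, one per frag, plus one if a GSO extra segment is needed.
 */
static int min_slots_needed(int nr_frags, bool is_gso)
{
	int slots = 1 + nr_frags;

	if (is_gso)
		slots++;

	return slots;
}

int main(void)
{
	printf("%d\n", min_slots_needed(3, true));	/* prints 5 */
	return 0;
}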
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 78425554a537..6b62c3eb8e18 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -39,7 +39,6 @@
 #include <linux/udp.h>
 
 #include <net/tcp.h>
-#include <net/ip6_checksum.h>
 
 #include <xen/xen.h>
 #include <xen/events.h>
@@ -138,36 +137,26 @@ static inline pending_ring_idx_t nr_pending_reqs(struct xenvif *vif)
 		vif->pending_prod + vif->pending_cons;
 }
 
-static int max_required_rx_slots(struct xenvif *vif)
+bool xenvif_rx_ring_slots_available(struct xenvif *vif, int needed)
 {
-	int max = DIV_ROUND_UP(vif->dev->mtu, PAGE_SIZE);
+	RING_IDX prod, cons;
 
-	/* XXX FIXME: RX path dependent on MAX_SKB_FRAGS */
-	if (vif->can_sg || vif->gso_mask || vif->gso_prefix_mask)
-		max += MAX_SKB_FRAGS + 1; /* extra_info + frags */
-
-	return max;
-}
+	do {
+		prod = vif->rx.sring->req_prod;
+		cons = vif->rx.req_cons;
 
-int xenvif_rx_ring_full(struct xenvif *vif)
-{
-	RING_IDX peek   = vif->rx_req_cons_peek;
-	RING_IDX needed = max_required_rx_slots(vif);
+		if (prod - cons >= needed)
+			return true;
 
-	return ((vif->rx.sring->req_prod - peek) < needed) ||
-	       ((vif->rx.rsp_prod_pvt + XEN_NETIF_RX_RING_SIZE - peek) < needed);
-}
+		vif->rx.sring->req_event = prod + 1;
 
-int xenvif_must_stop_queue(struct xenvif *vif)
-{
-	if (!xenvif_rx_ring_full(vif))
-		return 0;
-
-	vif->rx.sring->req_event = vif->rx_req_cons_peek +
-		max_required_rx_slots(vif);
-	mb(); /* request notification /then/ check the queue */
+		/* Make sure event is visible before we check prod
+		 * again.
+		 */
+		mb();
+	} while (vif->rx.sring->req_prod != prod);
 
-	return xenvif_rx_ring_full(vif);
+	return false;
 }
 
 /*
@@ -210,93 +199,6 @@ static bool start_new_rx_buffer(int offset, unsigned long size, int head)
 	return false;
 }
 
-struct xenvif_count_slot_state {
-	unsigned long copy_off;
-	bool head;
-};
-
-unsigned int xenvif_count_frag_slots(struct xenvif *vif,
-				     unsigned long offset, unsigned long size,
-				     struct xenvif_count_slot_state *state)
-{
-	unsigned count = 0;
-
-	offset &= ~PAGE_MASK;
-
-	while (size > 0) {
-		unsigned long bytes;
-
-		bytes = PAGE_SIZE - offset;
-
-		if (bytes > size)
-			bytes = size;
-
-		if (start_new_rx_buffer(state->copy_off, bytes, state->head)) {
-			count++;
-			state->copy_off = 0;
-		}
-
-		if (state->copy_off + bytes > MAX_BUFFER_OFFSET)
-			bytes = MAX_BUFFER_OFFSET - state->copy_off;
-
-		state->copy_off += bytes;
-
-		offset += bytes;
-		size -= bytes;
-
-		if (offset == PAGE_SIZE)
-			offset = 0;
-
-		state->head = false;
-	}
-
-	return count;
-}
-
-/*
- * Figure out how many ring slots we're going to need to send @skb to
- * the guest. This function is essentially a dry run of
- * xenvif_gop_frag_copy.
- */
-unsigned int xenvif_count_skb_slots(struct xenvif *vif, struct sk_buff *skb)
-{
-	struct xenvif_count_slot_state state;
-	unsigned int count;
-	unsigned char *data;
-	unsigned i;
-
-	state.head = true;
-	state.copy_off = 0;
-
-	/* Slot for the first (partial) page of data. */
-	count = 1;
-
-	/* Need a slot for the GSO prefix for GSO extra data? */
-	if (skb_shinfo(skb)->gso_size)
-		count++;
-
-	data = skb->data;
-	while (data < skb_tail_pointer(skb)) {
-		unsigned long offset = offset_in_page(data);
-		unsigned long size = PAGE_SIZE - offset;
-
-		if (data + size > skb_tail_pointer(skb))
-			size = skb_tail_pointer(skb) - data;
-
-		count += xenvif_count_frag_slots(vif, offset, size, &state);
-
-		data += size;
-	}
-
-	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-		unsigned long size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
-		unsigned long offset = skb_shinfo(skb)->frags[i].page_offset;
-
-		count += xenvif_count_frag_slots(vif, offset, size, &state);
-	}
-	return count;
-}
-
 struct netrx_pending_operations {
 	unsigned copy_prod, copy_cons;
 	unsigned meta_prod, meta_cons;
@@ -557,12 +459,12 @@ struct skb_cb_overlay {
 	int meta_slots_used;
 };
 
-static void xenvif_kick_thread(struct xenvif *vif)
+void xenvif_kick_thread(struct xenvif *vif)
 {
 	wake_up(&vif->wq);
 }
 
-void xenvif_rx_action(struct xenvif *vif)
+static void xenvif_rx_action(struct xenvif *vif)
 {
 	s8 status;
 	u16 flags;
@@ -571,11 +473,10 @@ void xenvif_rx_action(struct xenvif *vif)
 	struct sk_buff *skb;
 	LIST_HEAD(notify);
 	int ret;
-	int nr_frags;
-	int count;
 	unsigned long offset;
 	struct skb_cb_overlay *sco;
-	int need_to_notify = 0;
+	bool need_to_notify = false;
+	bool ring_full = false;
 
 	struct netrx_pending_operations npo = {
 		.copy  = vif->grant_copy_op,
@@ -584,29 +485,47 @@ void xenvif_rx_action(struct xenvif *vif)
 
 	skb_queue_head_init(&rxq);
 
-	count = 0;
-
 	while ((skb = skb_dequeue(&vif->rx_queue)) != NULL) {
-		vif = netdev_priv(skb->dev);
-		nr_frags = skb_shinfo(skb)->nr_frags;
+		int max_slots_needed;
+		int i;
+
+		/* We need a cheap worst-case estimate for the number of
+		 * slots we'll use.
+		 */
+
+		max_slots_needed = DIV_ROUND_UP(offset_in_page(skb->data) +
+						skb_headlen(skb),
+						PAGE_SIZE);
+		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+			unsigned int size;
+			size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
+			max_slots_needed += DIV_ROUND_UP(size, PAGE_SIZE);
+		}
+		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 ||
+		    skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
+			max_slots_needed++;
+
+		/* If the skb may not fit then bail out now */
+		if (!xenvif_rx_ring_slots_available(vif, max_slots_needed)) {
+			skb_queue_head(&vif->rx_queue, skb);
+			need_to_notify = true;
+			ring_full = true;
+			break;
+		}
 
 		sco = (struct skb_cb_overlay *)skb->cb;
 		sco->meta_slots_used = xenvif_gop_skb(skb, &npo);
-
-		count += nr_frags + 1;
+		BUG_ON(sco->meta_slots_used > max_slots_needed);
 
 		__skb_queue_tail(&rxq, skb);
-
-		/* Filled the batch queue? */
-		/* XXX FIXME: RX path dependent on MAX_SKB_FRAGS */
-		if (count + MAX_SKB_FRAGS >= XEN_NETIF_RX_RING_SIZE)
-			break;
 	}
 
 	BUG_ON(npo.meta_prod > ARRAY_SIZE(vif->meta));
 
+	vif->rx_queue_stopped = !npo.copy_prod && ring_full;
+
 	if (!npo.copy_prod)
-		return;
+		goto done;
 
 	BUG_ON(npo.copy_prod > MAX_GRANT_COPY_OPS);
 	gnttab_batch_copy(vif->grant_copy_op, npo.copy_prod);
@@ -614,8 +533,6 @@ void xenvif_rx_action(struct xenvif *vif)
 	while ((skb = __skb_dequeue(&rxq)) != NULL) {
 		sco = (struct skb_cb_overlay *)skb->cb;
 
-		vif = netdev_priv(skb->dev);
-
 		if ((1 << vif->meta[npo.meta_cons].gso_type) &
 		    vif->gso_prefix_mask) {
 			resp = RING_GET_RESPONSE(&vif->rx,
@@ -678,28 +595,15 @@ void xenvif_rx_action(struct xenvif *vif)
 
 		RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->rx, ret);
 
-		if (ret)
-			need_to_notify = 1;
-
-		xenvif_notify_tx_completion(vif);
+		need_to_notify |= !!ret;
 
 		npo.meta_cons += sco->meta_slots_used;
 		dev_kfree_skb(skb);
 	}
 
+done:
 	if (need_to_notify)
 		notify_remote_via_irq(vif->rx_irq);
-
-	/* More work to do? */
-	if (!skb_queue_empty(&vif->rx_queue))
-		xenvif_kick_thread(vif);
-}
-
-void xenvif_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb)
-{
-	skb_queue_tail(&vif->rx_queue, skb);
-
-	xenvif_kick_thread(vif);
 }
 
 void xenvif_check_rx_xenvif(struct xenvif *vif)
@@ -1141,265 +1045,14 @@ static int xenvif_set_skb_gso(struct xenvif *vif,
 	}
 
 	skb_shinfo(skb)->gso_size = gso->u.gso.size;
-
-	/* Header must be checked, and gso_segs computed. */
-	skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
-	skb_shinfo(skb)->gso_segs = 0;
-
-	return 0;
-}
-
-static inline int maybe_pull_tail(struct sk_buff *skb, unsigned int len,
-				  unsigned int max)
-{
-	if (skb_headlen(skb) >= len)
-		return 0;
-
-	/* If we need to pullup then pullup to the max, so we
-	 * won't need to do it again.
-	 */
-	if (max > skb->len)
-		max = skb->len;
-
-	if (__pskb_pull_tail(skb, max - skb_headlen(skb)) == NULL)
-		return -ENOMEM;
-
-	if (skb_headlen(skb) < len)
-		return -EPROTO;
+	/* gso_segs will be calculated later */
 
 	return 0;
 }
 
-/* This value should be large enough to cover a tagged ethernet header plus
- * maximally sized IP and TCP or UDP headers.
- */
-#define MAX_IP_HDR_LEN 128
-
-static int checksum_setup_ip(struct xenvif *vif, struct sk_buff *skb,
-			     int recalculate_partial_csum)
-{
-	unsigned int off;
-	bool fragment;
-	int err;
-
-	fragment = false;
-
-	err = maybe_pull_tail(skb,
-			      sizeof(struct iphdr),
-			      MAX_IP_HDR_LEN);
-	if (err < 0)
-		goto out;
-
-	if (ip_hdr(skb)->frag_off & htons(IP_OFFSET | IP_MF))
-		fragment = true;
-
-	off = ip_hdrlen(skb);
-
-	err = -EPROTO;
-
-	if (fragment)
-		goto out;
-
-	switch (ip_hdr(skb)->protocol) {
-	case IPPROTO_TCP:
-		err = maybe_pull_tail(skb,
-				      off + sizeof(struct tcphdr),
-				      MAX_IP_HDR_LEN);
-		if (err < 0)
-			goto out;
-
-		if (!skb_partial_csum_set(skb, off,
-					  offsetof(struct tcphdr, check))) {
-			err = -EPROTO;
-			goto out;
-		}
-
-		if (recalculate_partial_csum)
-			tcp_hdr(skb)->check =
-				~csum_tcpudp_magic(ip_hdr(skb)->saddr,
-						   ip_hdr(skb)->daddr,
-						   skb->len - off,
-						   IPPROTO_TCP, 0);
-		break;
-	case IPPROTO_UDP:
-		err = maybe_pull_tail(skb,
-				      off + sizeof(struct udphdr),
-				      MAX_IP_HDR_LEN);
-		if (err < 0)
-			goto out;
-
-		if (!skb_partial_csum_set(skb, off,
-					  offsetof(struct udphdr, check))) {
-			err = -EPROTO;
-			goto out;
-		}
-
-		if (recalculate_partial_csum)
-			udp_hdr(skb)->check =
-				~csum_tcpudp_magic(ip_hdr(skb)->saddr,
-						   ip_hdr(skb)->daddr,
-						   skb->len - off,
-						   IPPROTO_UDP, 0);
-		break;
-	default:
-		goto out;
-	}
-
-	err = 0;
-
-out:
-	return err;
-}
-
-/* This value should be large enough to cover a tagged ethernet header plus
- * an IPv6 header, all options, and a maximal TCP or UDP header.
- */
-#define MAX_IPV6_HDR_LEN 256
-
-#define OPT_HDR(type, skb, off) \
-	(type *)(skb_network_header(skb) + (off))
-
-static int checksum_setup_ipv6(struct xenvif *vif, struct sk_buff *skb,
-			       int recalculate_partial_csum)
-{
-	int err;
-	u8 nexthdr;
-	unsigned int off;
-	unsigned int len;
-	bool fragment;
-	bool done;
-
-	fragment = false;
-	done = false;
-
-	off = sizeof(struct ipv6hdr);
-
-	err = maybe_pull_tail(skb, off, MAX_IPV6_HDR_LEN);
-	if (err < 0)
-		goto out;
-
-	nexthdr = ipv6_hdr(skb)->nexthdr;
-
-	len = sizeof(struct ipv6hdr) + ntohs(ipv6_hdr(skb)->payload_len);
-	while (off <= len && !done) {
-		switch (nexthdr) {
-		case IPPROTO_DSTOPTS:
-		case IPPROTO_HOPOPTS:
-		case IPPROTO_ROUTING: {
-			struct ipv6_opt_hdr *hp;
-
-			err = maybe_pull_tail(skb,
-					      off +
-					      sizeof(struct ipv6_opt_hdr),
-					      MAX_IPV6_HDR_LEN);
-			if (err < 0)
-				goto out;
-
-			hp = OPT_HDR(struct ipv6_opt_hdr, skb, off);
-			nexthdr = hp->nexthdr;
-			off += ipv6_optlen(hp);
-			break;
-		}
-		case IPPROTO_AH: {
-			struct ip_auth_hdr *hp;
-
-			err = maybe_pull_tail(skb,
-					      off +
-					      sizeof(struct ip_auth_hdr),
-					      MAX_IPV6_HDR_LEN);
-			if (err < 0)
-				goto out;
-
-			hp = OPT_HDR(struct ip_auth_hdr, skb, off);
-			nexthdr = hp->nexthdr;
-			off += ipv6_authlen(hp);
-			break;
-		}
-		case IPPROTO_FRAGMENT: {
-			struct frag_hdr *hp;
-
-			err = maybe_pull_tail(skb,
-					      off +
-					      sizeof(struct frag_hdr),
-					      MAX_IPV6_HDR_LEN);
-			if (err < 0)
-				goto out;
-
-			hp = OPT_HDR(struct frag_hdr, skb, off);
-
-			if (hp->frag_off & htons(IP6_OFFSET | IP6_MF))
-				fragment = true;
-
-			nexthdr = hp->nexthdr;
-			off += sizeof(struct frag_hdr);
-			break;
-		}
-		default:
-			done = true;
-			break;
-		}
-	}
-
-	err = -EPROTO;
-
-	if (!done || fragment)
-		goto out;
-
-	switch (nexthdr) {
-	case IPPROTO_TCP:
-		err = maybe_pull_tail(skb,
-				      off + sizeof(struct tcphdr),
-				      MAX_IPV6_HDR_LEN);
-		if (err < 0)
-			goto out;
-
-		if (!skb_partial_csum_set(skb, off,
-					  offsetof(struct tcphdr, check))) {
-			err = -EPROTO;
-			goto out;
-		}
-
-		if (recalculate_partial_csum)
-			tcp_hdr(skb)->check =
-				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
-						 &ipv6_hdr(skb)->daddr,
-						 skb->len - off,
-						 IPPROTO_TCP, 0);
-		break;
-	case IPPROTO_UDP:
-		err = maybe_pull_tail(skb,
-				      off + sizeof(struct udphdr),
-				      MAX_IPV6_HDR_LEN);
-		if (err < 0)
-			goto out;
-
-		if (!skb_partial_csum_set(skb, off,
-					  offsetof(struct udphdr, check))) {
-			err = -EPROTO;
-			goto out;
-		}
-
-		if (recalculate_partial_csum)
-			udp_hdr(skb)->check =
-				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
-						 &ipv6_hdr(skb)->daddr,
-						 skb->len - off,
-						 IPPROTO_UDP, 0);
-		break;
-	default:
-		goto out;
-	}
-
-	err = 0;
-
-out:
-	return err;
-}
-
 static int checksum_setup(struct xenvif *vif, struct sk_buff *skb)
 {
-	int err = -EPROTO;
-	int recalculate_partial_csum = 0;
+	bool recalculate_partial_csum = false;
 
 	/* A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
 	 * peers can fail to set NETRXF_csum_blank when sending a GSO
@@ -1409,19 +1062,14 @@ static int checksum_setup(struct xenvif *vif, struct sk_buff *skb)
 	if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
 		vif->rx_gso_checksum_fixup++;
 		skb->ip_summed = CHECKSUM_PARTIAL;
-		recalculate_partial_csum = 1;
+		recalculate_partial_csum = true;
 	}
 
 	/* A non-CHECKSUM_PARTIAL SKB does not require setup. */
 	if (skb->ip_summed != CHECKSUM_PARTIAL)
 		return 0;
 
-	if (skb->protocol == htons(ETH_P_IP))
-		err = checksum_setup_ip(vif, skb, recalculate_partial_csum);
-	else if (skb->protocol == htons(ETH_P_IPV6))
-		err = checksum_setup_ipv6(vif, skb, recalculate_partial_csum);
-
-	return err;
+	return skb_checksum_setup(skb, recalculate_partial_csum);
 }
 
 static bool tx_credit_exceeded(struct xenvif *vif, unsigned size)
@@ -1687,6 +1335,20 @@ static int xenvif_tx_submit(struct xenvif *vif)
 
 		skb_probe_transport_header(skb, 0);
 
+		/* If the packet is GSO then we will have just set up the
+		 * transport header offset in checksum_setup so it's now
+		 * straightforward to calculate gso_segs.
+		 */
+		if (skb_is_gso(skb)) {
+			int mss = skb_shinfo(skb)->gso_size;
+			int hdrlen = skb_transport_header(skb) -
+				skb_mac_header(skb) +
+				tcp_hdrlen(skb);
+
+			skb_shinfo(skb)->gso_segs =
+				DIV_ROUND_UP(skb->len - hdrlen, mss);
+		}
+
 		vif->dev->stats.rx_bytes += skb->len;
 		vif->dev->stats.rx_packets++;
 
@@ -1811,7 +1473,8 @@ static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
 
 static inline int rx_work_todo(struct xenvif *vif)
 {
-	return !skb_queue_empty(&vif->rx_queue);
+	return (!skb_queue_empty(&vif->rx_queue) && !vif->rx_queue_stopped) ||
+		vif->rx_event;
 }
 
 static inline int tx_work_todo(struct xenvif *vif)
@@ -1861,8 +1524,6 @@ int xenvif_map_frontend_rings(struct xenvif *vif,
 	rxs = (struct xen_netif_rx_sring *)addr;
 	BACK_RING_INIT(&vif->rx, rxs, PAGE_SIZE);
 
-	vif->rx_req_cons_peek = 0;
-
 	return 0;
 
 err:
@@ -1870,9 +1531,24 @@ err:
 	return err;
 }
 
+void xenvif_stop_queue(struct xenvif *vif)
+{
+	if (!vif->can_queue)
+		return;
+
+	netif_stop_queue(vif->dev);
+}
+
+static void xenvif_start_queue(struct xenvif *vif)
+{
+	if (xenvif_schedulable(vif))
+		netif_wake_queue(vif->dev);
+}
+
 int xenvif_kthread(void *data)
 {
 	struct xenvif *vif = data;
+	struct sk_buff *skb;
 
 	while (!kthread_should_stop()) {
 		wait_event_interruptible(vif->wq,
@@ -1881,12 +1557,22 @@ int xenvif_kthread(void *data)
 		if (kthread_should_stop())
 			break;
 
-		if (rx_work_todo(vif))
+		if (!skb_queue_empty(&vif->rx_queue))
 			xenvif_rx_action(vif);
 
+		vif->rx_event = false;
+
+		if (skb_queue_empty(&vif->rx_queue) &&
+		    netif_queue_stopped(vif->dev))
+			xenvif_start_queue(vif);
+
 		cond_resched();
 	}
 
+	/* Bin any remaining skbs */
+	while ((skb = skb_dequeue(&vif->rx_queue)) != NULL)
+		dev_kfree_skb(skb);
+
 	return 0;
 }
 
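
The comment added to xenvif_tx_submit() above notes that once checksum_setup has placed the transport header, gso_segs is just the resegmentable payload rounded up to whole MSS units. A minimal standalone C sketch of that arithmetic, with made-up packet lengths (illustrative only, not part of the patch):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static unsigned int gso_segs(unsigned int skb_len,
			     unsigned int hdrlen,
			     unsigned int mss)
{
	/* same expression as the hunk above: payload bytes / MSS, rounded up */
	return DIV_ROUND_UP(skb_len - hdrlen, mss);
}

int main(void)
{
	/* e.g. a 65535-byte GSO frame with 54 bytes of MAC+IP+TCP headers */
	printf("%u\n", gso_segs(65535, 54, 1448));	/* prints 46 */
	return 0;
}
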
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index f0358992b04f..7a206cffb062 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -15,8 +15,7 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
 
 #include "common.h"
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 2ab82fe75ede..e955c5692986 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -617,7 +617,9 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		tx->flags |= XEN_NETTXF_extra_info;
 
 		gso->u.gso.size = skb_shinfo(skb)->gso_size;
-		gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
+		gso->u.gso.type = (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) ?
+			XEN_NETIF_GSO_TYPE_TCPV6 :
+			XEN_NETIF_GSO_TYPE_TCPV4;
 		gso->u.gso.pad = 0;
 		gso->u.gso.features = 0;
 
@@ -809,15 +811,18 @@ static int xennet_set_skb_gso(struct sk_buff *skb,
 		return -EINVAL;
 	}
 
-	/* Currently only TCPv4 S.O. is supported. */
-	if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
+	if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4 &&
+	    gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV6) {
 		if (net_ratelimit())
 			pr_warn("Bad GSO type %d\n", gso->u.gso.type);
 		return -EINVAL;
 	}
 
 	skb_shinfo(skb)->gso_size = gso->u.gso.size;
-	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
+	skb_shinfo(skb)->gso_type =
+		(gso->u.gso.type == XEN_NETIF_GSO_TYPE_TCPV4) ?
+		SKB_GSO_TCPV4 :
+		SKB_GSO_TCPV6;
 
 	/* Header must be checked, and gso_segs computed. */
 	skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
@@ -859,9 +864,7 @@ static RING_IDX xennet_fill_frags(struct netfront_info *np,
 
 static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
 {
-	struct iphdr *iph;
-	int err = -EPROTO;
-	int recalculate_partial_csum = 0;
+	bool recalculate_partial_csum = false;
 
 	/*
 	 * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
@@ -873,54 +876,14 @@ static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
 		struct netfront_info *np = netdev_priv(dev);
 		np->rx_gso_checksum_fixup++;
 		skb->ip_summed = CHECKSUM_PARTIAL;
-		recalculate_partial_csum = 1;
+		recalculate_partial_csum = true;
 	}
 
 	/* A non-CHECKSUM_PARTIAL SKB does not require setup. */
 	if (skb->ip_summed != CHECKSUM_PARTIAL)
 		return 0;
 
-	if (skb->protocol != htons(ETH_P_IP))
-		goto out;
-
-	iph = (void *)skb->data;
-
-	switch (iph->protocol) {
-	case IPPROTO_TCP:
-		if (!skb_partial_csum_set(skb, 4 * iph->ihl,
-					  offsetof(struct tcphdr, check)))
-			goto out;
-
-		if (recalculate_partial_csum) {
-			struct tcphdr *tcph = tcp_hdr(skb);
-			tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
-							 skb->len - iph->ihl*4,
-							 IPPROTO_TCP, 0);
-		}
-		break;
-	case IPPROTO_UDP:
-		if (!skb_partial_csum_set(skb, 4 * iph->ihl,
-					  offsetof(struct udphdr, check)))
-			goto out;
-
-		if (recalculate_partial_csum) {
-			struct udphdr *udph = udp_hdr(skb);
-			udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
-							 skb->len - iph->ihl*4,
-							 IPPROTO_UDP, 0);
-		}
-		break;
-	default:
-		if (net_ratelimit())
-			pr_err("Attempting to checksum a non-TCP/UDP packet, dropping a protocol %d packet\n",
-			       iph->protocol);
-		goto out;
-	}
-
-	err = 0;
-
-out:
-	return err;
+	return skb_checksum_setup(skb, recalculate_partial_csum);
 }
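
For orientation, the open-coded path deleted above seeded the transport check field with ~csum_tcpudp_magic() (and the netback equivalent used ~csum_ipv6_magic()); that seeding is what the consolidated skb_checksum_setup() helper is now expected to perform. A rough userspace sketch of the IPv4 pseudo-header seed, with addresses given as plain 32-bit numbers and example values only (illustrative, not the kernel implementation): the check field ends up holding the folded one's-complement sum of the pseudo-header, so whoever completes the checksum only has to sum the transport header and payload and complement the result.

#include <stdint.h>
#include <stdio.h>

/* fold a 32-bit accumulator down to a 16-bit one's-complement sum */
static uint16_t csum_fold32(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

/* IPv4 pseudo-header: saddr, daddr, zero:proto, transport length */
static uint16_t pseudo_hdr_seed(uint32_t saddr, uint32_t daddr,
				uint16_t len, uint8_t proto)
{
	uint32_t sum = 0;

	sum += saddr >> 16;
	sum += saddr & 0xffff;
	sum += daddr >> 16;
	sum += daddr & 0xffff;
	sum += proto;	/* high byte of the zero:proto word is zero */
	sum += len;

	return csum_fold32(sum);	/* value seeded into the check field */
}

int main(void)
{
	/* 192.168.0.1 -> 192.168.0.2, 1460 bytes of TCP (protocol 6) */
	printf("0x%04x\n",
	       pseudo_hdr_seed(0xc0a80001, 0xc0a80002, 1460, 6)); /* 0x870e */
	return 0;
}
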
 
 static int handle_incoming_queue(struct net_device *dev,
@@ -1233,6 +1196,15 @@ static netdev_features_t xennet_fix_features(struct net_device *dev,
 			features &= ~NETIF_F_SG;
 	}
 
+	if (features & NETIF_F_IPV6_CSUM) {
+		if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
+				 "feature-ipv6-csum-offload", "%d", &val) < 0)
+			val = 0;
+
+		if (!val)
+			features &= ~NETIF_F_IPV6_CSUM;
+	}
+
 	if (features & NETIF_F_TSO) {
 		if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
 				 "feature-gso-tcpv4", "%d", &val) < 0)
@@ -1242,6 +1214,15 @@ static netdev_features_t xennet_fix_features(struct net_device *dev,
 			features &= ~NETIF_F_TSO;
 	}
 
+	if (features & NETIF_F_TSO6) {
+		if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
+				 "feature-gso-tcpv6", "%d", &val) < 0)
+			val = 0;
+
+		if (!val)
+			features &= ~NETIF_F_TSO6;
+	}
+
 	return features;
 }
 
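
The xennet_fix_features() hunks above follow one pattern: an offload stays enabled only if the backend advertises the matching xenstore key. A minimal sketch of that negotiation shape, illustrative only: backend_feature() is a hypothetical stand-in for xenbus_scanf() on the otherend directory, and the flag values are invented for the example.

#include <stdio.h>
#include <string.h>

#define F_TSO6		0x1
#define F_IPV6_CSUM	0x2

/* hypothetical lookup: nonzero if the backend wrote the key with value 1 */
static int backend_feature(const char *key)
{
	return strcmp(key, "feature-gso-tcpv6") == 0;	/* pretend only TSO6 */
}

static unsigned int fix_features(unsigned int features)
{
	if ((features & F_TSO6) && !backend_feature("feature-gso-tcpv6"))
		features &= ~F_TSO6;
	if ((features & F_IPV6_CSUM) &&
	    !backend_feature("feature-ipv6-csum-offload"))
		features &= ~F_IPV6_CSUM;
	return features;
}

int main(void)
{
	printf("0x%x\n", fix_features(F_TSO6 | F_IPV6_CSUM));	/* prints 0x1 */
	return 0;
}

The frontend side of the same handshake appears further down in this patch, where xennet advertises "feature-gso-tcpv6" and "feature-ipv6-csum-offload" to the backend with xenbus_write().
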
@@ -1380,7 +1361,9 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
 	netif_napi_add(netdev, &np->napi, xennet_poll, 64);
 	netdev->features        = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
 				  NETIF_F_GSO_ROBUST;
-	netdev->hw_features	= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO;
+	netdev->hw_features	= NETIF_F_SG |
+				  NETIF_F_IPV6_CSUM |
+				  NETIF_F_TSO | NETIF_F_TSO6;
 
 	/*
          * Assume that all hw features are available for now. This set
@@ -1758,6 +1741,19 @@ again:
 		goto abort_transaction;
 	}
 
+	err = xenbus_write(xbt, dev->nodename, "feature-gso-tcpv6", "1");
+	if (err) {
+		message = "writing feature-gso-tcpv6";
+		goto abort_transaction;
+	}
+
+	err = xenbus_write(xbt, dev->nodename, "feature-ipv6-csum-offload",
+			   "1");
+	if (err) {
+		message = "writing feature-ipv6-csum-offload";
+		goto abort_transaction;
+	}
+
 	err = xenbus_transaction_end(xbt, 0);
 	if (err) {
 		if (err == -EAGAIN)